From f727a0c324ce2c7e7cbe478d22895bf7bc8ed0a6 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:46 -0500 Subject: staging/hfi1: Add function stubs for TID caching
Add MMU notifier helper functions and TID caching function stubs in preparation for the TID caching implementation. TID caching makes use of the MMU notifier to allow the driver to respond to the user freeing memory which is allocated to the HFI. This patch implements the basic MMU notifier functions to insert, find and remove buffer pages from memory based on the mmu_notifier being invoked. In addition, it puts in place stubs for the main entry points needed by follow-on code. Follow-up patches will complete the implementation of the interaction with user space and make use of these functions.
Signed-off-by: Mitko Haralanov Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Kconfig | 1 + drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/hfi.h | 4 + drivers/staging/rdma/hfi1/user_exp_rcv.c | 264 +++++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/user_exp_rcv.h | 8 + 5 files changed, 278 insertions(+), 1 deletion(-) create mode 100644 drivers/staging/rdma/hfi1/user_exp_rcv.c (limited to 'drivers/staging')
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig index fd25078ee923..bd0249bcf199 100644 --- a/drivers/staging/rdma/hfi1/Kconfig +++ b/drivers/staging/rdma/hfi1/Kconfig @@ -1,6 +1,7 @@ config INFINIBAND_HFI1 tristate "Intel OPA Gen1 support" depends on X86_64 + select MMU_NOTIFIER default m ---help--- This is a low-level driver for Intel OPA Gen1 adapter.
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index 68c5a315e557..e63251b9c56b 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o hfi1-y := chip.o cq.o device.o diag.o dma.o driver.o efivar.o eprom.o file_ops.o firmware.o \ init.o intr.o keys.o mad.o mmap.o mr.o pcie.o pio.o pio_copy.o \ qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \ - uc.o ud.o user_pages.o user_sdma.o verbs_mcast.o verbs.o + uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs_mcast.o verbs.o hfi1-$(CONFIG_DEBUG_FS) += debugfs.o CFLAGS_trace.o = -I$(src)
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 2611bb2e764d..ddb21f0fffe7 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -65,6 +65,8 @@ #include #include #include +#include +#include #include "chip_registers.h" #include "common.h" @@ -1125,6 +1127,8 @@ struct hfi1_devdata { #define PT_EAGER 1 #define PT_INVALID 2 +struct mmu_rb_node; + /* Private data for file operations */ struct hfi1_filedata { struct hfi1_ctxtdata *uctxt;
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c new file mode 100644 index 000000000000..bafeddf67c8f --- /dev/null +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -0,0 +1,264 @@ +/* + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include + +#include "user_exp_rcv.h" +#include "trace.h" + +struct mmu_rb_node { + struct rb_node rbnode; + unsigned long virt; + unsigned long phys; + unsigned long len; + struct tid_group *grp; + u32 rcventry; + dma_addr_t dma_addr; + bool freed; + unsigned npages; + struct page *pages[0]; +}; + +enum mmu_call_types { + MMU_INVALIDATE_PAGE = 0, + MMU_INVALIDATE_RANGE = 1 +}; + +static const char * const mmu_types[] = { + "PAGE", + "RANGE" +}; + +static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, + unsigned long); +static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *, + unsigned long) __maybe_unused; +static inline struct mmu_rb_node *mmu_rb_search_by_entry(struct rb_root *, + u32); +static int mmu_rb_insert_by_addr(struct rb_root *, + struct mmu_rb_node *) __maybe_unused; +static int mmu_rb_insert_by_entry(struct rb_root *, + struct mmu_rb_node *) __maybe_unused; +static void mmu_notifier_mem_invalidate(struct mmu_notifier *, + unsigned long, unsigned long, + enum mmu_call_types); +static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, + unsigned long); +static inline void mmu_notifier_range_start(struct mmu_notifier *, + struct mm_struct *, + unsigned long, unsigned long); + +static struct mmu_notifier_ops __maybe_unused mn_opts = { + .invalidate_page = mmu_notifier_page, + .invalidate_range_start = mmu_notifier_range_start, +}; + +/* + * Initialize context and file private data needed for Expected + * receive caching. This needs to be done after the context has + * been configured with the eager/expected RcvEntry counts. 
+ */ +int hfi1_user_exp_rcv_init(struct file *fp) +{ + return -EINVAL; +} + +int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) +{ + return -EINVAL; +} + +int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo) +{ + return -EINVAL; +} + +int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo) +{ + return -EINVAL; +} + +int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo) +{ + return -EINVAL; +} + +static inline void mmu_notifier_page(struct mmu_notifier *mn, + struct mm_struct *mm, unsigned long addr) +{ + mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE, + MMU_INVALIDATE_PAGE); +} + +static inline void mmu_notifier_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + mmu_notifier_mem_invalidate(mn, start, end, MMU_INVALIDATE_RANGE); +} + +static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, + unsigned long start, unsigned long end, + enum mmu_call_types type) +{ + /* Stub for now */ +} + +static inline int mmu_addr_cmp(struct mmu_rb_node *node, unsigned long addr, + unsigned long len) +{ + if ((addr + len) <= node->virt) + return -1; + else if (addr >= node->virt && addr < (node->virt + node->len)) + return 0; + else + return 1; +} + +static inline int mmu_entry_cmp(struct mmu_rb_node *node, u32 entry) +{ + if (entry < node->rcventry) + return -1; + else if (entry > node->rcventry) + return 1; + else + return 0; +} + +static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *root, + unsigned long addr) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct mmu_rb_node *mnode = + container_of(node, struct mmu_rb_node, rbnode); + /* + * When searching, use at least one page length for size. The + * MMU notifier will not give us anything less than that. We + * also don't need anything more than a page because we are + * guaranteed to have non-overlapping buffers in the tree. 
+ */ + int result = mmu_addr_cmp(mnode, addr, PAGE_SIZE); + + if (result < 0) + node = node->rb_left; + else if (result > 0) + node = node->rb_right; + else + return mnode; + } + return NULL; +} + +static inline struct mmu_rb_node *mmu_rb_search_by_entry(struct rb_root *root, + u32 index) +{ + struct mmu_rb_node *rbnode; + struct rb_node *node; + + if (root && !RB_EMPTY_ROOT(root)) + for (node = rb_first(root); node; node = rb_next(node)) { + rbnode = rb_entry(node, struct mmu_rb_node, rbnode); + if (rbnode->rcventry == index) + return rbnode; + } + return NULL; +} + +static int mmu_rb_insert_by_entry(struct rb_root *root, + struct mmu_rb_node *node) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + + while (*new) { + struct mmu_rb_node *this = + container_of(*new, struct mmu_rb_node, rbnode); + int result = mmu_entry_cmp(this, node->rcventry); + + parent = *new; + if (result < 0) + new = &((*new)->rb_left); + else if (result > 0) + new = &((*new)->rb_right); + else + return 1; + } + + rb_link_node(&node->rbnode, parent, new); + rb_insert_color(&node->rbnode, root); + return 0; +} + +static int mmu_rb_insert_by_addr(struct rb_root *root, struct mmu_rb_node *node) +{ + struct rb_node **new = &root->rb_node, *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct mmu_rb_node *this = + container_of(*new, struct mmu_rb_node, rbnode); + int result = mmu_addr_cmp(this, node->virt, node->len); + + parent = *new; + if (result < 0) + new = &((*new)->rb_left); + else if (result > 0) + new = &((*new)->rb_right); + else + return 1; + } + + /* Add new node and rebalance tree. */ + rb_link_node(&node->rbnode, parent, new); + rb_insert_color(&node->rbnode, root); + + return 0; +} diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/staging/rdma/hfi1/user_exp_rcv.h index 4f4876e1d353..28ef98a45a1e 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.h +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.h @@ -50,6 +50,8 @@ * */ +#include "hfi.h" + #define EXP_TID_TIDLEN_MASK 0x7FFULL #define EXP_TID_TIDLEN_SHIFT 0 #define EXP_TID_TIDCTRL_MASK 0x3ULL @@ -71,4 +73,10 @@ (tid) |= EXP_TID_SET(field, (value)); \ } while (0) +int hfi1_user_exp_rcv_init(struct file *); +int hfi1_user_exp_rcv_free(struct hfi1_filedata *); +int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *); +int hfi1_user_exp_rcv_clear(struct file *, struct hfi1_tid_info *); +int hfi1_user_exp_rcv_invalid(struct file *, struct hfi1_tid_info *); + #endif /* _HFI1_USER_EXP_RCV_H */ -- cgit v1.2.3-59-g8ed1b From 955ad36dcde4639664253c2bd39f626cd88d2acf Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:48 -0500 Subject: uapi/hfi1_user: Add command and event for TID caching TID caching will use a new event to signal userland that cache invalidation has occurred and needs a matching command code that will be used to read the invalidated TIDs. Add the event bit and the new command to the exported header file. The command is also added to the switch() statement in file_ops.c for completeness and in preparation for its usage later. 
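For illustration, here is a hypothetical PSM-side sketch of how the new event bit and command are meant to work together once the feature is fully wired up: userspace notices the event, then drains the invalidated TIDs with HFI1_CMD_TID_INVAL_READ. This is only a sketch, not part of the patch; it assumes the driver's write()-based command interface and the struct hfi1_cmd / struct hfi1_tid_info layouts from hfi1_user.h as they stand at the end of the series, and it elides mapping of the events page and context setup.

/*
 * Hypothetical userspace sketch (not part of the patch): check the
 * TID-invalidation event bit, then ask the driver for the list of
 * invalidated TID entries.
 */
#include <stdint.h>
#include <unistd.h>
#include <rdma/hfi/hfi1_user.h>

static int drain_invalidated_tids(int fd, volatile uint64_t *events,
				  uint32_t *tidlist, uint32_t max_tids)
{
	struct hfi1_tid_info tinfo = {
		.tidlist = (uintptr_t)tidlist,	/* filled in by the driver */
		.tidcnt = max_tids,
	};
	struct hfi1_cmd cmd = {
		.type = HFI1_CMD_TID_INVAL_READ,
		.len = sizeof(tinfo),
		.addr = (uintptr_t)&tinfo,
	};

	if (!(*events & HFI1_EVENT_TID_MMU_NOTIFY))
		return 0;			/* nothing was invalidated */
	if (write(fd, &cmd, sizeof(cmd)) < 0)
		return -1;
	return (int)tinfo.tidcnt;		/* entries written to tidlist */
}

As a later patch in this series shows, the driver clears the event bit itself while holding invalid_lock, so userspace does not need to (and should not) reset it.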
Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/file_ops.c | 1 + include/uapi/rdma/hfi/hfi1_user.h | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index d57d549052c8..c66693532be0 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -241,6 +241,7 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, must_be_root = 1; /* validate user */ copy = 0; break; + case HFI1_CMD_TID_INVAL_READ: default: ret = -EINVAL; goto bail; diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index cf172718e3d5..92be2e373019 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -134,6 +134,7 @@ #define HFI1_CMD_ACK_EVENT 10 /* ack & clear user status bits */ #define HFI1_CMD_SET_PKEY 11 /* set context's pkey */ #define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */ +#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */ /* separate EPROM commands from normal PSM commands */ #define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */ #define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */ @@ -147,13 +148,15 @@ #define _HFI1_EVENT_LID_CHANGE_BIT 2 #define _HFI1_EVENT_LMC_CHANGE_BIT 3 #define _HFI1_EVENT_SL2VL_CHANGE_BIT 4 -#define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_SL2VL_CHANGE_BIT +#define _HFI1_EVENT_TID_MMU_NOTIFY_BIT 5 +#define _HFI1_MAX_EVENT_BIT _HFI1_EVENT_TID_MMU_NOTIFY_BIT #define HFI1_EVENT_FROZEN (1UL << _HFI1_EVENT_FROZEN_BIT) #define HFI1_EVENT_LINKDOWN (1UL << _HFI1_EVENT_LINKDOWN_BIT) #define HFI1_EVENT_LID_CHANGE (1UL << _HFI1_EVENT_LID_CHANGE_BIT) #define HFI1_EVENT_LMC_CHANGE (1UL << _HFI1_EVENT_LMC_CHANGE_BIT) #define HFI1_EVENT_SL2VL_CHANGE (1UL << _HFI1_EVENT_SL2VL_CHANGE_BIT) +#define HFI1_EVENT_TID_MMU_NOTIFY (1UL << _HFI1_EVENT_TID_MMU_NOTIFY_BIT) /* * These are the status bits readable (in ASCII form, 64bit value) -- cgit v1.2.3-59-g8ed1b From a86cd357e5be1b7eae3b399c02b972a92808c38a Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:49 -0500 Subject: staging/hfi1: Add definitions needed for TID cache In preparation for adding the TID caching support, there is a set of headers, structures, and variables which will be needed. This commit adds them to the hfi.h header file. 
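Among the new hfi1_filedata fields, the tid_limit/tid_used pair ends up enforcing a per-subcontext share of the context's expected-receive entries. Below is a standalone sketch of that split; it mirrors the remainder-distribution logic that hfi1_user_exp_rcv_init() adopts later in this series, and the numbers in main() are purely illustrative.

/*
 * Sketch of the per-subcontext RcvArray share: expected_count is
 * divided evenly among subcontexts, with the remainder going to the
 * lowest-numbered ones (illustrative stand-alone version).
 */
#include <stdio.h>

static unsigned int tid_limit(unsigned int expected_count,
			      unsigned int subctxt_cnt, unsigned int subctxt)
{
	unsigned int limit = expected_count / subctxt_cnt;

	if (expected_count % subctxt_cnt > subctxt)
		limit++;
	return limit;
}

int main(void)
{
	/* e.g. 2048 expected-receive entries shared by 3 subcontexts */
	for (unsigned int s = 0; s < 3; s++)
		printf("subctxt %u: %u entries\n", s, tid_limit(2048, 3, s));
	return 0;	/* prints 683, 683, 682 - shares differ by at most 1 */
}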
Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers/staging')
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index ddb21f0fffe7..51ecf45ef70b 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -179,6 +179,11 @@ struct ctxt_eager_bufs { } *rcvtids; }; +struct exp_tid_set { + struct list_head list; + u32 count; +}; + struct hfi1_ctxtdata { /* shadow the ctxt's RcvCtrl register */ u64 rcvctrl; @@ -247,6 +252,11 @@ struct hfi1_ctxtdata { struct page **tid_pg_list; /* dma handles for exp tid pages */ dma_addr_t *physshadow; + + struct exp_tid_set tid_group_list; + struct exp_tid_set tid_used_list; + struct exp_tid_set tid_full_list; + /* lock protecting all Expected TID data */ spinlock_t exp_lock; /* number of pio bufs for this ctxt (all procs, if shared) */ @@ -1137,6 +1147,16 @@ struct hfi1_filedata { struct hfi1_user_sdma_pkt_q *pq; /* for cpu affinity; -1 if none */ int rec_cpu_num; + struct mmu_notifier mn; + struct rb_root tid_rb_root; + spinlock_t tid_lock; /* protect tid_[limit,used] counters */ + u32 tid_limit; + u32 tid_used; + spinlock_t rb_lock; /* protect tid_rb_root RB tree */ + u32 *invalid_tids; + u32 invalid_tid_idx; + spinlock_t invalid_lock; /* protect the invalid_tids array */ + int (*mmu_rb_insert)(struct rb_root *, struct mmu_rb_node *); }; extern struct list_head hfi1_dev_list; -- cgit v1.2.3-59-g8ed1b
From acac10fdd75a85b10a638381127f7bbed632580d Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:50 -0500 Subject: staging/hfi1: Remove un-needed variable
There is no need to use a separate variable for a return value and a label when returning right away would do just as well.
Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/file_ops.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers/staging')
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index c66693532be0..76fe60315bb4 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -1037,22 +1037,19 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd, static int init_subctxts(struct hfi1_ctxtdata *uctxt, const struct hfi1_user_info *uinfo) { - int ret = 0; unsigned num_subctxts; num_subctxts = uinfo->subctxt_cnt; - if (num_subctxts > HFI1_MAX_SHARED_CTXTS) { - ret = -EINVAL; - goto bail; - } + if (num_subctxts > HFI1_MAX_SHARED_CTXTS) + return -EINVAL; uctxt->subctxt_cnt = uinfo->subctxt_cnt; uctxt->subctxt_id = uinfo->subctxt_id; uctxt->active_slaves = 1; uctxt->redirect_seq_cnt = 1; set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags); -bail: - return ret; + + return 0; } static int setup_subctxt(struct hfi1_ctxtdata *uctxt) -- cgit v1.2.3-59-g8ed1b
From b8abe346737215c6ee6b50c01771b4ca1746801d Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:51 -0500 Subject: staging/hfi1: TID group definitions and support funcs
Definitions and functions used to manage sets of TID/RcvArray groups. These will be used by the TID caching functionality coming with later patches. TID groups (or RcvArray groups) are groups of TID/RcvArray entries organized in sets of 8 and aligned on cacheline boundaries. A toy model of this group bookkeeping is sketched below.
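The sketch models only the base/size/used/map accounting of a tid_group; the field names mirror the patch, while the harness around them is purely illustrative and not part of the commit.

/*
 * Toy model of a tid_group: 'map' is a bitmask of the used entries in
 * a group of 'size' (8) RcvArray slots starting at index 'base'.
 */
#include <stdio.h>

struct toy_tid_group {
	unsigned int base;		/* first RcvArray index of the group */
	unsigned char size, used, map;
};

/* Claim the first free entry; returns its RcvArray index or -1 if full. */
static int claim_entry(struct toy_tid_group *grp)
{
	for (unsigned int i = 0; i < grp->size; i++) {
		if (!(grp->map & (1u << i))) {
			grp->map |= 1u << i;
			grp->used++;
			return (int)(grp->base + i);
		}
	}
	return -1;
}

int main(void)
{
	struct toy_tid_group grp = { .base = 128, .size = 8 };
	int e;

	while ((e = claim_entry(&grp)) >= 0)
		printf("claimed RcvArray entry %d (map=0x%02x)\n", e, grp.map);
	return 0;
}

An 8-bit map is sufficient because a group never spans more than 8 entries; the real code uses the same "grp->map & (1 << idx)" test when programming and unprogramming entries.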
The TID/RcvArray entries are managed in this way to make taking advantage of write-combining easier - each group is an entire cacheline. rcv_array_wc_fill() is provided to allow generating writes to TIDs which are not currently being used in order to cause the flush of the write-combining buffer.
Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_exp_rcv.c | 64 ++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'drivers/staging')
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index bafeddf67c8f..7f15024daab9 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -52,6 +52,14 @@ #include "user_exp_rcv.h" #include "trace.h" +struct tid_group { + struct list_head list; + unsigned base; + u8 size; + u8 used; + u8 map; +}; + struct mmu_rb_node { struct rb_node rbnode; unsigned long virt; @@ -75,6 +83,8 @@ static const char * const mmu_types[] = { "RANGE" }; +#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list)) + static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, unsigned long); static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *, @@ -94,6 +104,43 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); +static inline void exp_tid_group_init(struct exp_tid_set *set) +{ + INIT_LIST_HEAD(&set->list); + set->count = 0; +} + +static inline void tid_group_remove(struct tid_group *grp, + struct exp_tid_set *set) +{ + list_del_init(&grp->list); + set->count--; +} + +static inline void tid_group_add_tail(struct tid_group *grp, + struct exp_tid_set *set) +{ + list_add_tail(&grp->list, &set->list); + set->count++; +} + +static inline struct tid_group *tid_group_pop(struct exp_tid_set *set) +{ + struct tid_group *grp = + list_first_entry(&set->list, struct tid_group, list); + list_del_init(&grp->list); + set->count--; + return grp; +} + +static inline void tid_group_move(struct tid_group *group, + struct exp_tid_set *s1, + struct exp_tid_set *s2) +{ + tid_group_remove(group, s1); + tid_group_add_tail(group, s2); +} + static struct mmu_notifier_ops __maybe_unused mn_opts = { .invalidate_page = mmu_notifier_page, .invalidate_range_start = mmu_notifier_range_start, @@ -114,6 +161,23 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) return -EINVAL; } +/* + * Write an "empty" RcvArray entry. + * This function exists so the TID registration code can use it + * to write to unused/unneeded entries and still take advantage + * of the WC performance improvements. The HFI will ignore this + * write to the RcvArray entry. + */ +static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index) +{ + /* + * Doing the WC fill writes only makes sense if the device is + * present and the RcvArray has been mapped as WC memory. + */ + if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc) + writeq(0, dd->rcvarray_wc + (index * 8)); +} + int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo) { return -EINVAL; -- cgit v1.2.3-59-g8ed1b
From f88e0c8a139dc737b997876203885a3168c32e95 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:52 -0500 Subject: staging/hfi1: Add building blocks for TID caching
Functions added by this patch are building blocks for the upcoming TID caching functionality. The functions added are currently unused (and marked as such.)
The functions' purposes are to find physically contiguous pages in the user's virtual buffer, program the RcvArray group entries with these physical chunks, and unprogram the RcvArray groups.
Reviewed-by: Ira Weiny Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_exp_rcv.c | 337 +++++++++++++++++++++++++++++++ 1 file changed, 337 insertions(+) (limited to 'drivers/staging')
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 7f15024daab9..5a7e455b9f58 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -83,8 +83,20 @@ static const char * const mmu_types[] = { "RANGE" }; +struct tid_pageset { + u16 idx; + u16 count; +}; + #define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list)) +static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *, + struct rb_root *) __maybe_unused; +static u32 find_phys_blocks(struct page **, unsigned, + struct tid_pageset *) __maybe_unused; +static int set_rcvarray_entry(struct file *, unsigned long, u32, + struct tid_group *, struct page **, + unsigned) __maybe_unused; static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, unsigned long); static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *, @@ -103,6 +115,21 @@ static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, static inline void mmu_notifier_range_start(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); +static int program_rcvarray(struct file *, unsigned long, struct tid_group *, + struct tid_pageset *, unsigned, u16, struct page **, + u32 *, unsigned *, unsigned *) __maybe_unused; +static int unprogram_rcvarray(struct file *, u32, + struct tid_group **) __maybe_unused; +static void clear_tid_node(struct hfi1_filedata *, u16, + struct mmu_rb_node *) __maybe_unused; + +static inline u32 rcventry2tidinfo(u32 rcventry) +{ + u32 pair = rcventry & ~0x1; + + return EXP_TID_SET(IDX, pair >> 1) | + EXP_TID_SET(CTRL, 1 << (rcventry - pair)); +} static inline void exp_tid_group_init(struct exp_tid_set *set) { @@ -193,6 +220,316 @@ int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo) return -EINVAL; } +static u32 find_phys_blocks(struct page **pages, unsigned npages, + struct tid_pageset *list) +{ + unsigned pagecount, pageidx, setcount = 0, i; + unsigned long pfn, this_pfn; + + if (!npages) + return 0; + + /* + * Look for sets of physically contiguous pages in the user buffer. + * This will allow us to optimize Expected RcvArray entry usage by + * using the bigger supported sizes. + */ + pfn = page_to_pfn(pages[0]); + for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) { + this_pfn = i < npages ? page_to_pfn(pages[i]) : 0; + + /* + * If the pfn's are not sequential, pages are not physically + * contiguous. + */ + if (this_pfn != ++pfn) { + /* + * At this point we have to loop over the set of + * physically contiguous pages and break them down into + * sizes supported by the HW. + * There are two main constraints: + * 1. The max buffer size is MAX_EXPECTED_BUFFER. + * If the total set size is bigger than that + * program only a MAX_EXPECTED_BUFFER chunk. + * 2. The buffer size has to be a power of two. If + * it is not, round down to the closest power of + * 2 and program that size.
+ */ + while (pagecount) { + int maxpages = pagecount; + u32 bufsize = pagecount * PAGE_SIZE; + + if (bufsize > MAX_EXPECTED_BUFFER) + maxpages = + MAX_EXPECTED_BUFFER >> + PAGE_SHIFT; + else if (!is_power_of_2(bufsize)) + maxpages = + rounddown_pow_of_two(bufsize) >> + PAGE_SHIFT; + + list[setcount].idx = pageidx; + list[setcount].count = maxpages; + pagecount -= maxpages; + pageidx += maxpages; + setcount++; + } + pageidx = i; + pagecount = 1; + pfn = this_pfn; + } else { + pagecount++; + } + } + return setcount; +} + +/** + * program_rcvarray() - program an RcvArray group with receive buffers + * @fp: file pointer + * @vaddr: starting user virtual address + * @grp: RcvArray group + * @sets: array of struct tid_pageset holding information on physically + * contiguous chunks from the user buffer + * @start: starting index into sets array + * @count: number of struct tid_pageset's to program + * @pages: an array of struct page * for the user buffer + * @tidlist: the array of u32 elements when the information about the + * programmed RcvArray entries is to be encoded. + * @tididx: starting offset into tidlist + * @pmapped: (output parameter) number of pages programmed into the RcvArray + * entries. + * + * This function will program up to 'count' number of RcvArray entries from the + * group 'grp'. To make best use of write-combining writes, the function will + * perform writes to the unused RcvArray entries which will be ignored by the + * HW. Each RcvArray entry will be programmed with a physically contiguous + * buffer chunk from the user's virtual buffer. + * + * Return: + * -EINVAL if the requested count is larger than the size of the group, + * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or + * number of RcvArray entries programmed. + */ +static int program_rcvarray(struct file *fp, unsigned long vaddr, + struct tid_group *grp, + struct tid_pageset *sets, + unsigned start, u16 count, struct page **pages, + u32 *tidlist, unsigned *tididx, unsigned *pmapped) +{ + struct hfi1_filedata *fd = fp->private_data; + struct hfi1_ctxtdata *uctxt = fd->uctxt; + struct hfi1_devdata *dd = uctxt->dd; + u16 idx; + u32 tidinfo = 0, rcventry, useidx = 0; + int mapped = 0; + + /* Count should never be larger than the group size */ + if (count > grp->size) + return -EINVAL; + + /* Find the first unused entry in the group */ + for (idx = 0; idx < grp->size; idx++) { + if (!(grp->map & (1 << idx))) { + useidx = idx; + break; + } + rcv_array_wc_fill(dd, grp->base + idx); + } + + idx = 0; + while (idx < count) { + u16 npages, pageidx, setidx = start + idx; + int ret = 0; + + /* + * If this entry in the group is used, move to the next one. + * If we go past the end of the group, exit the loop. 
+ */ + if (useidx >= grp->size) { + break; + } else if (grp->map & (1 << useidx)) { + rcv_array_wc_fill(dd, grp->base + useidx); + useidx++; + continue; + } + + rcventry = grp->base + useidx; + npages = sets[setidx].count; + pageidx = sets[setidx].idx; + + ret = set_rcvarray_entry(fp, vaddr + (pageidx * PAGE_SIZE), + rcventry, grp, pages + pageidx, + npages); + if (ret) + return ret; + mapped += npages; + + tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) | + EXP_TID_SET(LEN, npages); + tidlist[(*tididx)++] = tidinfo; + grp->used++; + grp->map |= 1 << useidx++; + idx++; + } + + /* Fill the rest of the group with "blank" writes */ + for (; useidx < grp->size; useidx++) + rcv_array_wc_fill(dd, grp->base + useidx); + *pmapped = mapped; + return idx; +} + +static int set_rcvarray_entry(struct file *fp, unsigned long vaddr, + u32 rcventry, struct tid_group *grp, + struct page **pages, unsigned npages) +{ + int ret; + struct hfi1_filedata *fd = fp->private_data; + struct hfi1_ctxtdata *uctxt = fd->uctxt; + struct mmu_rb_node *node; + struct hfi1_devdata *dd = uctxt->dd; + struct rb_root *root = &fd->tid_rb_root; + dma_addr_t phys; + + /* + * Allocate the node first so we can handle a potential + * failure before we've programmed anything. + */ + node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages), + GFP_KERNEL); + if (!node) + return -ENOMEM; + + phys = pci_map_single(dd->pcidev, + __va(page_to_phys(pages[0])), + npages * PAGE_SIZE, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&dd->pcidev->dev, phys)) { + dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n", + phys); + kfree(node); + return -EFAULT; + } + + node->virt = vaddr; + node->phys = page_to_phys(pages[0]); + node->len = npages * PAGE_SIZE; + node->npages = npages; + node->rcventry = rcventry; + node->dma_addr = phys; + node->grp = grp; + node->freed = false; + memcpy(node->pages, pages, sizeof(struct page *) * npages); + + spin_lock(&fd->rb_lock); + ret = fd->mmu_rb_insert(root, node); + spin_unlock(&fd->rb_lock); + + if (ret) { + hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d", + node->rcventry, node->virt, node->phys, ret); + pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE, + PCI_DMA_FROMDEVICE); + kfree(node); + return -EFAULT; + } + hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1); + return 0; +} + +static int unprogram_rcvarray(struct file *fp, u32 tidinfo, + struct tid_group **grp) +{ + struct hfi1_filedata *fd = fp->private_data; + struct hfi1_ctxtdata *uctxt = fd->uctxt; + struct hfi1_devdata *dd = uctxt->dd; + struct mmu_rb_node *node; + u8 tidctrl = EXP_TID_GET(tidinfo, CTRL); + u32 tidbase = uctxt->expected_base, + tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry; + + if (tididx >= uctxt->expected_count) { + dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n", + tididx, uctxt->ctxt); + return -EINVAL; + } + + if (tidctrl == 0x3) + return -EINVAL; + + rcventry = tidbase + tididx + (tidctrl - 1); + + spin_lock(&fd->rb_lock); + node = mmu_rb_search_by_entry(&fd->tid_rb_root, rcventry); + if (!node) { + spin_unlock(&fd->rb_lock); + return -EBADF; + } + rb_erase(&node->rbnode, &fd->tid_rb_root); + spin_unlock(&fd->rb_lock); + if (grp) + *grp = node->grp; + clear_tid_node(fd, fd->subctxt, node); + return 0; +} + +static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt, + struct mmu_rb_node *node) +{ + struct hfi1_ctxtdata *uctxt = fd->uctxt; + struct hfi1_devdata *dd = uctxt->dd; + + hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0); + /* + * 
Make sure device has seen the write before we unpin the + * pages. + */ + flush_wc(); + + pci_unmap_single(dd->pcidev, node->dma_addr, node->len, + PCI_DMA_FROMDEVICE); + hfi1_release_user_pages(node->pages, node->npages, true); + + node->grp->used--; + node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); + + if (node->grp->used == node->grp->size - 1) + tid_group_move(node->grp, &uctxt->tid_full_list, + &uctxt->tid_used_list); + else if (!node->grp->used) + tid_group_move(node->grp, &uctxt->tid_used_list, + &uctxt->tid_group_list); + kfree(node); +} + +static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, + struct exp_tid_set *set, struct rb_root *root) +{ + struct tid_group *grp, *ptr; + struct hfi1_filedata *fd = container_of(root, struct hfi1_filedata, + tid_rb_root); + int i; + + list_for_each_entry_safe(grp, ptr, &set->list, list) { + list_del_init(&grp->list); + + spin_lock(&fd->rb_lock); + for (i = 0; i < grp->size; i++) { + if (grp->map & (1 << i)) { + u16 rcventry = grp->base + i; + struct mmu_rb_node *node; + + node = mmu_rb_search_by_entry(root, rcventry); + if (!node) + continue; + rb_erase(&node->rbnode, root); + clear_tid_node(fd, -1, node); + } + } + spin_unlock(&fd->rb_lock); + } +} + static inline void mmu_notifier_page(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long addr) { -- cgit v1.2.3-59-g8ed1b From 463e6ebc86578ef3ff5bb500f6fc9449afaeea7e Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:53 -0500 Subject: staging/hfi1: Convert lock to mutex The exp_lock lock does not need to be a spinlock as all its uses are in process context and allowing the process to sleep when the mutex is contended might be beneficial. Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/file_ops.c | 12 ++++++------ drivers/staging/rdma/hfi1/hfi.h | 2 +- drivers/staging/rdma/hfi1/init.c | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 76fe60315bb4..b0348263b901 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -1611,14 +1611,14 @@ static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo) * reserved, we don't need the lock anymore since we * are guaranteed the groups. */ - spin_lock(&uctxt->exp_lock); + mutex_lock(&uctxt->exp_lock); if (uctxt->tidusemap[useidx] == -1ULL || bitidx >= BITS_PER_LONG) { /* no free groups in the set, use the next */ useidx = (useidx + 1) % uctxt->tidmapcnt; idx++; bitidx = 0; - spin_unlock(&uctxt->exp_lock); + mutex_unlock(&uctxt->exp_lock); continue; } ngroups = ((npages - mapped) / dd->rcv_entries.group_size) + @@ -1635,13 +1635,13 @@ static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo) * as 0 because we don't check the entire bitmap but * we start from bitidx. */ - spin_unlock(&uctxt->exp_lock); + mutex_unlock(&uctxt->exp_lock); continue; } bits_used = min(free, ngroups); tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx; uctxt->tidusemap[useidx] |= tidmap[useidx]; - spin_unlock(&uctxt->exp_lock); + mutex_unlock(&uctxt->exp_lock); /* * At this point, we know where in the map we have free bits. @@ -1677,10 +1677,10 @@ static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo) * Let go of the bits that we reserved since we are not * going to use them. 
*/ - spin_lock(&uctxt->exp_lock); + mutex_lock(&uctxt->exp_lock); uctxt->tidusemap[useidx] &= ~(((1ULL << bits_used) - 1) << bitidx); - spin_unlock(&uctxt->exp_lock); + mutex_unlock(&uctxt->exp_lock); goto done; } /* diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 51ecf45ef70b..53f464cc40ef 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -258,7 +258,7 @@ struct hfi1_ctxtdata { struct exp_tid_set tid_full_list; /* lock protecting all Expected TID data */ - spinlock_t exp_lock; + struct mutex exp_lock; /* number of pio bufs for this ctxt (all procs, if shared) */ u32 piocnt; /* first pio buffer for this ctxt */ diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 4dd8051aba7e..72c51431b2bf 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -227,7 +227,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt) rcd->numa_id = numa_node_id(); rcd->rcv_array_groups = dd->rcv_entries.ngroups; - spin_lock_init(&rcd->exp_lock); + mutex_init(&rcd->exp_lock); /* * Calculate the context's RcvArray entry starting point. -- cgit v1.2.3-59-g8ed1b From 3abb33ac652135da9c3c36d9def73ede67e4ba03 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:54 -0500 Subject: staging/hfi1: Add TID cache receive init and free funcs The upcoming TID caching feature requires different data structures and, by extension, different initialization for each of the MPI processes. The two new functions (currently unused) perform the required initialization and freeing of required resources and structures. Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_exp_rcv.c | 154 +++++++++++++++++++++++++++++-- 1 file changed, 144 insertions(+), 10 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 5a7e455b9f58..843023e2e2c7 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -90,23 +90,25 @@ struct tid_pageset { #define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list)) +#define num_user_pages(vaddr, len) \ + (1 + (((((unsigned long)(vaddr) + \ + (unsigned long)(len) - 1) & PAGE_MASK) - \ + ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT)) + static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *, - struct rb_root *) __maybe_unused; + struct rb_root *); static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *) __maybe_unused; static int set_rcvarray_entry(struct file *, unsigned long, u32, - struct tid_group *, struct page **, - unsigned) __maybe_unused; + struct tid_group *, struct page **, unsigned); static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, unsigned long); static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *, unsigned long) __maybe_unused; static inline struct mmu_rb_node *mmu_rb_search_by_entry(struct rb_root *, u32); -static int mmu_rb_insert_by_addr(struct rb_root *, - struct mmu_rb_node *) __maybe_unused; -static int mmu_rb_insert_by_entry(struct rb_root *, - struct mmu_rb_node *) __maybe_unused; +static int mmu_rb_insert_by_addr(struct rb_root *, struct mmu_rb_node *); +static int mmu_rb_insert_by_entry(struct rb_root *, struct mmu_rb_node *); static void mmu_notifier_mem_invalidate(struct mmu_notifier *, unsigned long, unsigned long, enum 
mmu_call_types); @@ -168,7 +170,7 @@ static inline void tid_group_move(struct tid_group *group, tid_group_add_tail(group, s2); } -static struct mmu_notifier_ops __maybe_unused mn_opts = { +static struct mmu_notifier_ops mn_opts = { .invalidate_page = mmu_notifier_page, .invalidate_range_start = mmu_notifier_range_start, }; @@ -180,12 +182,144 @@ static struct mmu_notifier_ops __maybe_unused mn_opts = { */ int hfi1_user_exp_rcv_init(struct file *fp) { - return -EINVAL; + struct hfi1_filedata *fd = fp->private_data; + struct hfi1_ctxtdata *uctxt = fd->uctxt; + struct hfi1_devdata *dd = uctxt->dd; + unsigned tidbase; + int i, ret = 0; + + INIT_HLIST_NODE(&fd->mn.hlist); + spin_lock_init(&fd->rb_lock); + spin_lock_init(&fd->tid_lock); + spin_lock_init(&fd->invalid_lock); + fd->mn.ops = &mn_opts; + fd->tid_rb_root = RB_ROOT; + + if (!uctxt->subctxt_cnt || !fd->subctxt) { + exp_tid_group_init(&uctxt->tid_group_list); + exp_tid_group_init(&uctxt->tid_used_list); + exp_tid_group_init(&uctxt->tid_full_list); + + tidbase = uctxt->expected_base; + for (i = 0; i < uctxt->expected_count / + dd->rcv_entries.group_size; i++) { + struct tid_group *grp; + + grp = kzalloc(sizeof(*grp), GFP_KERNEL); + if (!grp) { + /* + * If we fail here, the groups already + * allocated will be freed by the close + * call. + */ + ret = -ENOMEM; + goto done; + } + grp->size = dd->rcv_entries.group_size; + grp->base = tidbase; + tid_group_add_tail(grp, &uctxt->tid_group_list); + tidbase += dd->rcv_entries.group_size; + } + } + + if (!HFI1_CAP_IS_USET(TID_UNMAP)) { + fd->invalid_tid_idx = 0; + fd->invalid_tids = kzalloc(uctxt->expected_count * + sizeof(u32), GFP_KERNEL); + if (!fd->invalid_tids) { + ret = -ENOMEM; + goto done; + } else { + /* + * Register MMU notifier callbacks. If the registration + * fails, continue but turn off the TID caching for + * all user contexts. + */ + ret = mmu_notifier_register(&fd->mn, current->mm); + if (ret) { + dd_dev_info(dd, + "Failed MMU notifier registration %d\n", + ret); + HFI1_CAP_USET(TID_UNMAP); + ret = 0; + } + } + } + + if (HFI1_CAP_IS_USET(TID_UNMAP)) + fd->mmu_rb_insert = mmu_rb_insert_by_entry; + else + fd->mmu_rb_insert = mmu_rb_insert_by_addr; + + /* + * PSM does not have a good way to separate, count, and + * effectively enforce a limit on RcvArray entries used by + * subctxts (when context sharing is used) when TID caching + * is enabled. To help with that, we calculate a per-process + * RcvArray entry share and enforce that. + * If TID caching is not in use, PSM deals with usage on its + * own. In that case, we allow any subctxt to take all of the + * entries. + * + * Make sure that we set the tid counts only after successful + * init. + */ + if (uctxt->subctxt_cnt && !HFI1_CAP_IS_USET(TID_UNMAP)) { + u16 remainder; + + fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt; + remainder = uctxt->expected_count % uctxt->subctxt_cnt; + if (remainder && fd->subctxt < remainder) + fd->tid_limit++; + } else { + fd->tid_limit = uctxt->expected_count; + } +done: + return ret; } int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) { - return -EINVAL; + struct hfi1_ctxtdata *uctxt = fd->uctxt; + struct tid_group *grp, *gptr; + + /* + * The notifier would have been removed when the process'es mm + * was freed. 
+ */ + if (current->mm && !HFI1_CAP_IS_USET(TID_UNMAP)) + mmu_notifier_unregister(&fd->mn, current->mm); + + kfree(fd->invalid_tids); + + if (!uctxt->cnt) { + if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list)) + unlock_exp_tids(uctxt, &uctxt->tid_full_list, + &fd->tid_rb_root); + if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list)) + unlock_exp_tids(uctxt, &uctxt->tid_used_list, + &fd->tid_rb_root); + list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list, + list) { + list_del_init(&grp->list); + kfree(grp); + } + spin_lock(&fd->rb_lock); + if (!RB_EMPTY_ROOT(&fd->tid_rb_root)) { + struct rb_node *node; + struct mmu_rb_node *rbnode; + + while ((node = rb_first(&fd->tid_rb_root))) { + rbnode = rb_entry(node, struct mmu_rb_node, + rbnode); + rb_erase(&rbnode->rbnode, &fd->tid_rb_root); + kfree(rbnode); + } + } + spin_unlock(&fd->rb_lock); + hfi1_clear_tids(uctxt); + } + return 0; } /* -- cgit v1.2.3-59-g8ed1b
From b5eb3b2ffd1bf5be17df08565f4ab56c3fdae43e Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:55 -0500 Subject: staging/hfi1: Add MMU notifier callback function
TID caching will rely on the MMU notifier to be told when memory is being invalidated. When the callback is called, the driver will find all RcvArray entries that span the invalidated buffer and "schedule" them to be freed by the PSM library. This function is currently unused and is being added in preparation for the TID caching feature.
Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_exp_rcv.c | 67 +++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 2 deletions(-) (limited to 'drivers/staging')
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 843023e2e2c7..1787c55d21d6 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -104,7 +104,7 @@ static int set_rcvarray_entry(struct file *, unsigned long, u32, static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, unsigned long); static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *, - unsigned long) __maybe_unused; + unsigned long); static inline struct mmu_rb_node *mmu_rb_search_by_entry(struct rb_root *, u32); static int mmu_rb_insert_by_addr(struct rb_root *, struct mmu_rb_node *); @@ -683,7 +683,70 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, unsigned long start, unsigned long end, enum mmu_call_types type) { - /* Stub for now */ + struct hfi1_filedata *fd = container_of(mn, struct hfi1_filedata, mn); + struct hfi1_ctxtdata *uctxt = fd->uctxt; + struct rb_root *root = &fd->tid_rb_root; + struct mmu_rb_node *node; + unsigned long addr = start; + + spin_lock(&fd->rb_lock); + while (addr < end) { + node = mmu_rb_search_by_addr(root, addr); + + if (!node) { + /* + * Didn't find a node at this address. However, the + * range could be bigger than what we have registered + * so we have to keep looking. + */ + addr += PAGE_SIZE; + continue; + } + + /* + * The next address to be looked up is computed based + * on the node's starting address. This is due to the + * fact that the range where we start might be in the + * middle of the node's buffer so simply incrementing + * the address by the node's size would result in a + * bad address.
+ */ + addr = node->virt + (node->npages * PAGE_SIZE); + if (node->freed) + continue; + + node->freed = true; + + spin_lock(&fd->invalid_lock); + if (fd->invalid_tid_idx < uctxt->expected_count) { + fd->invalid_tids[fd->invalid_tid_idx] = + rcventry2tidinfo(node->rcventry - + uctxt->expected_base); + fd->invalid_tids[fd->invalid_tid_idx] |= + EXP_TID_SET(LEN, node->npages); + if (!fd->invalid_tid_idx) { + unsigned long *ev; + + /* + * hfi1_set_uevent_bits() sets a user event flag + * for all processes. Because calling into the + * driver to process TID cache invalidations is + * expensive and TID cache invalidations are + * handled on a per-process basis, we can + * optimize this to set the flag only for the + * process in question. + */ + ev = uctxt->dd->events + + (((uctxt->ctxt - + uctxt->dd->first_user_ctxt) * + HFI1_MAX_SHARED_CTXTS) + fd->subctxt); + set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev); + } + fd->invalid_tid_idx++; + } + spin_unlock(&fd->invalid_lock); + } + spin_unlock(&fd->rb_lock); } static inline int mmu_addr_cmp(struct mmu_rb_node *node, unsigned long addr, -- cgit v1.2.3-59-g8ed1b From 455d7f1ab86b7b1703898c75c4bc01df869da4a6 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:56 -0500 Subject: staging/hfi1: Add TID free/clear function bodies Up to now, the functions which cleared the programmed TID entries and gave PSM the list of invalidated TID entries were just stubs. With this commit, the bodies of these functions are added. This commit is a bit asymmetric as it only contains the free code path. This is done on purpose to help with patch reviews as the programming code path is much longer. Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_exp_rcv.c | 91 +++++++++++++++++++++++++++++--- 1 file changed, 85 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 1787c55d21d6..776ce003248e 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -120,10 +120,8 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *, static int program_rcvarray(struct file *, unsigned long, struct tid_group *, struct tid_pageset *, unsigned, u16, struct page **, u32 *, unsigned *, unsigned *) __maybe_unused; -static int unprogram_rcvarray(struct file *, u32, - struct tid_group **) __maybe_unused; -static void clear_tid_node(struct hfi1_filedata *, u16, - struct mmu_rb_node *) __maybe_unused; +static int unprogram_rcvarray(struct file *, u32, struct tid_group **); +static void clear_tid_node(struct hfi1_filedata *, u16, struct mmu_rb_node *); static inline u32 rcventry2tidinfo(u32 rcventry) { @@ -264,6 +262,7 @@ int hfi1_user_exp_rcv_init(struct file *fp) * Make sure that we set the tid counts only after successful * init. 
*/ + spin_lock(&fd->tid_lock); if (uctxt->subctxt_cnt && !HFI1_CAP_IS_USET(TID_UNMAP)) { u16 remainder; @@ -274,6 +273,7 @@ int hfi1_user_exp_rcv_init(struct file *fp) } else { fd->tid_limit = uctxt->expected_count; } + spin_unlock(&fd->tid_lock); done: return ret; } @@ -346,12 +346,91 @@ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo) int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo) { - return -EINVAL; + int ret = 0; + struct hfi1_filedata *fd = fp->private_data; + struct hfi1_ctxtdata *uctxt = fd->uctxt; + u32 *tidinfo; + unsigned tididx; + + tidinfo = kcalloc(tinfo->tidcnt, sizeof(*tidinfo), GFP_KERNEL); + if (!tidinfo) + return -ENOMEM; + + if (copy_from_user(tidinfo, (void __user *)(unsigned long) + tinfo->tidlist, sizeof(tidinfo[0]) * + tinfo->tidcnt)) { + ret = -EFAULT; + goto done; + } + + mutex_lock(&uctxt->exp_lock); + for (tididx = 0; tididx < tinfo->tidcnt; tididx++) { + ret = unprogram_rcvarray(fp, tidinfo[tididx], NULL); + if (ret) { + hfi1_cdbg(TID, "Failed to unprogram rcv array %d", + ret); + break; + } + } + spin_lock(&fd->tid_lock); + fd->tid_used -= tididx; + spin_unlock(&fd->tid_lock); + tinfo->tidcnt = tididx; + mutex_unlock(&uctxt->exp_lock); +done: + kfree(tidinfo); + return ret; } int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo) { - return -EINVAL; + struct hfi1_filedata *fd = fp->private_data; + struct hfi1_ctxtdata *uctxt = fd->uctxt; + unsigned long *ev = uctxt->dd->events + + (((uctxt->ctxt - uctxt->dd->first_user_ctxt) * + HFI1_MAX_SHARED_CTXTS) + fd->subctxt); + u32 *array; + int ret = 0; + + if (!fd->invalid_tids) + return -EINVAL; + + /* + * copy_to_user() can sleep, which will leave the invalid_lock + * locked and cause the MMU notifier to be blocked on the lock + * for a long time. + * Copy the data to a local buffer so we can release the lock. + */ + array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL); + if (!array) + return -EFAULT; + + spin_lock(&fd->invalid_lock); + if (fd->invalid_tid_idx) { + memcpy(array, fd->invalid_tids, sizeof(*array) * + fd->invalid_tid_idx); + memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) * + fd->invalid_tid_idx); + tinfo->tidcnt = fd->invalid_tid_idx; + fd->invalid_tid_idx = 0; + /* + * Reset the user flag while still holding the lock. + * Otherwise, PSM can miss events. + */ + clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev); + } else { + tinfo->tidcnt = 0; + } + spin_unlock(&fd->invalid_lock); + + if (tinfo->tidcnt) { + if (copy_to_user((void __user *)tinfo->tidlist, + array, sizeof(*array) * tinfo->tidcnt)) + ret = -EFAULT; + } + kfree(array); + + return ret; } static u32 find_phys_blocks(struct page **pages, unsigned npages, -- cgit v1.2.3-59-g8ed1b From 7e7a436ecb6e703a232df0613b5f24accbe3d7d2 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:57 -0500 Subject: staging/hfi1: Add TID entry program function body The previous patch in the series added the free/invalidate function bodies. Now, it's time for the programming side. This large function takes the user's buffer, breaks it up into manageable chunks, allocates enough RcvArray groups and programs the chunks into the RcvArray entries in the hardware. With this function, the TID caching functionality is implemented. However, it is still unused. The switch will come in a later patch in the series, which will remove the old functionality and switch the driver over to TID caching. 
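To make the chunking concrete, here is a small userspace demo of the splitting rule that find_phys_blocks() applies to each physically contiguous run of pages: cap the chunk at MAX_EXPECTED_BUFFER, otherwise round the run down to a power-of-two size. MAX_EXPECTED_BUFFER is a driver constant; its value (1 MiB) and the 4 KiB page size are assumed here purely for the example.

/*
 * Demo of the pageset splitting in find_phys_blocks(). The constants
 * are assumptions for illustration, not taken from the driver headers.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE		4096u
#define MAX_EXPECTED_BUFFER	(1024u * 1024u)	/* assumed value */

static unsigned int pow2_below(unsigned int v)
{
	unsigned int p = 1;

	while (p * 2 <= v)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int pagecount = 300;	/* a 300-page contiguous run */

	while (pagecount) {
		unsigned int maxpages = pagecount;
		unsigned int bufsize = pagecount * DEMO_PAGE_SIZE;

		if (bufsize > MAX_EXPECTED_BUFFER)
			maxpages = MAX_EXPECTED_BUFFER / DEMO_PAGE_SIZE;
		else if (bufsize & (bufsize - 1))	/* not a power of 2 */
			maxpages = pow2_below(pagecount);

		printf("pageset of %u pages\n", maxpages);
		pagecount -= maxpages;
	}
	return 0;	/* a 300-page run becomes 256 + 32 + 8 + 4 */
}

Note that pow2_below(pagecount) equals rounddown_pow_of_two(bufsize) >> PAGE_SHIFT when the page size is itself a power of two, which is why the demo can work in page counts rather than bytes.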
Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_exp_rcv.c | 263 ++++++++++++++++++++++++++++++- 1 file changed, 259 insertions(+), 4 deletions(-) (limited to 'drivers/staging')
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 776ce003248e..d33f579675b7 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -97,8 +97,7 @@ struct tid_pageset { static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *, struct rb_root *); -static u32 find_phys_blocks(struct page **, unsigned, - struct tid_pageset *) __maybe_unused; +static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *); static int set_rcvarray_entry(struct file *, unsigned long, u32, struct tid_group *, struct page **, unsigned); static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, @@ -119,7 +118,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *, unsigned long, unsigned long); static int program_rcvarray(struct file *, unsigned long, struct tid_group *, struct tid_pageset *, unsigned, u16, struct page **, - u32 *, unsigned *, unsigned *) __maybe_unused; + u32 *, unsigned *, unsigned *); static int unprogram_rcvarray(struct file *, u32, struct tid_group **); static void clear_tid_node(struct hfi1_filedata *, u16, struct mmu_rb_node *); @@ -339,9 +338,265 @@ static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index) writeq(0, dd->rcvarray_wc + (index * 8)); } +/* + * RcvArray entry allocation for Expected Receives is done by the + * following algorithm: + * + * The context keeps 3 lists of groups of RcvArray entries: + * 1. List of empty groups - tid_group_list + * This list is created during user context creation and + * contains elements which describe sets (of 8) of empty + * RcvArray entries. + * 2. List of partially used groups - tid_used_list + * This list contains sets of RcvArray entries which are + * not completely used up. Another mapping request could + * use some or all of the remaining entries. + * 3. List of full groups - tid_full_list + * This is the list where sets that are completely used + * up go. + * + * An attempt to optimize the usage of RcvArray entries is + * made by finding all sets of physically contiguous pages in a + * user's buffer. + * These physically contiguous sets are further split into + * sizes supported by the receive engine of the HFI. The + * resulting sets of pages are stored in struct tid_pageset, + * which describes the sets as: + * * .count - number of pages in this set + * * .idx - starting index into struct page ** array + * of this set + * + * From this point on, the algorithm deals with the page sets + * described above. The number of pagesets is divided by the + * RcvArray group size to produce the number of full groups + * needed. + * + * Groups from the 3 lists are manipulated using the following + * rules: + * 1. For each set of 8 pagesets, a complete group from + * tid_group_list is taken, programmed, and moved to + * the tid_full_list list. + * 2. For all remaining pagesets: + * 2.1 If the tid_used_list is empty and the tid_group_list + * is empty, stop processing pagesets and return only + * what has been programmed up to this point. + * 2.2 If the tid_used_list is empty and the tid_group_list + * is not empty, move a group from tid_group_list to + * tid_used_list.
+ * 2.3 For each group in tid_used_list, program as much as + * can fit into the group. If the group becomes fully + * used, move it to tid_full_list. + */ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo) { - return -EINVAL; + int ret = 0, need_group = 0, pinned; + struct hfi1_filedata *fd = fp->private_data; + struct hfi1_ctxtdata *uctxt = fd->uctxt; + struct hfi1_devdata *dd = uctxt->dd; + unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets, + tididx = 0, mapped, mapped_pages = 0; + unsigned long vaddr = tinfo->vaddr; + struct page **pages = NULL; + u32 *tidlist = NULL; + struct tid_pageset *pagesets = NULL; + + /* Get the number of pages the user buffer spans */ + npages = num_user_pages(vaddr, tinfo->length); + if (!npages) + return -EINVAL; + + if (npages > uctxt->expected_count) { + dd_dev_err(dd, "Expected buffer too big\n"); + return -EINVAL; + } + + /* Verify that access is OK for the user buffer */ + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, + npages * PAGE_SIZE)) { + dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n", + (void *)vaddr, npages); + return -EFAULT; + } + + pagesets = kcalloc(uctxt->expected_count, sizeof(*pagesets), + GFP_KERNEL); + if (!pagesets) + return -ENOMEM; + + /* Allocate the array of struct page pointers needed for pinning */ + pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); + if (!pages) { + ret = -ENOMEM; + goto bail; + } + + /* + * Pin all the pages of the user buffer. If we can't pin all the + * pages, accept the amount pinned so far and program only that. + * User space knows how to deal with partially programmed buffers. + */ + pinned = hfi1_acquire_user_pages(vaddr, npages, true, pages); + if (pinned <= 0) { + ret = pinned; + goto bail; + } + + /* Find sets of physically contiguous pages */ + npagesets = find_phys_blocks(pages, pinned, pagesets); + + /* + * We don't need to access this under a lock since tid_used is per + * process and the same process cannot be in hfi1_user_exp_rcv_clear() + * and hfi1_user_exp_rcv_setup() at the same time. + */ + spin_lock(&fd->tid_lock); + if (fd->tid_used + npagesets > fd->tid_limit) + pageset_count = fd->tid_limit - fd->tid_used; + else + pageset_count = npagesets; + spin_unlock(&fd->tid_lock); + + if (!pageset_count) + goto bail; + + ngroups = pageset_count / dd->rcv_entries.group_size; + tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL); + if (!tidlist) { + ret = -ENOMEM; + goto nomem; + } + + tididx = 0; + + /* + * From this point on, we are going to be using shared (between master + * and subcontexts) context resources. We need to take the lock. + */ + mutex_lock(&uctxt->exp_lock); + /* + * The first step is to program the RcvArray entries which are complete + * groups. + */ + while (ngroups && uctxt->tid_group_list.count) { + struct tid_group *grp = + tid_group_pop(&uctxt->tid_group_list); + + ret = program_rcvarray(fp, vaddr, grp, pagesets, + pageidx, dd->rcv_entries.group_size, + pages, tidlist, &tididx, &mapped); + /* + * If there was a failure to program the RcvArray + * entries for the entire group, reset the grp fields + * and add the grp back to the free group list.
+ */ + if (ret <= 0) { + tid_group_add_tail(grp, &uctxt->tid_group_list); + hfi1_cdbg(TID, + "Failed to program RcvArray group %d", ret); + goto unlock; + } + + tid_group_add_tail(grp, &uctxt->tid_full_list); + ngroups--; + pageidx += ret; + mapped_pages += mapped; + } + + while (pageidx < pageset_count) { + struct tid_group *grp, *ptr; + /* + * If we don't have any partially used tid groups, check + * if we have empty groups. If so, take one from there and + * put in the partially used list. + */ + if (!uctxt->tid_used_list.count || need_group) { + if (!uctxt->tid_group_list.count) + goto unlock; + + grp = tid_group_pop(&uctxt->tid_group_list); + tid_group_add_tail(grp, &uctxt->tid_used_list); + need_group = 0; + } + /* + * There is an optimization opportunity here - instead of + * fitting as many page sets as we can, check for a group + * later on in the list that could fit all of them. + */ + list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list, + list) { + unsigned use = min_t(unsigned, pageset_count - pageidx, + grp->size - grp->used); + + ret = program_rcvarray(fp, vaddr, grp, pagesets, + pageidx, use, pages, tidlist, + &tididx, &mapped); + if (ret < 0) { + hfi1_cdbg(TID, + "Failed to program RcvArray entries %d", + ret); + ret = -EFAULT; + goto unlock; + } else if (ret > 0) { + if (grp->used == grp->size) + tid_group_move(grp, + &uctxt->tid_used_list, + &uctxt->tid_full_list); + pageidx += ret; + mapped_pages += mapped; + need_group = 0; + /* Check if we are done so we break out early */ + if (pageidx >= pageset_count) + break; + } else if (WARN_ON(ret == 0)) { + /* + * If ret is 0, we did not program any entries + * into this group, which can only happen if + * we've screwed up the accounting somewhere. + * Warn and try to continue. + */ + need_group = 1; + } + } + } +unlock: + mutex_unlock(&uctxt->exp_lock); +nomem: + hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx, + mapped_pages, ret); + if (tididx) { + spin_lock(&fd->tid_lock); + fd->tid_used += tididx; + spin_unlock(&fd->tid_lock); + tinfo->tidcnt = tididx; + tinfo->length = mapped_pages * PAGE_SIZE; + + if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist, + tidlist, sizeof(tidlist[0]) * tididx)) { + /* + * On failure to copy to the user level, we need to undo + * everything done so far so we don't leak resources. + */ + tinfo->tidlist = (unsigned long)&tidlist; + hfi1_user_exp_rcv_clear(fp, tinfo); + tinfo->tidlist = 0; + ret = -EFAULT; + goto bail; + } + } + + /* + * If not everything was mapped (due to insufficient RcvArray entries, + * for example), unpin all unmapped pages so we can pin them next time. + */ + if (mapped_pages != pinned) + hfi1_release_user_pages(&pages[mapped_pages], + pinned - mapped_pages, + false); +bail: + kfree(pagesets); + kfree(pages); + kfree(tidlist); + return ret > 0 ? 0 : ret; } int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo) -- cgit v1.2.3-59-g8ed1b From 0b091fb32c5ae4737bf606a313e6625dad34bbc6 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 5 Feb 2016 11:57:58 -0500 Subject: staging/hfi1: Enable TID caching feature This commit "flips the switch" on the TID caching feature implemented in this patch series. As well as enabling the new feature by tying the new function with the PSM API, it also cleans up the old unneeded code, data structure members, and variables. Due to differences in operation and information, the tracing functions related to expected receives had to be changed. This patch includes these changes.
The tracing function changes could not be split into a separate commit without including both tracing variants at the same time. This would have caused other complications and ugliness. Signed-off-by: Mitko Haralanov Reviewed-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/file_ops.c | 448 +++---------------------------- drivers/staging/rdma/hfi1/hfi.h | 14 - drivers/staging/rdma/hfi1/init.c | 3 - drivers/staging/rdma/hfi1/trace.h | 132 +++++---- drivers/staging/rdma/hfi1/user_exp_rcv.c | 12 + drivers/staging/rdma/hfi1/user_pages.c | 14 - include/uapi/rdma/hfi/hfi1_user.h | 7 +- 7 files changed, 132 insertions(+), 498 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index b0348263b901..d36588934f99 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -96,9 +96,6 @@ static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long); static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16); static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int); static int vma_fault(struct vm_area_struct *, struct vm_fault *); -static int exp_tid_setup(struct file *, struct hfi1_tid_info *); -static int exp_tid_free(struct file *, struct hfi1_tid_info *); -static void unlock_exp_tids(struct hfi1_ctxtdata *); static const struct file_operations hfi1_file_ops = { .owner = THIS_MODULE, @@ -188,6 +185,7 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, struct hfi1_cmd cmd; struct hfi1_user_info uinfo; struct hfi1_tid_info tinfo; + unsigned long addr; ssize_t consumed = 0, copy = 0, ret = 0; void *dest = NULL; __u64 user_val = 0; @@ -219,6 +217,7 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, break; case HFI1_CMD_TID_UPDATE: case HFI1_CMD_TID_FREE: + case HFI1_CMD_TID_INVAL_READ: copy = sizeof(tinfo); dest = &tinfo; break; @@ -241,7 +240,6 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, must_be_root = 1; /* validate user */ copy = 0; break; - case HFI1_CMD_TID_INVAL_READ: default: ret = -EINVAL; goto bail; @@ -295,9 +293,8 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, sc_return_credits(uctxt->sc); break; case HFI1_CMD_TID_UPDATE: - ret = exp_tid_setup(fp, &tinfo); + ret = hfi1_user_exp_rcv_setup(fp, &tinfo); if (!ret) { - unsigned long addr; /* * Copy the number of tidlist entries we used * and the length of the buffer we registered. 
@@ -312,8 +309,25 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, ret = -EFAULT; } break; + case HFI1_CMD_TID_INVAL_READ: + ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); + if (ret) + break; + addr = (unsigned long)cmd.addr + + offsetof(struct hfi1_tid_info, tidcnt); + if (copy_to_user((void __user *)addr, &tinfo.tidcnt, + sizeof(tinfo.tidcnt))) + ret = -EFAULT; + break; case HFI1_CMD_TID_FREE: - ret = exp_tid_free(fp, &tinfo); + ret = hfi1_user_exp_rcv_clear(fp, &tinfo); + if (ret) + break; + addr = (unsigned long)cmd.addr + + offsetof(struct hfi1_tid_info, tidcnt); + if (copy_to_user((void __user *)addr, &tinfo.tidcnt, + sizeof(tinfo.tidcnt))) + ret = -EFAULT; break; case HFI1_CMD_RECV_CTRL: ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val); @@ -779,12 +793,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) uctxt->pionowait = 0; uctxt->event_flags = 0; - hfi1_clear_tids(uctxt); + hfi1_user_exp_rcv_free(fdata); hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); - if (uctxt->tid_pg_list) - unlock_exp_tids(uctxt); - hfi1_stats.sps_ctxts--; dd->freectxts++; mutex_unlock(&hfi1_mutex); @@ -1107,7 +1118,7 @@ static int user_init(struct file *fp) ret = wait_event_interruptible(uctxt->wait, !test_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags)); - goto done; + goto expected; } /* initialize poll variables... */ @@ -1154,8 +1165,18 @@ static int user_init(struct file *fp) clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags); wake_up(&uctxt->wait); } - ret = 0; +expected: + /* + * Expected receive has to be setup for all processes (including + * shared contexts). However, it has to be done after the master + * context has been fully configured as it depends on the + * eager/expected split of the RcvArray entries. + * Setting it up here ensures that the subcontexts will be waiting + * (due to the above wait_event_interruptible() until the master + * is setup. + */ + ret = hfi1_user_exp_rcv_init(fp); done: return ret; } @@ -1225,46 +1246,6 @@ static int setup_ctxt(struct file *fp) if (ret) goto done; } - /* Setup Expected Rcv memories */ - uctxt->tid_pg_list = vzalloc(uctxt->expected_count * - sizeof(struct page **)); - if (!uctxt->tid_pg_list) { - ret = -ENOMEM; - goto done; - } - uctxt->physshadow = vzalloc(uctxt->expected_count * - sizeof(*uctxt->physshadow)); - if (!uctxt->physshadow) { - ret = -ENOMEM; - goto done; - } - /* allocate expected TID map and initialize the cursor */ - atomic_set(&uctxt->tidcursor, 0); - uctxt->numtidgroups = uctxt->expected_count / - dd->rcv_entries.group_size; - uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG + - !!(uctxt->numtidgroups % BITS_PER_LONG); - uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt * - sizeof(*uctxt->tidusemap), - GFP_KERNEL, uctxt->numa_id); - if (!uctxt->tidusemap) { - ret = -ENOMEM; - goto done; - } - /* - * In case that the number of groups is not a multiple of - * 64 (the number of groups in a tidusemap element), mark - * the extra ones as used. This will effectively make them - * permanently used and should never be assigned. Otherwise, - * the code which checks how many free groups we have will - * get completely confused about the state of the bits. 
- */ - if (uctxt->numtidgroups % BITS_PER_LONG) - uctxt->tidusemap[uctxt->tidmapcnt - 1] = - ~((1ULL << (uctxt->numtidgroups % - BITS_PER_LONG)) - 1); - trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0, - uctxt->tidusemap, uctxt->tidmapcnt); } ret = hfi1_user_sdma_alloc_queues(uctxt, fp); if (ret) @@ -1503,367 +1484,6 @@ static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt, return 0; } -#define num_user_pages(vaddr, len) \ - (1 + (((((unsigned long)(vaddr) + \ - (unsigned long)(len) - 1) & PAGE_MASK) - \ - ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT)) - -/** - * tzcnt - count the number of trailing zeros in a 64bit value - * @value: the value to be examined - * - * Returns the number of trailing least significant zeros in the - * the input value. If the value is zero, return the number of - * bits of the value. - */ -static inline u8 tzcnt(u64 value) -{ - return value ? __builtin_ctzl(value) : sizeof(value) * 8; -} - -static inline unsigned num_free_groups(unsigned long map, u16 *start) -{ - unsigned free; - u16 bitidx = *start; - - if (bitidx >= BITS_PER_LONG) - return 0; - /* "Turn off" any bits set before our bit index */ - map &= ~((1ULL << bitidx) - 1); - free = tzcnt(map) - bitidx; - while (!free && bitidx < BITS_PER_LONG) { - /* Zero out the last set bit so we look at the rest */ - map &= ~(1ULL << bitidx); - /* - * Account for the previously checked bits and advance - * the bit index. We don't have to check for bitidx - * getting bigger than BITS_PER_LONG here as it would - * mean extra instructions that we don't need. If it - * did happen, it would push free to a negative value - * which will break the loop. - */ - free = tzcnt(map) - ++bitidx; - } - *start = bitidx; - return free; -} - -static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo) -{ - int ret = 0; - struct hfi1_filedata *fd = fp->private_data; - struct hfi1_ctxtdata *uctxt = fd->uctxt; - struct hfi1_devdata *dd = uctxt->dd; - unsigned tid, mapped = 0, npages, ngroups, exp_groups, - tidpairs = uctxt->expected_count / 2; - struct page **pages; - unsigned long vaddr, tidmap[uctxt->tidmapcnt]; - dma_addr_t *phys; - u32 tidlist[tidpairs], pairidx = 0, tidcursor; - u16 useidx, idx, bitidx, tidcnt = 0; - - vaddr = tinfo->vaddr; - - if (offset_in_page(vaddr)) { - ret = -EINVAL; - goto bail; - } - - npages = num_user_pages(vaddr, tinfo->length); - if (!npages) { - ret = -EINVAL; - goto bail; - } - if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, - npages * PAGE_SIZE)) { - dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n", - (void *)vaddr, npages); - ret = -EFAULT; - goto bail; - } - - memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt); - memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs); - - exp_groups = uctxt->expected_count / dd->rcv_entries.group_size; - /* which group set do we look at first? */ - tidcursor = atomic_read(&uctxt->tidcursor); - useidx = (tidcursor >> 16) & 0xffff; - bitidx = tidcursor & 0xffff; - - /* - * Keep going until we've mapped all pages or we've exhausted all - * RcvArray entries. - * This iterates over the number of tidmaps + 1 - * (idx <= uctxt->tidmapcnt) so we check the bitmap which we - * started from one more time for any free bits before the - * starting point bit. - */ - for (mapped = 0, idx = 0; - mapped < npages && idx <= uctxt->tidmapcnt;) { - u64 i, offset = 0; - unsigned free, pinned, pmapped = 0, bits_used; - u16 grp; - - /* - * "Reserve" the needed group bits under lock so other - * processes can't step in the middle of it. 
Once - * reserved, we don't need the lock anymore since we - * are guaranteed the groups. - */ - mutex_lock(&uctxt->exp_lock); - if (uctxt->tidusemap[useidx] == -1ULL || - bitidx >= BITS_PER_LONG) { - /* no free groups in the set, use the next */ - useidx = (useidx + 1) % uctxt->tidmapcnt; - idx++; - bitidx = 0; - mutex_unlock(&uctxt->exp_lock); - continue; - } - ngroups = ((npages - mapped) / dd->rcv_entries.group_size) + - !!((npages - mapped) % dd->rcv_entries.group_size); - - /* - * If we've gotten here, the current set of groups does have - * one or more free groups. - */ - free = num_free_groups(uctxt->tidusemap[useidx], &bitidx); - if (!free) { - /* - * Despite the check above, free could still come back - * as 0 because we don't check the entire bitmap but - * we start from bitidx. - */ - mutex_unlock(&uctxt->exp_lock); - continue; - } - bits_used = min(free, ngroups); - tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx; - uctxt->tidusemap[useidx] |= tidmap[useidx]; - mutex_unlock(&uctxt->exp_lock); - - /* - * At this point, we know where in the map we have free bits. - * properly offset into the various "shadow" arrays and compute - * the RcvArray entry index. - */ - offset = ((useidx * BITS_PER_LONG) + bitidx) * - dd->rcv_entries.group_size; - pages = uctxt->tid_pg_list + offset; - phys = uctxt->physshadow + offset; - tid = uctxt->expected_base + offset; - - /* Calculate how many pages we can pin based on free bits */ - pinned = min((bits_used * dd->rcv_entries.group_size), - (npages - mapped)); - /* - * Now that we know how many free RcvArray entries we have, - * we can pin that many user pages. - */ - ret = hfi1_acquire_user_pages(vaddr + (mapped * PAGE_SIZE), - pinned, true, pages); - if (ret) { - /* - * We can't continue because the pages array won't be - * initialized. This should never happen, - * unless perhaps the user has mpin'ed the pages - * themselves. - */ - dd_dev_info(dd, - "Failed to lock addr %p, %u pages: errno %d\n", - (void *) vaddr, pinned, -ret); - /* - * Let go of the bits that we reserved since we are not - * going to use them. - */ - mutex_lock(&uctxt->exp_lock); - uctxt->tidusemap[useidx] &= - ~(((1ULL << bits_used) - 1) << bitidx); - mutex_unlock(&uctxt->exp_lock); - goto done; - } - /* - * How many groups do we need based on how many pages we have - * pinned? - */ - ngroups = (pinned / dd->rcv_entries.group_size) + - !!(pinned % dd->rcv_entries.group_size); - /* - * Keep programming RcvArray entries for all the free - * groups. - */ - for (i = 0, grp = 0; grp < ngroups; i++, grp++) { - unsigned j; - u32 pair_size = 0, tidsize; - /* - * This inner loop will program an entire group or the - * array of pinned pages (which ever limit is hit - * first). - */ - for (j = 0; j < dd->rcv_entries.group_size && - pmapped < pinned; j++, pmapped++, tid++) { - tidsize = PAGE_SIZE; - phys[pmapped] = hfi1_map_page(dd->pcidev, - pages[pmapped], 0, - tidsize, PCI_DMA_FROMDEVICE); - trace_hfi1_exp_rcv_set(uctxt->ctxt, - fd->subctxt, - tid, vaddr, - phys[pmapped], - pages[pmapped]); - /* - * Each RcvArray entry is programmed with one - * page * worth of memory. This will handle - * the 8K MTU as well as anything smaller - * due to the fact that both entries in the - * RcvTidPair are programmed with a page. - * PSM currently does not handle anything - * bigger than 8K MTU, so should we even worry - * about 10K here? 
- */ - hfi1_put_tid(dd, tid, PT_EXPECTED, - phys[pmapped], - ilog2(tidsize >> PAGE_SHIFT) + 1); - pair_size += tidsize >> PAGE_SHIFT; - EXP_TID_RESET(tidlist[pairidx], LEN, pair_size); - if (!(tid % 2)) { - tidlist[pairidx] |= - EXP_TID_SET(IDX, - (tid - uctxt->expected_base) - / 2); - tidlist[pairidx] |= - EXP_TID_SET(CTRL, 1); - tidcnt++; - } else { - tidlist[pairidx] |= - EXP_TID_SET(CTRL, 2); - pair_size = 0; - pairidx++; - } - } - /* - * We've programmed the entire group (or as much of the - * group as we'll use. Now, it's time to push it out... - */ - flush_wc(); - } - mapped += pinned; - atomic_set(&uctxt->tidcursor, - (((useidx & 0xffffff) << 16) | - ((bitidx + bits_used) & 0xffffff))); - } - trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0, uctxt->tidusemap, - uctxt->tidmapcnt); - -done: - /* If we've mapped anything, copy relevant info to user */ - if (mapped) { - if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist, - tidlist, sizeof(tidlist[0]) * tidcnt)) { - ret = -EFAULT; - goto done; - } - /* copy TID info to user */ - if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap, - tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt)) - ret = -EFAULT; - } -bail: - /* - * Calculate mapped length. New Exp TID protocol does not "unwind" and - * report an error if it can't map the entire buffer. It just reports - * the length that was mapped. - */ - tinfo->length = mapped * PAGE_SIZE; - tinfo->tidcnt = tidcnt; - return ret; -} - -static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo) -{ - struct hfi1_filedata *fd = fp->private_data; - struct hfi1_ctxtdata *uctxt = fd->uctxt; - struct hfi1_devdata *dd = uctxt->dd; - unsigned long tidmap[uctxt->tidmapcnt]; - struct page **pages; - dma_addr_t *phys; - u16 idx, bitidx, tid; - int ret = 0; - - if (copy_from_user(&tidmap, (void __user *)(unsigned long) - tinfo->tidmap, - sizeof(tidmap[0]) * uctxt->tidmapcnt)) { - ret = -EFAULT; - goto done; - } - for (idx = 0; idx < uctxt->tidmapcnt; idx++) { - unsigned long map; - - bitidx = 0; - if (!tidmap[idx]) - continue; - map = tidmap[idx]; - while ((bitidx = tzcnt(map)) < BITS_PER_LONG) { - int i, pcount = 0; - struct page *pshadow[dd->rcv_entries.group_size]; - unsigned offset = ((idx * BITS_PER_LONG) + bitidx) * - dd->rcv_entries.group_size; - - pages = uctxt->tid_pg_list + offset; - phys = uctxt->physshadow + offset; - tid = uctxt->expected_base + offset; - for (i = 0; i < dd->rcv_entries.group_size; - i++, tid++) { - if (pages[i]) { - hfi1_put_tid(dd, tid, PT_INVALID, - 0, 0); - trace_hfi1_exp_rcv_free(uctxt->ctxt, - fd->subctxt, - tid, phys[i], - pages[i]); - pci_unmap_page(dd->pcidev, phys[i], - PAGE_SIZE, PCI_DMA_FROMDEVICE); - pshadow[pcount] = pages[i]; - pages[i] = NULL; - pcount++; - phys[i] = 0; - } - } - flush_wc(); - hfi1_release_user_pages(pshadow, pcount, true); - clear_bit(bitidx, &uctxt->tidusemap[idx]); - map &= ~(1ULL<ctxt, fd->subctxt, 1, uctxt->tidusemap, - uctxt->tidmapcnt); -done: - return ret; -} - -static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt) -{ - struct hfi1_devdata *dd = uctxt->dd; - unsigned tid; - - dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n", - uctxt->ctxt); - for (tid = 0; tid < uctxt->expected_count; tid++) { - struct page *p = uctxt->tid_pg_list[tid]; - dma_addr_t phys; - - if (!p) - continue; - - phys = uctxt->physshadow[tid]; - uctxt->physshadow[tid] = 0; - uctxt->tid_pg_list[tid] = NULL; - pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE); - hfi1_release_user_pages(&p, 1, true); - } -} - static 
int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt, u16 pkey) { diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 53f464cc40ef..62157cc34727 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -240,18 +240,6 @@ struct hfi1_ctxtdata { u32 expected_count; /* index of first expected TID entry. */ u32 expected_base; - /* cursor into the exp group sets */ - atomic_t tidcursor; - /* number of exp TID groups assigned to the ctxt */ - u16 numtidgroups; - /* size of exp TID group fields in tidusemap */ - u16 tidmapcnt; - /* exp TID group usage bitfield array */ - unsigned long *tidusemap; - /* pinned pages for exp sends, allocated at open */ - struct page **tid_pg_list; - /* dma handles for exp tid pages */ - dma_addr_t *physshadow; struct exp_tid_set tid_group_list; struct exp_tid_set tid_used_list; @@ -1660,8 +1648,6 @@ int get_platform_config_field(struct hfi1_devdata *dd, enum platform_config_table_type_encoding table_type, int table_index, int field_index, u32 *data, u32 len); -dma_addr_t hfi1_map_page(struct pci_dev *, struct page *, unsigned long, - size_t, int); const char *get_unit_name(int unit); /* diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 72c51431b2bf..00f52e815242 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -962,13 +962,10 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) kfree(rcd->egrbufs.buffers); sc_free(rcd->sc); - vfree(rcd->physshadow); - vfree(rcd->tid_pg_list); vfree(rcd->user_event_mask); vfree(rcd->subctxt_uregbase); vfree(rcd->subctxt_rcvegrbuf); vfree(rcd->subctxt_rcvhdr_base); - kfree(rcd->tidusemap); kfree(rcd->opstats); kfree(rcd); } diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h index 86c12ebfd4f0..1e435675335f 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/staging/rdma/hfi1/trace.h @@ -153,92 +153,130 @@ TRACE_EVENT(hfi1_receive_interrupt, ) ); -const char *print_u64_array(struct trace_seq *, u64 *, int); +TRACE_EVENT(hfi1_exp_tid_reg, + TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr, + u32 npages, unsigned long va, unsigned long pa, + dma_addr_t dma), + TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma), + TP_STRUCT__entry( + __field(unsigned, ctxt) + __field(u16, subctxt) + __field(u32, rarr) + __field(u32, npages) + __field(unsigned long, va) + __field(unsigned long, pa) + __field(dma_addr_t, dma) + ), + TP_fast_assign( + __entry->ctxt = ctxt; + __entry->subctxt = subctxt; + __entry->rarr = rarr; + __entry->npages = npages; + __entry->va = va; + __entry->pa = pa; + __entry->dma = dma; + ), + TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx", + __entry->ctxt, + __entry->subctxt, + __entry->rarr, + __entry->npages, + __entry->pa, + __entry->va, + __entry->dma + ) + ); -TRACE_EVENT(hfi1_exp_tid_map, - TP_PROTO(unsigned ctxt, u16 subctxt, int dir, - unsigned long *maps, u16 count), - TP_ARGS(ctxt, subctxt, dir, maps, count), +TRACE_EVENT(hfi1_exp_tid_unreg, + TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr, u32 npages, + unsigned long va, unsigned long pa, dma_addr_t dma), + TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma), TP_STRUCT__entry( __field(unsigned, ctxt) __field(u16, subctxt) - __field(int, dir) - __field(u16, count) - __dynamic_array(unsigned long, maps, sizeof(*maps) * count) + __field(u32, rarr) + __field(u32, npages) + __field(unsigned long, va) + __field(unsigned long, pa) + 
__field(dma_addr_t, dma) ), TP_fast_assign( __entry->ctxt = ctxt; __entry->subctxt = subctxt; - __entry->dir = dir; - __entry->count = count; - memcpy(__get_dynamic_array(maps), maps, - sizeof(*maps) * count); + __entry->rarr = rarr; + __entry->npages = npages; + __entry->va = va; + __entry->pa = pa; + __entry->dma = dma; ), - TP_printk("[%3u:%02u] %s tidmaps %s", + TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx", __entry->ctxt, __entry->subctxt, - (__entry->dir ? ">" : "<"), - print_u64_array(p, __get_dynamic_array(maps), - __entry->count) + __entry->rarr, + __entry->npages, + __entry->pa, + __entry->va, + __entry->dma ) ); -TRACE_EVENT(hfi1_exp_rcv_set, - TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid, - unsigned long vaddr, u64 phys_addr, void *page), - TP_ARGS(ctxt, subctxt, tid, vaddr, phys_addr, page), +TRACE_EVENT(hfi1_exp_tid_inval, + TP_PROTO(unsigned ctxt, u16 subctxt, unsigned long va, u32 rarr, + u32 npages, dma_addr_t dma), + TP_ARGS(ctxt, subctxt, va, rarr, npages, dma), TP_STRUCT__entry( __field(unsigned, ctxt) __field(u16, subctxt) - __field(u32, tid) - __field(unsigned long, vaddr) - __field(u64, phys_addr) - __field(void *, page) + __field(unsigned long, va) + __field(u32, rarr) + __field(u32, npages) + __field(dma_addr_t, dma) ), TP_fast_assign( __entry->ctxt = ctxt; __entry->subctxt = subctxt; - __entry->tid = tid; - __entry->vaddr = vaddr; - __entry->phys_addr = phys_addr; - __entry->page = page; + __entry->va = va; + __entry->rarr = rarr; + __entry->npages = npages; + __entry->dma = dma; ), - TP_printk("[%u:%u] TID %u, vaddrs 0x%lx, physaddr 0x%llx, pgp %p", + TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx", __entry->ctxt, __entry->subctxt, - __entry->tid, - __entry->vaddr, - __entry->phys_addr, - __entry->page + __entry->rarr, + __entry->npages, + __entry->va, + __entry->dma ) ); -TRACE_EVENT(hfi1_exp_rcv_free, - TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid, - unsigned long phys, void *page), - TP_ARGS(ctxt, subctxt, tid, phys, page), +TRACE_EVENT(hfi1_mmu_invalidate, + TP_PROTO(unsigned ctxt, u16 subctxt, const char *type, + unsigned long start, unsigned long end), + TP_ARGS(ctxt, subctxt, type, start, end), TP_STRUCT__entry( __field(unsigned, ctxt) __field(u16, subctxt) - __field(u32, tid) - __field(unsigned long, phys) - __field(void *, page) + __string(type, type) + __field(unsigned long, start) + __field(unsigned long, end) ), TP_fast_assign( __entry->ctxt = ctxt; __entry->subctxt = subctxt; - __entry->tid = tid; - __entry->phys = phys; - __entry->page = page; + __assign_str(type, type); + __entry->start = start; + __entry->end = end; ), - TP_printk("[%u:%u] freeing TID %u, 0x%lx, pgp %p", + TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx", __entry->ctxt, __entry->subctxt, - __entry->tid, - __entry->phys, - __entry->page + __get_str(type), + __entry->start, + __entry->end ) ); + #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_tx diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index d33f579675b7..79612a2bd07d 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -902,6 +902,8 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr, return -EFAULT; } hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1); + trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, + npages, node->virt, node->phys, phys); return 0; } @@ -947,6 +949,10 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt, struct 
hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; + trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry, + node->npages, node->virt, node->phys, + node->dma_addr); + hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0); /* * Make sure device has seen the write before we unpin the @@ -1023,6 +1029,9 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, struct mmu_rb_node *node; unsigned long addr = start; + trace_hfi1_mmu_invalidate(uctxt->ctxt, fd->subctxt, mmu_types[type], + start, end); + spin_lock(&fd->rb_lock); while (addr < end) { node = mmu_rb_search_by_addr(root, addr); @@ -1049,6 +1058,9 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, if (node->freed) continue; + trace_hfi1_exp_tid_inval(uctxt->ctxt, fd->subctxt, node->virt, + node->rcventry, node->npages, + node->dma_addr); node->freed = true; spin_lock(&fd->invalid_lock); diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c index 692de658f0dc..1854c0c7ce7e 100644 --- a/drivers/staging/rdma/hfi1/user_pages.c +++ b/drivers/staging/rdma/hfi1/user_pages.c @@ -54,20 +54,6 @@ #include "hfi.h" -/** - * hfi1_map_page - a safety wrapper around pci_map_page() - * - */ -dma_addr_t hfi1_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction) -{ - dma_addr_t phys; - - phys = pci_map_page(hwdev, page, offset, size, direction); - - return phys; -} - int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable, struct page **pages) { diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 92be2e373019..a533cecab14f 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -66,7 +66,7 @@ * The major version changes when data structures change in an incompatible * way. The driver must be the same for initialization to succeed. */ -#define HFI1_USER_SWMAJOR 4 +#define HFI1_USER_SWMAJOR 5 /* * Minor version differences are always compatible @@ -241,11 +241,6 @@ struct hfi1_tid_info { __u32 tidcnt; /* length of transfer buffer programmed by this request */ __u32 length; - /* - * pointer to bitmap of TIDs used for this call; - * checked for being large enough at open - */ - __u64 tidmap; }; struct hfi1_cmd { -- cgit v1.2.3-59-g8ed1b From a1edc18a484285fcaf4bb73241f573ccb8f06fbc Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Mon, 11 Jan 2016 13:04:32 -0500 Subject: staging/hfi1: add dd_dev_dbg To be used in future patches add dd_dev_dbg. dd_* functions properly decode the hfi1_devdata structure used throughout the driver Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 62157cc34727..52dcc87689f1 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1804,6 +1804,10 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) dev_info(&(dd)->pcidev->dev, "%s: " fmt, \ get_unit_name((dd)->unit), ##__VA_ARGS__) +#define dd_dev_dbg(dd, fmt, ...) \ + dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \ + get_unit_name((dd)->unit), ##__VA_ARGS__) + #define hfi1_dev_porterr(dd, port, fmt, ...) 
\ dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ get_unit_name((dd)->unit), (dd)->unit, (port), \ -- cgit v1.2.3-59-g8ed1b From a06e825a13c363da31936d04645e94b99aeb34e2 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Mon, 11 Jan 2016 13:04:33 -0500 Subject: staging/hfi1: set Gen3 half-swing for integrated devices Correctly set half-swing for integrated devices. A0 needs all fields set for CcePcieCtrl. B0 and later only need a few fields set. Reviewed-by: Stuart Summers Signed-off-by: Dean Luick Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip_registers.h | 11 ++++ drivers/staging/rdma/hfi1/pcie.c | 82 ++++++++++++++++++++++++++++-- 2 files changed, 89 insertions(+), 4 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h index 701e9e1012a6..014d7a609ea0 100644 --- a/drivers/staging/rdma/hfi1/chip_registers.h +++ b/drivers/staging/rdma/hfi1/chip_registers.h @@ -551,6 +551,17 @@ #define CCE_MSIX_TABLE_UPPER (CCE + 0x000000100008) #define CCE_MSIX_TABLE_UPPER_RESETCSR 0x0000000100000000ull #define CCE_MSIX_VEC_CLR_WITHOUT_INT (CCE + 0x000000110400) +#define CCE_PCIE_CTRL (CCE + 0x0000000000C0) +#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK 0x3ull +#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT 0 +#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK 0xFull +#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT 2 +#define CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT 8 +#define CCE_PCIE_CTRL_XMT_MARGIN_SHIFT 9 +#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK 0x1ull +#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT 12 +#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK 0x7ull +#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT 13 #define CCE_REVISION (CCE + 0x000000000000) #define CCE_REVISION2 (CCE + 0x000000000008) #define CCE_REVISION2_HFI_ID_MASK 0x1ull diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 8317b07d722a..9917faff823c 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -866,6 +866,83 @@ static void arm_gasket_logic(struct hfi1_devdata *dd) read_csr(dd, ASIC_PCIE_SD_HOST_CMD); } +/* + * CCE_PCIE_CTRL long name helpers + * We redefine these shorter macros to use in the code while leaving + * chip_registers.h to be autogenerated from the hardware spec. + */ +#define LANE_BUNDLE_MASK CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK +#define LANE_BUNDLE_SHIFT CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT +#define LANE_DELAY_MASK CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK +#define LANE_DELAY_SHIFT CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT +#define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT +#define MARGIN_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_SHIFT +#define MARGIN_G1_G2_OVERWRITE_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK +#define MARGIN_G1_G2_OVERWRITE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT +#define MARGIN_GEN1_GEN2_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK +#define MARGIN_GEN1_GEN2_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT + + /* + * Write xmt_margin for full-swing (WFR-B) or half-swing (WFR-C). + */ +static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname) +{ + u64 pcie_ctrl; + u64 xmt_margin; + u64 xmt_margin_oe; + u64 lane_delay; + u64 lane_bundle; + + pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL); + + /* + * For Discrete, use full-swing. + * - PCIe TX defaults to full-swing. + * Leave this register as default. 
+ * For Integrated, use half-swing + * - Copy xmt_margin and xmt_margin_oe + * from Gen1/Gen2 to Gen3. + */ + if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */ + /* extract initial fields */ + xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT) + & MARGIN_GEN1_GEN2_MASK; + xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT) + & MARGIN_G1_G2_OVERWRITE_MASK; + lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK; + lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT) + & LANE_BUNDLE_MASK; + + /* + * For A0, EFUSE values are not set. Override with the + * correct values. + */ + if (is_ax(dd)) { + /* + * xmt_margin and OverwriteEnable should be the + * same for Gen1/Gen2 and Gen3 + */ + xmt_margin = 0x5; + xmt_margin_oe = 0x1; + lane_delay = 0xF; /* Delay 240ns. */ + lane_bundle = 0x0; /* Set to 1 lane. */ + } + + /* overwrite existing values */ + pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT) + | (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT) + | (xmt_margin << MARGIN_SHIFT) + | (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT) + | (lane_delay << LANE_DELAY_SHIFT) + | (lane_bundle << LANE_BUNDLE_SHIFT); + + write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl); + } + + dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n", + fname, pcie_ctrl); +} /* * Do all the steps needed to transition the PCIe link to Gen3 speed. */ @@ -1064,11 +1141,8 @@ retry: /* * step 5d: program XMT margin - * Right now, leave the default alone. To change, do a - * read-modify-write of: - * CcePcieCtrl.XmtMargin - * CcePcieCtrl.XmitMarginOverwriteEnable */ + write_xmt_margin(dd, __func__); /* step 5e: disable active state power management (ASPM) */ dd_dev_info(dd, "%s: clearing ASPM\n", __func__); -- cgit v1.2.3-59-g8ed1b From 349ac71ffad79281f3c0dc908cd4b6f7d7dbb477 Mon Sep 17 00:00:00 2001 From: "jubin.john@intel.com" Date: Mon, 11 Jan 2016 18:30:52 -0500 Subject: staging/hfi1: Use BIT macro This patch fixes the checkpatch issue: CHECK: Prefer using the BIT macro Use of BIT macro for HDRQ_INCREMENT in chip.h causes a change in format specifier for error message in init.c in order to avoid a build warning.
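To make the format-specifier point concrete, here is a minimal sketch, not the driver's code (the macro and function names below are illustrative stand-ins): BIT(n) expands to an unsigned long (1UL << n), so a constant that was a plain int under the (1 << n) form now needs %lu rather than %d in printk-style format strings.

#include <linux/bitops.h>
#include <linux/printk.h>

#define DEMO_INCREMENT_OLD	(1 << 5)	/* int: matches %d */
#define DEMO_INCREMENT		BIT(5)		/* (1UL << 5): matches %lu */

static void demo_report(int rcvhdrcnt)
{
	/* Keeping %d here after the BIT() conversion would trip -Wformat. */
	pr_err("header queue count %d must be divisible by %lu\n",
	       rcvhdrcnt, DEMO_INCREMENT);
}

This is exactly the adjustment the init.c hunk below makes for HDRQ_INCREMENT.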
Reviewed-by: Dean Luick Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.h | 48 +++++++++++++++++++------------------- drivers/staging/rdma/hfi1/common.h | 4 ++-- drivers/staging/rdma/hfi1/hfi.h | 22 ++++++++--------- drivers/staging/rdma/hfi1/init.c | 2 +- drivers/staging/rdma/hfi1/mad.c | 4 ++-- drivers/staging/rdma/hfi1/qp.h | 2 +- drivers/staging/rdma/hfi1/qsfp.h | 10 ++++---- drivers/staging/rdma/hfi1/sdma.c | 8 +++---- 8 files changed, 50 insertions(+), 50 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 5b375ddc345d..1368a4455b28 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -242,18 +242,18 @@ #define HCMD_SUCCESS 2 /* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR - error flags */ -#define SPICO_ROM_FAILED (1 << 0) -#define UNKNOWN_FRAME (1 << 1) -#define TARGET_BER_NOT_MET (1 << 2) -#define FAILED_SERDES_INTERNAL_LOOPBACK (1 << 3) -#define FAILED_SERDES_INIT (1 << 4) -#define FAILED_LNI_POLLING (1 << 5) -#define FAILED_LNI_DEBOUNCE (1 << 6) -#define FAILED_LNI_ESTBCOMM (1 << 7) -#define FAILED_LNI_OPTEQ (1 << 8) -#define FAILED_LNI_VERIFY_CAP1 (1 << 9) -#define FAILED_LNI_VERIFY_CAP2 (1 << 10) -#define FAILED_LNI_CONFIGLT (1 << 11) +#define SPICO_ROM_FAILED BIT(0) +#define UNKNOWN_FRAME BIT(1) +#define TARGET_BER_NOT_MET BIT(2) +#define FAILED_SERDES_INTERNAL_LOOPBACK BIT(3) +#define FAILED_SERDES_INIT BIT(4) +#define FAILED_LNI_POLLING BIT(5) +#define FAILED_LNI_DEBOUNCE BIT(6) +#define FAILED_LNI_ESTBCOMM BIT(7) +#define FAILED_LNI_OPTEQ BIT(8) +#define FAILED_LNI_VERIFY_CAP1 BIT(9) +#define FAILED_LNI_VERIFY_CAP2 BIT(10) +#define FAILED_LNI_CONFIGLT BIT(11) #define FAILED_LNI (FAILED_LNI_POLLING | FAILED_LNI_DEBOUNCE \ | FAILED_LNI_ESTBCOMM | FAILED_LNI_OPTEQ \ @@ -262,16 +262,16 @@ | FAILED_LNI_CONFIGLT) /* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG - host message flags */ -#define HOST_REQ_DONE (1 << 0) -#define BC_PWR_MGM_MSG (1 << 1) -#define BC_SMA_MSG (1 << 2) -#define BC_BCC_UNKOWN_MSG (1 << 3) -#define BC_IDLE_UNKNOWN_MSG (1 << 4) -#define EXT_DEVICE_CFG_REQ (1 << 5) -#define VERIFY_CAP_FRAME (1 << 6) -#define LINKUP_ACHIEVED (1 << 7) -#define LINK_GOING_DOWN (1 << 8) -#define LINK_WIDTH_DOWNGRADED (1 << 9) +#define HOST_REQ_DONE BIT(0) +#define BC_PWR_MGM_MSG BIT(1) +#define BC_SMA_MSG BIT(2) +#define BC_BCC_UNKNOWN_MSG BIT(3) +#define BC_IDLE_UNKNOWN_MSG BIT(4) +#define EXT_DEVICE_CFG_REQ BIT(5) +#define VERIFY_CAP_FRAME BIT(6) +#define LINKUP_ACHIEVED BIT(7) +#define LINK_GOING_DOWN BIT(8) +#define LINK_WIDTH_DOWNGRADED BIT(9) /* DC_DC8051_CFG_EXT_DEV_1.REQ_TYPE - 8051 host requests */ #define HREQ_LOAD_CONFIG 0x01 @@ -335,14 +335,14 @@ * the CSR fields hold multiples of this value. */ #define RCV_SHIFT 3 -#define RCV_INCREMENT (1 << RCV_SHIFT) +#define RCV_INCREMENT BIT(RCV_SHIFT) /* * Receive header queue entry increment - the CSR holds multiples of * this value. 
*/ #define HDRQ_SIZE_SHIFT 5 -#define HDRQ_INCREMENT (1 << HDRQ_SIZE_SHIFT) +#define HDRQ_INCREMENT BIT(HDRQ_SIZE_SHIFT) /* * Freeze handling flags diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h index 5dd92720faae..e4b1dc6d0328 100644 --- a/drivers/staging/rdma/hfi1/common.h +++ b/drivers/staging/rdma/hfi1/common.h @@ -349,10 +349,10 @@ struct hfi1_message_header { #define HFI1_QPN_MASK 0xFFFFFF #define HFI1_FECN_SHIFT 31 #define HFI1_FECN_MASK 1 -#define HFI1_FECN_SMASK (1 << HFI1_FECN_SHIFT) +#define HFI1_FECN_SMASK BIT(HFI1_FECN_SHIFT) #define HFI1_BECN_SHIFT 30 #define HFI1_BECN_MASK 1 -#define HFI1_BECN_SMASK (1 << HFI1_BECN_SHIFT) +#define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT) #define HFI1_MULTICAST_LID_BASE 0xC000 static inline __u64 rhf_to_cpu(const __le32 *rbuf) diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 52dcc87689f1..55202c79686d 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -424,17 +424,17 @@ struct hfi1_sge_state; #define __HLS_GOING_OFFLINE_BP 9 #define __HLS_LINK_COOLDOWN_BP 10 -#define HLS_UP_INIT (1 << __HLS_UP_INIT_BP) -#define HLS_UP_ARMED (1 << __HLS_UP_ARMED_BP) -#define HLS_UP_ACTIVE (1 << __HLS_UP_ACTIVE_BP) -#define HLS_DN_DOWNDEF (1 << __HLS_DN_DOWNDEF_BP) /* link down default */ -#define HLS_DN_POLL (1 << __HLS_DN_POLL_BP) -#define HLS_DN_DISABLE (1 << __HLS_DN_DISABLE_BP) -#define HLS_DN_OFFLINE (1 << __HLS_DN_OFFLINE_BP) -#define HLS_VERIFY_CAP (1 << __HLS_VERIFY_CAP_BP) -#define HLS_GOING_UP (1 << __HLS_GOING_UP_BP) -#define HLS_GOING_OFFLINE (1 << __HLS_GOING_OFFLINE_BP) -#define HLS_LINK_COOLDOWN (1 << __HLS_LINK_COOLDOWN_BP) +#define HLS_UP_INIT BIT(__HLS_UP_INIT_BP) +#define HLS_UP_ARMED BIT(__HLS_UP_ARMED_BP) +#define HLS_UP_ACTIVE BIT(__HLS_UP_ACTIVE_BP) +#define HLS_DN_DOWNDEF BIT(__HLS_DN_DOWNDEF_BP) /* link down default */ +#define HLS_DN_POLL BIT(__HLS_DN_POLL_BP) +#define HLS_DN_DISABLE BIT(__HLS_DN_DISABLE_BP) +#define HLS_DN_OFFLINE BIT(__HLS_DN_OFFLINE_BP) +#define HLS_VERIFY_CAP BIT(__HLS_VERIFY_CAP_BP) +#define HLS_GOING_UP BIT(__HLS_GOING_UP_BP) +#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP) +#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP) #define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE) diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 00f52e815242..aa46923b4d54 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -260,7 +260,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt) /* Validate and initialize Rcv Hdr Q variables */ if (rcvhdrcnt % HDRQ_INCREMENT) { dd_dev_err(dd, - "ctxt%u: header queue count %d must be divisible by %d\n", + "ctxt%u: header queue count %d must be divisible by %lu\n", rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT); goto bail; } diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 4f5dbd14b5de..eeb868796750 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -1782,7 +1782,7 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, u32 len = OPA_AM_CI_LEN(am) + 1; int ret; -#define __CI_PAGE_SIZE (1 << 7) /* 128 bytes */ +#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */ #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1) #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK) @@ -3402,7 +3402,7 @@ struct opa_led_info { }; #define OPA_LED_SHIFT 31 -#define OPA_LED_MASK (1 << OPA_LED_SHIFT) +#define OPA_LED_MASK 
BIT(OPA_LED_SHIFT) static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 62a94c5d7dca..19b16a9a99ea 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -54,7 +54,7 @@ #include "verbs.h" #include "sdma.h" -#define QPN_MAX (1 << 24) +#define QPN_MAX BIT(24) #define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) /* diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h index d30c2a6baa0b..16aebdc7f679 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/staging/rdma/hfi1/qsfp.h @@ -59,11 +59,11 @@ * Below are masks for QSFP pins. Pins are the same for HFI0 and HFI1. * _N means asserted low */ -#define QSFP_HFI0_I2CCLK (1 << 0) -#define QSFP_HFI0_I2CDAT (1 << 1) -#define QSFP_HFI0_RESET_N (1 << 2) -#define QSFP_HFI0_INT_N (1 << 3) -#define QSFP_HFI0_MODPRST_N (1 << 4) +#define QSFP_HFI0_I2CCLK BIT(0) +#define QSFP_HFI0_I2CDAT BIT(1) +#define QSFP_HFI0_RESET_N BIT(2) +#define QSFP_HFI0_INT_N BIT(3) +#define QSFP_HFI0_MODPRST_N BIT(4) /* QSFP is paged at 256 bytes */ #define QSFP_PAGESIZE 256 diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index 9a15f1f32b45..1d38be54e8fd 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -112,10 +112,10 @@ MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptor before interrupt"); | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK)) /* sdma_sendctrl operations */ -#define SDMA_SENDCTRL_OP_ENABLE (1U << 0) -#define SDMA_SENDCTRL_OP_INTENABLE (1U << 1) -#define SDMA_SENDCTRL_OP_HALT (1U << 2) -#define SDMA_SENDCTRL_OP_CLEANUP (1U << 3) +#define SDMA_SENDCTRL_OP_ENABLE BIT(0) +#define SDMA_SENDCTRL_OP_INTENABLE BIT(1) +#define SDMA_SENDCTRL_OP_HALT BIT(2) +#define SDMA_SENDCTRL_OP_CLEANUP BIT(3) /* handle long defines */ #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \ -- cgit v1.2.3-59-g8ed1b From 9eb0432baa150c31222be71f435091ea562f93cf Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Mon, 11 Jan 2016 18:30:53 -0500 Subject: staging/hfi1: Move s_sde to read mostly section of hfi1_qp This would reduce L2 cache misses on s_sde in the _hfi1_schedule_send function when invoked from post_send thereby improving performance of post_send. 
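The idea behind the move is plain data layout: keep fields that the send path only reads next to other read-mostly members, so the cache lines they occupy stay clean and are not invalidated by frequently written state. The patch itself only relocates s_sde within the struct; the sketch below, with illustrative names and an explicit alignment annotation the patch does not need, shows the same idea in a self-contained form:

#include <linux/cache.h>
#include <linux/types.h>

struct demo_qp {
	/* read-mostly: consulted on every send, written rarely */
	void *engine;		/* plays the role of s_sde here */
	u8 sc;

	/*
	 * Frequently written send state, placed on its own cache line
	 * so stores here do not invalidate the read-mostly line above.
	 */
	u32 cur_size ____cacheline_aligned_in_smp;
	u32 len;
};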
Reviewed-by: Mike Marciniszyn Signed-off-by: Harish Chegondi Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 286e468b0479..a163fc275440 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -441,6 +441,7 @@ struct hfi1_qp { struct hfi1_swqe *s_wq; /* send work queue */ struct hfi1_mmap_info *ip; struct ahg_ib_header *s_hdr; /* next packet header to send */ + struct sdma_engine *s_sde; /* current sde */ /* sc for UC/RC QPs - based on ah for UD */ u8 s_sc; unsigned long timeout_jiffies; /* computed from timeout */ @@ -506,7 +507,6 @@ struct hfi1_qp { struct hfi1_swqe *s_wqe; struct hfi1_sge_state s_sge; /* current send request data */ struct hfi1_mregion *s_rdma_mr; - struct sdma_engine *s_sde; /* current sde */ u32 s_cur_size; /* size of send packet in bytes */ u32 s_len; /* total length of s_sge */ u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ -- cgit v1.2.3-59-g8ed1b From 5b55ea3b6eb8abe30aea4ae1604a6f067bd5e010 Mon Sep 17 00:00:00 2001 From: "Mark F. Brown" Date: Mon, 11 Jan 2016 18:30:54 -0500 Subject: staging/hfi1: change krcvqs mod param from byte to uint The krcvqs parameter is displayed incorrectly in sysfs. The workaround is to set the param type as uint. Reviewed-by: Mike Marciniszyn Reviewed-by: Mitko Haralanov Signed-off-by: Mark F. Brown Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 2 +- drivers/staging/rdma/hfi1/init.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 55202c79686d..b33bcca541bc 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1677,7 +1677,7 @@ extern unsigned int hfi1_cu; extern unsigned int user_credit_return_threshold; extern int num_user_contexts; extern unsigned n_krcvqs; -extern u8 krcvqs[]; +extern uint krcvqs[]; extern int krcvqsset; extern uint kdeth_qp; extern uint loopback; diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index aa46923b4d54..48269a2ab756 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -87,9 +87,9 @@ module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO); MODULE_PARM_DESC( num_user_contexts, "Set max number of user contexts to use"); -u8 krcvqs[RXE_NUM_DATA_VL]; +uint krcvqs[RXE_NUM_DATA_VL]; int krcvqsset; -module_param_array(krcvqs, byte, &krcvqsset, S_IRUGO); +module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO); MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL"); /* computed based on above array */ -- cgit v1.2.3-59-g8ed1b From 0edf80eae01b7f211a1142856c6c8fc41ea3ce06 Mon Sep 17 00:00:00 2001 From: "jubin.john@intel.com" Date: Mon, 11 Jan 2016 18:30:55 -0500 Subject: staging/hfi1: Change default krcvqs Change the default number of krcvqs to number of numa nodes + 1 based on the performance data collected. 
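Both krcvqs patches above touch the same mechanism; as a hedged, self-contained sketch of it (all names here are placeholders, not the driver's): a module parameter array declared with the uint type appears under /sys/module/<module>/parameters as comma-separated decimal values, and the number of elements the user actually supplied is written into the third argument.

#include <linux/module.h>
#include <linux/moduleparam.h>

static uint demo_krcvqs[8];
static int demo_krcvqs_set;	/* count of user-provided elements */
module_param_array(demo_krcvqs, uint, &demo_krcvqs_set, 0444);
MODULE_PARM_DESC(demo_krcvqs,
		 "Demo per-VL receive queue counts (uint renders as decimal in sysfs)");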
Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index bbe5ad85cec0..503bfca584c0 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -12445,7 +12445,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd) */ num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1; else - num_kernel_contexts = num_online_nodes(); + num_kernel_contexts = num_online_nodes() + 1; num_kernel_contexts = max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts); /* -- cgit v1.2.3-59-g8ed1b From a699c6c27fbce4942bc902f42b69e03c5ce03fa9 Mon Sep 17 00:00:00 2001 From: Vennila Megavannan Date: Mon, 11 Jan 2016 18:30:56 -0500 Subject: staging/hfi1: add per SDMA engine stats to hfistats Added the following per sdma engine stats: - SendDmaDescFetchedCnt - software maintained count of SDMA interrupts (SDmaInt, SDmaIdleInt, SDmaProgressInt) - software maintained counts of SDMA error cases Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Vennila Megavannan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 110 ++++++++++++++++++++++++++++- drivers/staging/rdma/hfi1/chip.h | 5 ++ drivers/staging/rdma/hfi1/chip_registers.h | 1 + drivers/staging/rdma/hfi1/hfi.h | 1 + drivers/staging/rdma/hfi1/sdma.c | 30 ++++---- drivers/staging/rdma/hfi1/sdma.h | 7 ++ 6 files changed, 139 insertions(+), 15 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 503bfca584c0..f4f720d604da 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -1297,10 +1297,58 @@ static u64 dev_access_u32_csr(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; + u64 csr = entry->csr; - if (vl != CNTR_INVALID_VL) - return 0; - return read_write_csr(dd, entry->csr, mode, data); + if (entry->flags & CNTR_SDMA) { + if (vl == CNTR_INVALID_VL) + return 0; + csr += 0x100 * vl; + } else { + if (vl != CNTR_INVALID_VL) + return 0; + } + return read_write_csr(dd, csr, mode, data); +} + +static u64 access_sde_err_cnt(const struct cntr_entry *entry, + void *context, int idx, int mode, u64 data) +{ + struct hfi1_devdata *dd = (struct hfi1_devdata *)context; + + if (dd->per_sdma && idx < dd->num_sdma) + return dd->per_sdma[idx].err_cnt; + return 0; +} + +static u64 access_sde_int_cnt(const struct cntr_entry *entry, + void *context, int idx, int mode, u64 data) +{ + struct hfi1_devdata *dd = (struct hfi1_devdata *)context; + + if (dd->per_sdma && idx < dd->num_sdma) + return dd->per_sdma[idx].sdma_int_cnt; + return 0; +} + +static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry, + void *context, int idx, int mode, u64 data) +{ + struct hfi1_devdata *dd = (struct hfi1_devdata *)context; + + if (dd->per_sdma && idx < dd->num_sdma) + return dd->per_sdma[idx].idle_int_cnt; + return 0; +} + +static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry, + void *context, int idx, int mode, + u64 data) +{ + struct hfi1_devdata *dd = (struct hfi1_devdata *)context; + + if (dd->per_sdma && idx < dd->num_sdma) + return dd->per_sdma[idx].progress_int_cnt; + return 0; } static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context, @@ -4070,6 
+4118,22 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { access_sw_kmem_wait), [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL, access_sw_send_schedule), +[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn", + SEND_DMA_DESC_FETCHED_CNT, 0, + CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, + dev_access_u32_csr), +[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0, + CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, + access_sde_int_cnt), +[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0, + CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, + access_sde_err_cnt), +[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0, + CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, + access_sde_idle_int_cnt), +[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0, + CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, + access_sde_progress_int_cnt), /* MISC_ERR_STATUS */ [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0, CNTR_NORMAL, @@ -5707,6 +5771,7 @@ static void handle_sdma_eng_err(struct hfi1_devdata *dd, dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", sde->this_idx, source, (unsigned long long)status); #endif + sde->err_cnt++; sdma_engine_error(sde, status); /* @@ -11150,6 +11215,20 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep, dd->cntrs[entry->offset + j] = val; } + } else if (entry->flags & CNTR_SDMA) { + hfi1_cdbg(CNTR, + "\t Per SDMA Engine\n"); + for (j = 0; j < dd->chip_sdma_engines; + j++) { + val = + entry->rw_cntr(entry, dd, j, + CNTR_MODE_R, 0); + hfi1_cdbg(CNTR, + "\t\tRead 0x%llx for %d\n", + val, j); + dd->cntrs[entry->offset + j] = + val; + } } else { val = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, @@ -11553,6 +11632,21 @@ static int init_cntrs(struct hfi1_devdata *dd) dd->ndevcntrs++; index++; } + } else if (dev_cntrs[i].flags & CNTR_SDMA) { + hfi1_dbg_early( + "\tProcessing per SDE counters chip enginers %u\n", + dd->chip_sdma_engines); + dev_cntrs[i].offset = index; + for (j = 0; j < dd->chip_sdma_engines; j++) { + memset(name, '\0', C_MAX_NAME); + snprintf(name, C_MAX_NAME, "%s%d", + dev_cntrs[i].name, j); + sz += strlen(name); + sz++; + hfi1_dbg_early("\t\t%s\n", name); + dd->ndevcntrs++; + index++; + } } else { /* +1 for newline */ sz += strlen(dev_cntrs[i].name) + 1; @@ -11594,6 +11688,16 @@ static int init_cntrs(struct hfi1_devdata *dd) p += strlen(name); *p++ = '\n'; } + } else if (dev_cntrs[i].flags & CNTR_SDMA) { + for (j = 0; j < TXE_NUM_SDMA_ENGINES; + j++) { + memset(name, '\0', C_MAX_NAME); + snprintf(name, C_MAX_NAME, "%s%d", + dev_cntrs[i].name, j); + memcpy(p, name, strlen(name)); + p += strlen(name); + *p++ = '\n'; + } } else { memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name)); diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 1368a4455b28..b46ef6675d45 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -787,6 +787,11 @@ enum { C_SW_PIO_WAIT, C_SW_KMEM_WAIT, C_SW_SEND_SCHED, + C_SDMA_DESC_FETCHED_CNT, + C_SDMA_INT_CNT, + C_SDMA_ERR_CNT, + C_SDMA_IDLE_INT_CNT, + C_SDMA_PROGRESS_INT_CNT, /* MISC_ERR_STATUS */ C_MISC_PLL_LOCK_FAIL_ERR, C_MISC_MBIST_FAIL_ERR, diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h index 014d7a609ea0..3cd3352af2ce 100644 --- a/drivers/staging/rdma/hfi1/chip_registers.h +++ b/drivers/staging/rdma/hfi1/chip_registers.h @@ -1301,5 +1301,6 @@ #define CCE_INT_BLOCKED (CCE + 0x000000110C00) #define SEND_DMA_IDLE_CNT (TXE + 0x000000200040) #define SEND_DMA_DESC_FETCHED_CNT (TXE + 0x000000200058) 
+#define CCE_MSIX_PBA_OFFSET 0X0110000 #endif /* DEF_CHIP_REG */ diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index b33bcca541bc..6bfa5c8e7b84 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -490,6 +490,7 @@ struct hfi1_sge_state; #define CNTR_DISABLED 0x2 /* Disable this counter */ #define CNTR_32BIT 0x4 /* Simulate 64 bits for this counter */ #define CNTR_VL 0x8 /* Per VL counter */ +#define CNTR_SDMA 0x10 #define CNTR_INVALID_VL -1 /* Specifies invalid VL */ #define CNTR_MODE_W 0x0 #define CNTR_MODE_R 0x1 diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index 1d38be54e8fd..4eb55facfea2 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -1061,18 +1061,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) sde->desc_avail = sdma_descq_freecnt(sde); sde->sdma_shift = ilog2(descq_cnt); sde->sdma_mask = (1 << sde->sdma_shift) - 1; - sde->descq_full_count = 0; - - /* Create a mask for all 3 chip interrupt sources */ - sde->imask = (u64)1 << (0*TXE_NUM_SDMA_ENGINES + this_idx) - | (u64)1 << (1*TXE_NUM_SDMA_ENGINES + this_idx) - | (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx); - /* Create a mask specifically for sdma_idle */ - sde->idle_mask = - (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx); - /* Create a mask specifically for sdma_progress */ - sde->progress_mask = - (u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx); + + /* Create a mask specifically for each interrupt source */ + sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES + + this_idx); + sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES + + this_idx); + sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES + + this_idx); + /* Create a combined mask to cover all 3 interrupt sources */ + sde->imask = sde->int_mask | sde->progress_mask | + sde->idle_mask; + spin_lock_init(&sde->tail_lock); seqlock_init(&sde->head_lock); spin_lock_init(&sde->senddmactrl_lock); @@ -1552,6 +1552,12 @@ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status) trace_hfi1_sdma_engine_interrupt(sde, status); write_seqlock(&sde->head_lock); sdma_set_desc_cnt(sde, sdma_desct_intr); + if (status & sde->idle_mask) + sde->idle_int_cnt++; + else if (status & sde->progress_mask) + sde->progress_int_cnt++; + else if (status & sde->int_mask) + sde->sdma_int_cnt++; sdma_make_progress(sde, status); write_sequnlock(&sde->head_lock); } diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index da89e6458162..757017a04d95 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -409,6 +409,7 @@ struct sdma_engine { u64 imask; /* clear interrupt mask */ u64 idle_mask; u64 progress_mask; + u64 int_mask; /* private: */ volatile __le64 *head_dma; /* DMA'ed by chip */ /* private: */ @@ -465,6 +466,12 @@ struct sdma_engine { u16 tx_head; /* private: */ u64 last_status; + /* private */ + u64 err_cnt; + /* private */ + u64 sdma_int_cnt; + u64 idle_int_cnt; + u64 progress_int_cnt; /* private: */ struct list_head dmawait; -- cgit v1.2.3-59-g8ed1b From c024c554aeaf6197a1869fdc79c190139182203a Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Mon, 11 Jan 2016 18:30:57 -0500 Subject: staging/hfi1: Remove unneeded variable index The variable "index" increments the same as dd->ndevcntrs. Just use the latter. Remove unneeded usage of "index" in the fill loop - it is not used there or later in the function.
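The cleanup pattern is generic enough to show in isolation; a hedged sketch with placeholder names, not the driver's code: when a loop maintains two counters that can never diverge, one of them is dead weight and the survivor can serve both roles.

/* before: 'index' shadows 'ndevcntrs' exactly */
static int demo_count_before(int *offsets, int n)
{
	int i, ndevcntrs = 0, index = 0;

	for (i = 0; i < n; i++) {
		offsets[i] = index;
		ndevcntrs++;
		index++;	/* always equal to ndevcntrs */
	}
	return ndevcntrs;
}

/* after: the duplicate counter is gone */
static int demo_count_after(int *offsets, int n)
{
	int i, ndevcntrs = 0;

	for (i = 0; i < n; i++)
		offsets[i] = ndevcntrs++;
	return ndevcntrs;
}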
Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index f4f720d604da..1109049b366d 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -11592,7 +11592,7 @@ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); #define C_MAX_NAME 13 /* 12 chars + one for /0 */ static int init_cntrs(struct hfi1_devdata *dd) { - int i, rcv_ctxts, index, j; + int i, rcv_ctxts, j; size_t sz; char *p; char name[C_MAX_NAME]; @@ -11609,7 +11609,6 @@ static int init_cntrs(struct hfi1_devdata *dd) /* size names and determine how many we have*/ dd->ndevcntrs = 0; sz = 0; - index = 0; for (i = 0; i < DEV_CNTR_LAST; i++) { hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name); @@ -11620,7 +11619,7 @@ static int init_cntrs(struct hfi1_devdata *dd) if (dev_cntrs[i].flags & CNTR_VL) { hfi1_dbg_early("\tProcessing VL cntr\n"); - dev_cntrs[i].offset = index; + dev_cntrs[i].offset = dd->ndevcntrs; for (j = 0; j < C_VL_COUNT; j++) { memset(name, '\0', C_MAX_NAME); snprintf(name, C_MAX_NAME, "%s%d", @@ -11630,13 +11629,12 @@ static int init_cntrs(struct hfi1_devdata *dd) sz++; hfi1_dbg_early("\t\t%s\n", name); dd->ndevcntrs++; - index++; } } else if (dev_cntrs[i].flags & CNTR_SDMA) { hfi1_dbg_early( "\tProcessing per SDE counters chip enginers %u\n", dd->chip_sdma_engines); - dev_cntrs[i].offset = index; + dev_cntrs[i].offset = dd->ndevcntrs; for (j = 0; j < dd->chip_sdma_engines; j++) { memset(name, '\0', C_MAX_NAME); snprintf(name, C_MAX_NAME, "%s%d", @@ -11645,24 +11643,22 @@ static int init_cntrs(struct hfi1_devdata *dd) sz++; hfi1_dbg_early("\t\t%s\n", name); dd->ndevcntrs++; - index++; } } else { /* +1 for newline */ sz += strlen(dev_cntrs[i].name) + 1; + dev_cntrs[i].offset = dd->ndevcntrs; dd->ndevcntrs++; - dev_cntrs[i].offset = index; - index++; hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name); } } /* allocate space for the counter values */ - dd->cntrs = kcalloc(index, sizeof(u64), GFP_KERNEL); + dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); if (!dd->cntrs) goto bail; - dd->scntrs = kcalloc(index, sizeof(u64), GFP_KERNEL); + dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); if (!dd->scntrs) goto bail; @@ -11674,7 +11670,7 @@ static int init_cntrs(struct hfi1_devdata *dd) goto bail; /* fill in the names */ - for (p = dd->cntrnames, i = 0, index = 0; i < DEV_CNTR_LAST; i++) { + for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { if (dev_cntrs[i].flags & CNTR_DISABLED) { /* Nothing */ } else { @@ -11704,7 +11700,6 @@ static int init_cntrs(struct hfi1_devdata *dd) p += strlen(dev_cntrs[i].name); *p++ = '\n'; } - index++; } } -- cgit v1.2.3-59-g8ed1b From 624be1dbdb7c69c0218e78a3afec98a09a08e747 Mon Sep 17 00:00:00 2001 From: Edward Mascarenhas Date: Mon, 11 Jan 2016 18:31:43 -0500 Subject: staging/hfi1: Clean up comments Clean up comments by deleting numbering and terms internal to Intel. The information on the actual bugs is not deleted. 
Reviewed-by: Mike Marciniszyn Signed-off-by: Edward Mascarenhas Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 1 - drivers/staging/rdma/hfi1/driver.c | 2 +- drivers/staging/rdma/hfi1/hfi.h | 4 ++-- drivers/staging/rdma/hfi1/pcie.c | 2 +- drivers/staging/rdma/hfi1/ud.c | 2 +- 5 files changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 1109049b366d..f7bf90202ad1 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13636,7 +13636,6 @@ int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey) write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg); /* * Enable send-side J_KEY integrity check, unless this is A0 h/w - * (due to A0 erratum). */ if (!is_ax(dd)) { reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE); diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 8485de1fce08..321852060213 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -368,7 +368,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, if (opcode == IB_OPCODE_CNP) { /* * Only in pre-B0 h/w is the CNP_OPCODE handled - * via this code path (errata 291394). + * via this code path. */ struct hfi1_qp *qp = NULL; u32 lqpn, rqpn; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 6bfa5c8e7b84..5bc385a574c5 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1741,7 +1741,7 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; if (is_ax(dd)) - /* turn off send-side job key checks - A0 erratum */ + /* turn off send-side job key checks - A0 */ return base_sc_integrity & ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; return base_sc_integrity; @@ -1768,7 +1768,7 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; if (is_ax(dd)) - /* turn off send-side job key checks - A0 erratum */ + /* turn off send-side job key checks - A0 */ return base_sdma_integrity & ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; return base_sdma_integrity; diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 9917faff823c..b2f553d86042 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -1063,7 +1063,7 @@ retry: * PcieCfgRegPl100 - Gen3 Control * * turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl - * turn on PcieCfgRegPl100.EqEieosCnt (erratum) + * turn on PcieCfgRegPl100.EqEieosCnt * Everything else zero. */ reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK; diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index bd1b402c1e14..25e6053c38db 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -671,7 +671,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) if (unlikely(bth1 & HFI1_BECN_SMASK)) { /* * In pre-B0 h/w the CNP_OPCODE is handled via an - * error path (errata 291394). + * error path. 
*/ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; -- cgit v1.2.3-59-g8ed1b From f4ddedf4263bb94c81b2647ec5cf5ee79c6c20b0 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Mon, 11 Jan 2016 18:31:44 -0500 Subject: staging/hfi1: Fix Xmit Wait calculation Total XMIT wait needs to sum the xmit wait values of all the VLs not just those requested in the query. Also, make the algorithm used for both PortStatus and PortDataCounters the same. Reviewed-by: Arthur Kepner Reviewed-by: Breyer, Scott J Signed-off-by: Ira Weiny Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mad.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index eeb868796750..aa847818f4f7 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -2279,17 +2279,23 @@ static void a0_portstatus(struct hfi1_pportdata *ppd, { if (!is_bx(ppd->dd)) { unsigned long vl; - u64 max_vl_xmit_wait = 0, tmp; + u64 sum_vl_xmit_wait = 0; u32 vl_all_mask = VL_MASK_ALL; for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), 8 * sizeof(vl_all_mask)) { - tmp = read_port_cntr(ppd, C_TX_WAIT_VL, - idx_from_vl(vl)); - if (tmp > max_vl_xmit_wait) - max_vl_xmit_wait = tmp; + u64 tmp = sum_vl_xmit_wait + + read_port_cntr(ppd, C_TX_WAIT_VL, + idx_from_vl(vl)); + if (tmp < sum_vl_xmit_wait) { + /* we wrapped */ + sum_vl_xmit_wait = (u64)~0; + break; + } + sum_vl_xmit_wait = tmp; } - rsp->port_xmit_wait = cpu_to_be64(max_vl_xmit_wait); + if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait) + rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait); } } @@ -2491,18 +2497,19 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, return error_counter_summary; } -static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp, +static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, u32 vl_select_mask) { - if (!is_bx(dd)) { + if (!is_bx(ppd->dd)) { unsigned long vl; - int vfi = 0; u64 sum_vl_xmit_wait = 0; + u32 vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), + 8 * sizeof(vl_all_mask)) { u64 tmp = sum_vl_xmit_wait + - be64_to_cpu(rsp->vls[vfi++].port_vl_xmit_wait); + read_port_cntr(ppd, C_TX_WAIT_VL, + idx_from_vl(vl)); if (tmp < sum_vl_xmit_wait) { /* we wrapped */ sum_vl_xmit_wait = (u64) ~0; @@ -2665,7 +2672,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, vfi++; } - a0_datacounters(dd, rsp, vl_select_mask); + a0_datacounters(ppd, rsp, vl_select_mask); if (resp_len) *resp_len += response_data_size; -- cgit v1.2.3-59-g8ed1b From fb9036dd8cd85533456aec43d7892b707561eba8 Mon Sep 17 00:00:00 2001 From: Jim Snow Date: Mon, 11 Jan 2016 18:32:21 -0500 Subject: staging/hfi1: check for ARMED->ACTIVE change in recv int The link state will transition from ARMED to ACTIVE when a non-SC15 packet arrives, but the driver might not notice the change. With this fix, if the slowpath receive interrupt handler sees a non-SC15 packet while in the ARMED state, we queue work to call linkstate_active_work from process context to promote it to ACTIVE. 
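A condensed sketch of the handoff (names taken from the diff below; the
real code also parses the received header and re-checks the logical link
state before queuing):

	/* interrupt context: cheap check, then defer the promotion */
	if (unlikely(rcd->ppd->host_link_state == HLS_UP_ARMED) &&
	    set_armed_to_active(rcd, packet, dd))
		goto bail;	/* work queued; processing resumes later */

	/* process context, via ppd->linkstate_active_work */
	void receive_interrupt_work(struct work_struct *work)
	{
		struct hfi1_pportdata *ppd = container_of(work,
			struct hfi1_pportdata, linkstate_active_work);

		/* a received non-SC15 packet implies neighbor_normal */
		ppd->neighbor_normal = 1;
		set_link_state(ppd, HLS_UP_ACTIVE);
	}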
Reviewed-by: Dean Luick Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jim Snow Signed-off-by: Brendan Cunningham Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 5 +-- drivers/staging/rdma/hfi1/chip.h | 2 ++ drivers/staging/rdma/hfi1/driver.c | 72 ++++++++++++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/hfi.h | 11 ++++++ drivers/staging/rdma/hfi1/init.c | 1 + 5 files changed, 89 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index f7bf90202ad1..63d5d71e8c92 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -7878,7 +7878,7 @@ static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd) } /* force the receive interrupt */ -static inline void force_recv_intr(struct hfi1_ctxtdata *rcd) +void force_recv_intr(struct hfi1_ctxtdata *rcd) { write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); } @@ -7977,7 +7977,7 @@ u32 read_physical_state(struct hfi1_devdata *dd) & DC_DC8051_STS_CUR_STATE_PORT_MASK; } -static u32 read_logical_state(struct hfi1_devdata *dd) +u32 read_logical_state(struct hfi1_devdata *dd) { u64 reg; @@ -9952,6 +9952,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) ppd->link_enabled = 1; } + set_all_slowpath(ppd->dd); ret = set_local_link_attributes(ppd); if (ret) break; diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index b46ef6675d45..78ba42567f2b 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -690,6 +690,8 @@ u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl); u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data); u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl); u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data); +u32 read_logical_state(struct hfi1_devdata *dd); +void force_recv_intr(struct hfi1_ctxtdata *rcd); /* Per VL indexes */ enum { diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 321852060213..d096f11c0baa 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -862,6 +862,37 @@ static inline void set_all_dma_rtail(struct hfi1_devdata *dd) &handle_receive_interrupt_dma_rtail; } +void set_all_slowpath(struct hfi1_devdata *dd) +{ + int i; + + /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */ + for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++) + dd->rcd[i]->do_interrupt = &handle_receive_interrupt; +} + +static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd, + struct hfi1_packet packet, + struct hfi1_devdata *dd) +{ + struct work_struct *lsaw = &rcd->ppd->linkstate_active_work; + struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd, + packet.rhf_addr); + + if (hdr2sc(hdr, packet.rhf) != 0xf) { + int hwstate = read_logical_state(dd); + + if (hwstate != LSTATE_ACTIVE) { + dd_dev_info(dd, "Unexpected link state %d\n", hwstate); + return 0; + } + + queue_work(rcd->ppd->hfi1_wq, lsaw); + return 1; + } + return 0; +} + /* * handle_receive_interrupt - receive a packet * @rcd: the context @@ -929,6 +960,11 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) last = skip_rcv_packet(&packet, thread); skip_pkt = 0; } else { + /* Auto activate link on non-SC15 packet receive */ + if (unlikely(rcd->ppd->host_link_state == + HLS_UP_ARMED) && + set_armed_to_active(rcd, 
packet, dd)) + goto bail; last = process_rcv_packet(&packet, thread); } @@ -983,6 +1019,42 @@ bail: return last; } +/* + * We may discover in the interrupt that the hardware link state has + * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet), + * and we need to update the driver's notion of the link state. We cannot + * run set_link_state from interrupt context, so we queue this function on + * a workqueue. + * + * We delay the regular interrupt processing until after the state changes + * so that the link will be in the correct state by the time any application + * we wake up attempts to send a reply to any message it received. + * (Subsequent receive interrupts may possibly force the wakeup before we + * update the link state.) + * + * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes + * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues, + * so we're safe from use-after-free of the rcd. + */ +void receive_interrupt_work(struct work_struct *work) +{ + struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, + linkstate_active_work); + struct hfi1_devdata *dd = ppd->dd; + int i; + + /* Received non-SC15 packet implies neighbor_normal */ + ppd->neighbor_normal = 1; + set_link_state(ppd, HLS_UP_ACTIVE); + + /* + * Interrupt all kernel contexts that could have had an + * interrupt during auto activation. + */ + for (i = HFI1_CTRL_CTXT; i < dd->first_user_ctxt; i++) + force_recv_intr(dd->rcd[i]); +} + /* * Convert a given MTU size to the on-wire MAD packet enumeration. * Return -1 if the size is invalid. diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 5bc385a574c5..23d7e0249f32 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -729,6 +729,7 @@ struct hfi1_pportdata { u8 remote_link_down_reason; /* Error events that will cause a port bounce. 
*/ u32 port_error_action; + struct work_struct linkstate_active_work; }; typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet); @@ -1177,6 +1178,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *, struct hfi1_ctxtdata *); int handle_receive_interrupt(struct hfi1_ctxtdata *, int); int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int); int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int); +void set_all_slowpath(struct hfi1_devdata *dd); /* receive packet handler dispositions */ #define RCV_PKT_OK 0x0 /* keep going */ @@ -1197,6 +1199,15 @@ static inline u32 driver_lstate(struct hfi1_pportdata *ppd) return ppd->lstate; /* use the cached value */ } +void receive_interrupt_work(struct work_struct *work); + +/* extract service channel from header and rhf */ +static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf) +{ + return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) | + ((!!(rhf & RHF_DC_INFO_MASK)) << 4); +} + static inline u16 generate_jkey(kuid_t uid) { return from_kuid(current_user_ns(), uid) & 0xffff; diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 48269a2ab756..27b31fc88592 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -498,6 +498,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); INIT_WORK(&ppd->sma_message_work, handle_sma_message); INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); + INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); mutex_init(&ppd->hls_lock); spin_lock_init(&ppd->sdma_alllock); spin_lock_init(&ppd->qsfp_info.qsfp_lock); -- cgit v1.2.3-59-g8ed1b From ec3f2c12a1e6bea48fd58f2dfa97d7373263b39a Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:41:33 -0800 Subject: staging/rdma/hfi1: Begin to use rdmavt for verbs This patch begins to make use of rdmavt by registering with it and providing access to the header files. This is just the beginning of rdmavt support in hfi1. 
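The structural core of the conversion: rvt_dev_info is embedded as the
first member of hfi1's verbs device, so an ib_device handed back by the
core can be recovered with two container_of() steps.  Condensed from the
verbs.h hunk below (sketch only, private fields elided):

	struct hfi1_ibdev {
		struct rvt_dev_info rdi; /* Must be first */
		/* ... hfi1-private state ... */
	};

	static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
	{
		struct rvt_dev_info *rdi =
			container_of(ibdev, struct rvt_dev_info, ibdev);

		return container_of(rdi, struct hfi1_ibdev, rdi);
	}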
Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Kconfig | 2 +- drivers/staging/rdma/hfi1/chip.c | 2 +- drivers/staging/rdma/hfi1/diag.c | 2 +- drivers/staging/rdma/hfi1/hfi.h | 3 +-- drivers/staging/rdma/hfi1/init.c | 5 +++-- drivers/staging/rdma/hfi1/intr.c | 2 +- drivers/staging/rdma/hfi1/mad.c | 5 +++-- drivers/staging/rdma/hfi1/qp.c | 4 ++-- drivers/staging/rdma/hfi1/sysfs.c | 18 +++++++++--------- drivers/staging/rdma/hfi1/verbs.c | 15 ++++++++++----- drivers/staging/rdma/hfi1/verbs.h | 8 ++++++-- 11 files changed, 38 insertions(+), 28 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig index bd0249bcf199..846c240c80aa 100644 --- a/drivers/staging/rdma/hfi1/Kconfig +++ b/drivers/staging/rdma/hfi1/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_HFI1 tristate "Intel OPA Gen1 support" - depends on X86_64 + depends on X86_64 && INFINIBAND_RDMAVT select MMU_NOTIFIER default m ---help--- diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 63d5d71e8c92..da2718f05f21 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -9925,7 +9925,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) sdma_all_running(dd); /* Signal the IB layer that the port has went active */ - event.device = &dd->verbs_dev.ibdev; + event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = ppd->port; event.event = IB_EVENT_PORT_ACTIVE; } diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index 0c8831705664..fbe9b15c9a65 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -860,7 +860,7 @@ static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data, vl = sc4; } else { sl = (byte_two >> 4) & 0xf; - ibp = to_iport(&dd->verbs_dev.ibdev, 1); + ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1); sc5 = ibp->sl_to_sc[sl]; vl = sc_to_vlt(dd, sc5); if (vl != sc4) { diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 23d7e0249f32..dbea286cde72 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -65,8 +65,7 @@ #include #include #include -#include -#include +#include #include "chip_registers.h" #include "common.h" diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 27b31fc88592..b4076b22af14 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -56,6 +56,7 @@ #include #include #include +#include #include "hfi.h" #include "device.h" @@ -983,7 +984,7 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) rcu_barrier(); /* wait for rcu callbacks to complete */ free_percpu(dd->int_counter); free_percpu(dd->rcv_limit); - ib_dealloc_device(&dd->verbs_dev.ibdev); + ib_dealloc_device(&dd->verbs_dev.rdi.ibdev); } /* @@ -1079,7 +1080,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) bail: if (!list_empty(&dd->list)) list_del_init(&dd->list); - ib_dealloc_device(&dd->verbs_dev.ibdev); + ib_dealloc_device(&dd->verbs_dev.rdi.ibdev); return ERR_PTR(ret); } diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c index 426582b9ab65..1283f2d9136c 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/staging/rdma/hfi1/intr.c @@ -98,7 +98,7 @@ static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev) */ if (!(dd->flags & 
HFI1_INITTED)) return; - event.device = &dd->verbs_dev.ibdev; + event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = ppd->port; event.event = ev; ib_dispatch_event(&event); diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index aa847818f4f7..ed88a5aab140 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -1388,7 +1388,7 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); event.event = IB_EVENT_PKEY_CHANGE; - event.device = &dd->verbs_dev.ibdev; + event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = port; ib_dispatch_event(&event); } @@ -4171,7 +4171,8 @@ int hfi1_create_agents(struct hfi1_ibdev *dev) for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; - agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, + agent = ib_register_mad_agent(&dev->rdi.ibdev, p + 1, + IB_QPT_SMI, NULL, 0, send_handler, NULL, NULL, 0); if (IS_ERR(agent)) { diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index ce036810d576..bb447b56dd6b 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -1570,7 +1570,7 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev) return NULL; iter->dev = dev; - iter->specials = dev->ibdev.phys_port_cnt * 2; + iter->specials = dev->rdi.ibdev.phys_port_cnt * 2; if (qp_iter_next(iter)) { kfree(iter); return NULL; @@ -1610,7 +1610,7 @@ int qp_iter_next(struct qp_iter *iter) struct hfi1_ibport *ibp; int pidx; - pidx = n % dev->ibdev.phys_port_cnt; + pidx = n % dev->rdi.ibdev.phys_port_cnt; ppd = &dd_from_dev(dev)->pport[pidx]; ibp = &ppd->ibport_data; diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c index 1dd6727dd5ef..d05b9f37da0a 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/staging/rdma/hfi1/sysfs.c @@ -446,7 +446,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, ibdev.dev); + container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev); } @@ -455,7 +455,7 @@ static ssize_t show_hfi(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, ibdev.dev); + container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); struct hfi1_devdata *dd = dd_from_dev(dev); int ret; @@ -470,7 +470,7 @@ static ssize_t show_boardversion(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, ibdev.dev); + container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); struct hfi1_devdata *dd = dd_from_dev(dev); /* The string printed here is already newline-terminated. */ @@ -482,7 +482,7 @@ static ssize_t show_nctxts(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, ibdev.dev); + container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); struct hfi1_devdata *dd = dd_from_dev(dev); /* @@ -500,7 +500,7 @@ static ssize_t show_nfreectxts(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, ibdev.dev); + container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); struct hfi1_devdata *dd = dd_from_dev(dev); /* Return the number of free user ports (contexts) available. 
*/ @@ -511,7 +511,7 @@ static ssize_t show_serial(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, ibdev.dev); + container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); struct hfi1_devdata *dd = dd_from_dev(dev); return scnprintf(buf, PAGE_SIZE, "%s", dd->serial); @@ -523,7 +523,7 @@ static ssize_t store_chip_reset(struct device *device, size_t count) { struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, ibdev.dev); + container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); struct hfi1_devdata *dd = dd_from_dev(dev); int ret; @@ -552,7 +552,7 @@ static ssize_t show_tempsense(struct device *device, struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, ibdev.dev); + container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); struct hfi1_devdata *dd = dd_from_dev(dev); struct hfi1_temp temp; int ret; @@ -700,7 +700,7 @@ bail: */ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd) { - struct ib_device *dev = &dd->verbs_dev.ibdev; + struct ib_device *dev = &dd->verbs_dev.rdi.ibdev; int i, ret; for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) { diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 09b8d412ee90..0692ec48226f 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1903,7 +1903,7 @@ static void verbs_txreq_kmem_cache_ctor(void *obj) int hfi1_register_ib_device(struct hfi1_devdata *dd) { struct hfi1_ibdev *dev = &dd->verbs_dev; - struct ib_device *ibdev = &dev->ibdev; + struct ib_device *ibdev = &dev->rdi.ibdev; struct hfi1_pportdata *ppd = dd->pport; unsigned i, lk_tab_size; int ret; @@ -2069,7 +2069,13 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) strncpy(ibdev->node_desc, init_utsname()->nodename, sizeof(ibdev->node_desc)); - ret = ib_register_device(ibdev, hfi1_create_port_files); + /* + * Fill in rvt info object. 
+	 */
+	dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
+	dd->verbs_dev.rdi.dparms.props.max_pd = hfi1_max_pds;
+
+	ret = rvt_register_device(&dd->verbs_dev.rdi);
 	if (ret)
 		goto err_reg;
@@ -2086,7 +2092,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 err_class:
 	hfi1_free_agents(dev);
 err_agents:
-	ib_unregister_device(ibdev);
+	rvt_unregister_device(&dd->verbs_dev.rdi);
 err_reg:
 err_verbs_txreq:
 	kmem_cache_destroy(dev->verbs_txreq_cache);
@@ -2102,13 +2108,12 @@ bail:
 	return ret;
 }
 
 void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
 {
 	struct hfi1_ibdev *dev = &dd->verbs_dev;
-	struct ib_device *ibdev = &dev->ibdev;
 
 	hfi1_verbs_unregister_sysfs(dd);
 	hfi1_free_agents(dev);
 
-	ib_unregister_device(ibdev);
+	rvt_unregister_device(&dd->verbs_dev.rdi);
 
 	if (!list_empty(&dev->txwait))
 		dd_dev_err(dd, "txwait list not empty!\n");
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
index a163fc275440..58fb122509b1 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/staging/rdma/hfi1/verbs.h
@@ -62,6 +62,7 @@
 #include 
 #include 
 #include 
+#include 
 
 struct hfi1_ctxtdata;
 struct hfi1_pportdata;
@@ -749,7 +750,7 @@ struct hfi1_ibport {
 struct hfi1_qp_ibdev;
 struct hfi1_ibdev {
-	struct ib_device ibdev;
+	struct rvt_dev_info rdi; /* Must be first */
 	struct list_head pending_mmaps;
 	spinlock_t mmap_offset_lock; /* protect mmap_offset */
 	u32 mmap_offset;
@@ -843,7 +844,10 @@ static inline struct hfi1_qp *to_iqp(struct ib_qp *ibqp)
 
 static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
 {
-	return container_of(ibdev, struct hfi1_ibdev, ibdev);
+	struct rvt_dev_info *rdi;
+
+	rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
+	return container_of(rdi, struct hfi1_ibdev, rdi);
 }
 
 /*
-- 
cgit v1.2.3-59-g8ed1b


From 583be13cde4f90aeac5d3c7ba555fb4909553c16 Mon Sep 17 00:00:00 2001
From: Dennis Dalessandro 
Date: Tue, 19 Jan 2016 14:41:39 -0800
Subject: staging/rdma/hfi1: Add basic rdmavt capability flags for hfi1

Most functionality is still being done in the driver; set flags so that
rdmavt will let hfi1 continue to handle mr, qp, and cq init.

Reviewed-by: Mike Marciniszyn 
Signed-off-by: Dennis Dalessandro 
Signed-off-by: Doug Ledford 
---
 drivers/staging/rdma/hfi1/verbs.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/staging')

diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 0692ec48226f..5e2113244e6d 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -2074,6 +2074,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	 */
 	dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
 	dd->verbs_dev.rdi.dparms.props.max_pd = hfi1_max_pds;
+	dd->verbs_dev.rdi.flags = (RVT_FLAG_MR_INIT_DRIVER |
+				   RVT_FLAG_QP_INIT_DRIVER |
+				   RVT_FLAG_CQ_INIT_DRIVER);
 
 	ret = rvt_register_device(&dd->verbs_dev.rdi);
 	if (ret)
-- 
cgit v1.2.3-59-g8ed1b


From f326674ae374e08b34d8b02b2357bad4ef07317c Mon Sep 17 00:00:00 2001
From: Ira Weiny 
Date: Tue, 19 Jan 2016 14:41:44 -0800
Subject: staging/rdma/hfi1: Consolidate dma ops for hfi1

Remove the dma.c file from hfi1 in favor of the version already present
in rdmavt.
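On the driver side the change reduces to a couple of lines in
hfi1_register_ib_device(); the working assumption (not shown in this
diff) is that rvt_register_device() installs rdmavt's default
ib_dma_mapping_ops when a driver leaves dma_ops NULL:

	ibdev->dma_ops = NULL;		/* was: &hfi1_dma_mapping_ops */
	...
	ret = rvt_register_device(&dd->verbs_dev.rdi);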
Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/verbs.c | 2 +- drivers/staging/rdma/hfi1/verbs.h | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index e63251b9c56b..69fb10f6a111 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -7,7 +7,7 @@ # obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o -hfi1-y := chip.o cq.o device.o diag.o dma.o driver.o efivar.o eprom.o file_ops.o firmware.o \ +hfi1-y := chip.o cq.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ init.o intr.o keys.o mad.o mmap.o mr.o pcie.o pio.o pio_copy.o \ qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs_mcast.o verbs.o diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 5e2113244e6d..347409ef3b1e 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -2063,7 +2063,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->detach_mcast = hfi1_multicast_detach; ibdev->process_mad = hfi1_process_mad; ibdev->mmap = hfi1_mmap; - ibdev->dma_ops = &hfi1_dma_mapping_ops; + ibdev->dma_ops = NULL; ibdev->get_port_immutable = port_immutable; strncpy(ibdev->node_desc, init_utsname()->nodename, diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 58fb122509b1..a505545ed703 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -1151,6 +1151,4 @@ extern unsigned int hfi1_max_srq_wrs; extern const u32 ib_hfi1_rnr_table[]; -extern struct ib_dma_mapping_ops hfi1_dma_mapping_ops; - #endif /* HFI1_VERBS_H */ -- cgit v1.2.3-59-g8ed1b From 4f87ccfca0c29bb0fb9d2e6037656e871714f9e7 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:41:50 -0800 Subject: staging/rdma/hfi1: Use rdmavt protection domain Remove protection domain from hfi1 and use rdmavt's version. Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/keys.c | 4 +-- drivers/staging/rdma/hfi1/mr.c | 2 +- drivers/staging/rdma/hfi1/ruc.c | 4 +-- drivers/staging/rdma/hfi1/verbs.c | 67 ++++----------------------------------- drivers/staging/rdma/hfi1/verbs.h | 15 +-------- 5 files changed, 12 insertions(+), 80 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c index cb4e6087dfdb..57a266fc27dd 100644 --- a/drivers/staging/rdma/hfi1/keys.c +++ b/drivers/staging/rdma/hfi1/keys.c @@ -176,7 +176,7 @@ out: * Check the IB SGE for validity and initialize our internal version * of it. 
*/ -int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd, +int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd, struct hfi1_sge *isge, struct ib_sge *sge, int acc) { struct hfi1_mregion *mr; @@ -285,7 +285,7 @@ int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, */ rcu_read_lock(); if (rkey == 0) { - struct hfi1_pd *pd = to_ipd(qp->ibqp.pd); + struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd); struct hfi1_ibdev *dev = to_idev(pd->ibpd.device); if (pd->user) diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c index a3f8b884fdd6..3f1ef582b6db 100644 --- a/drivers/staging/rdma/hfi1/mr.c +++ b/drivers/staging/rdma/hfi1/mr.c @@ -116,7 +116,7 @@ struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc) struct ib_mr *ret; int rval; - if (to_ipd(pd)->user) { + if (ibpd_to_rvtpd(pd)->user) { ret = ERR_PTR(-EPERM); goto bail; } diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 4a91975b68d7..d255f31ba9fd 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -102,11 +102,11 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe) int i, j, ret; struct ib_wc wc; struct hfi1_lkey_table *rkt; - struct hfi1_pd *pd; + struct rvt_pd *pd; struct hfi1_sge_state *ss; rkt = &to_idev(qp->ibqp.device)->lk_table; - pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); + pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); ss = &qp->r_sge; ss->sg_list = qp->r_sg_list; qp->r_len = 0; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 347409ef3b1e..ddfcfafb4002 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -368,7 +368,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) int j; int acc; struct hfi1_lkey_table *rkt; - struct hfi1_pd *pd; + struct rvt_pd *pd; struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; @@ -413,7 +413,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) return -ENOMEM; rkt = &to_idev(qp->ibqp.device)->lk_table; - pd = to_ipd(qp->ibqp.pd); + pd = ibpd_to_rvtpd(qp->ibqp.pd); wqe = get_swqe_ptr(qp, qp->s_head); @@ -1394,7 +1394,7 @@ static int query_device(struct ib_device *ibdev, props->max_mr = dev->lk_table.max; props->max_fmr = dev->lk_table.max; props->max_map_per_fmr = 32767; - props->max_pd = hfi1_max_pds; + props->max_pd = dev->rdi.dparms.props.max_pd; props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; props->max_qp_init_rd_atom = 255; /* props->max_res_rd_atom */ @@ -1592,61 +1592,6 @@ static int query_gid(struct ib_device *ibdev, u8 port, return ret; } -static struct ib_pd *alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) -{ - struct hfi1_ibdev *dev = to_idev(ibdev); - struct hfi1_pd *pd; - struct ib_pd *ret; - - /* - * This is actually totally arbitrary. Some correctness tests - * assume there's a maximum number of PDs that can be allocated. - * We don't actually have this limit, but we fail the test if - * we allow allocations of more than we report for this value. 
- */ - - pd = kmalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - spin_lock(&dev->n_pds_lock); - if (dev->n_pds_allocated == hfi1_max_pds) { - spin_unlock(&dev->n_pds_lock); - kfree(pd); - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - dev->n_pds_allocated++; - spin_unlock(&dev->n_pds_lock); - - /* ib_alloc_pd() will initialize pd->ibpd. */ - pd->user = udata != NULL; - - ret = &pd->ibpd; - -bail: - return ret; -} - -static int dealloc_pd(struct ib_pd *ibpd) -{ - struct hfi1_pd *pd = to_ipd(ibpd); - struct hfi1_ibdev *dev = to_idev(ibpd->device); - - spin_lock(&dev->n_pds_lock); - dev->n_pds_allocated--; - spin_unlock(&dev->n_pds_lock); - - kfree(pd); - - return 0; -} - /* * convert ah port,sl to sc */ @@ -1920,7 +1865,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) init_ibport(ppd + i); /* Only need to initialize non-zero fields. */ - spin_lock_init(&dev->n_pds_lock); + spin_lock_init(&dev->n_ahs_lock); spin_lock_init(&dev->n_cqs_lock); spin_lock_init(&dev->n_qps_lock); @@ -2029,8 +1974,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->query_gid = query_gid; ibdev->alloc_ucontext = alloc_ucontext; ibdev->dealloc_ucontext = dealloc_ucontext; - ibdev->alloc_pd = alloc_pd; - ibdev->dealloc_pd = dealloc_pd; + ibdev->alloc_pd = NULL; + ibdev->dealloc_pd = NULL; ibdev->create_ah = create_ah; ibdev->destroy_ah = destroy_ah; ibdev->modify_ah = modify_ah; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index a505545ed703..30791491d9cc 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -234,12 +234,6 @@ struct hfi1_mcast { int n_attached; }; -/* Protection domain */ -struct hfi1_pd { - struct ib_pd ibpd; - int user; /* non-zero if created from user space */ -}; - /* Address Handle */ struct hfi1_ah { struct ib_ah ibah; @@ -776,8 +770,6 @@ struct hfi1_ibdev { u64 n_kmem_wait; u64 n_send_schedule; - u32 n_pds_allocated; /* number of PDs allocated for device */ - spinlock_t n_pds_lock; u32 n_ahs_allocated; /* number of AHs allocated for device */ spinlock_t n_ahs_lock; u32 n_cqs_allocated; /* number of CQs allocated for device */ @@ -817,11 +809,6 @@ static inline struct hfi1_mr *to_imr(struct ib_mr *ibmr) return container_of(ibmr, struct hfi1_mr, ibmr); } -static inline struct hfi1_pd *to_ipd(struct ib_pd *ibpd) -{ - return container_of(ibpd, struct hfi1_pd, ibpd); -} - static inline struct hfi1_ah *to_iah(struct ib_ah *ibah) { return container_of(ibah, struct hfi1_ah, ibah); @@ -983,7 +970,7 @@ int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region); void hfi1_free_lkey(struct hfi1_mregion *mr); -int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd, +int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd, struct hfi1_sge *isge, struct ib_sge *sge, int acc); int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, -- cgit v1.2.3-59-g8ed1b From cd4ceee341ca9d8b176762d3ad783e46538589a7 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:41:55 -0800 Subject: staging/rdma/hfi1: Remove MR data structures from hfi1 Remove MR data structures from hfi1 and use the version in rdmavt Reviewed-by: Dean Luick Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/keys.c | 30 +++++++++---------- drivers/staging/rdma/hfi1/mr.c | 20 ++++++------- drivers/staging/rdma/hfi1/ruc.c | 4 +-- drivers/staging/rdma/hfi1/sdma.h | 2 +- drivers/staging/rdma/hfi1/ud.c | 2 +- 
drivers/staging/rdma/hfi1/verbs.c | 16 +++++----- drivers/staging/rdma/hfi1/verbs.h | 63 +++++++-------------------------------- 7 files changed, 47 insertions(+), 90 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c index 57a266fc27dd..ffaaa6fd7a1f 100644 --- a/drivers/staging/rdma/hfi1/keys.c +++ b/drivers/staging/rdma/hfi1/keys.c @@ -63,21 +63,21 @@ * */ -int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region) +int hfi1_alloc_lkey(struct rvt_mregion *mr, int dma_region) { unsigned long flags; u32 r; u32 n; int ret = 0; struct hfi1_ibdev *dev = to_idev(mr->pd->device); - struct hfi1_lkey_table *rkt = &dev->lk_table; + struct rvt_lkey_table *rkt = &dev->lk_table; hfi1_get_mr(mr); spin_lock_irqsave(&rkt->lock, flags); /* special case for dma_mr lkey == 0 */ if (dma_region) { - struct hfi1_mregion *tmr; + struct rvt_mregion *tmr; tmr = rcu_access_pointer(dev->dma_mr); if (!tmr) { @@ -133,13 +133,13 @@ bail: * hfi1_free_lkey - free an lkey * @mr: mr to free from tables */ -void hfi1_free_lkey(struct hfi1_mregion *mr) +void hfi1_free_lkey(struct rvt_mregion *mr) { unsigned long flags; u32 lkey = mr->lkey; u32 r; struct hfi1_ibdev *dev = to_idev(mr->pd->device); - struct hfi1_lkey_table *rkt = &dev->lk_table; + struct rvt_lkey_table *rkt = &dev->lk_table; int freed = 0; spin_lock_irqsave(&rkt->lock, flags); @@ -176,10 +176,10 @@ out: * Check the IB SGE for validity and initialize our internal version * of it. */ -int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd, +int hfi1_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, struct hfi1_sge *isge, struct ib_sge *sge, int acc) { - struct hfi1_mregion *mr; + struct rvt_mregion *mr; unsigned n, m; size_t off; @@ -231,15 +231,15 @@ int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd, entries_spanned_by_off = off >> mr->page_shift; off -= (entries_spanned_by_off << mr->page_shift); - m = entries_spanned_by_off / HFI1_SEGSZ; - n = entries_spanned_by_off % HFI1_SEGSZ; + m = entries_spanned_by_off / RVT_SEGSZ; + n = entries_spanned_by_off % RVT_SEGSZ; } else { m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; - if (n >= HFI1_SEGSZ) { + if (n >= RVT_SEGSZ) { m++; n = 0; } @@ -274,8 +274,8 @@ bail: int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc) { - struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; - struct hfi1_mregion *mr; + struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; + struct rvt_mregion *mr; unsigned n, m; size_t off; @@ -328,15 +328,15 @@ int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, entries_spanned_by_off = off >> mr->page_shift; off -= (entries_spanned_by_off << mr->page_shift); - m = entries_spanned_by_off / HFI1_SEGSZ; - n = entries_spanned_by_off % HFI1_SEGSZ; + m = entries_spanned_by_off / RVT_SEGSZ; + n = entries_spanned_by_off % RVT_SEGSZ; } else { m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; - if (n >= HFI1_SEGSZ) { + if (n >= RVT_SEGSZ) { m++; n = 0; } diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c index 3f1ef582b6db..7e14965a02cd 100644 --- a/drivers/staging/rdma/hfi1/mr.c +++ b/drivers/staging/rdma/hfi1/mr.c @@ -56,7 +56,7 @@ /* Fast memory region */ struct hfi1_fmr { struct ib_fmr ibfmr; - struct hfi1_mregion mr; /* must be last */ + struct rvt_mregion mr; /* must be last */ }; static inline struct 
hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr) @@ -64,13 +64,13 @@ static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr) return container_of(ibfmr, struct hfi1_fmr, ibfmr); } -static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd, +static int init_mregion(struct rvt_mregion *mr, struct ib_pd *pd, int count) { int m, i = 0; int rval = 0; - m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ; + m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; for (; i < m; i++) { mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); if (!mr->map[i]) @@ -91,7 +91,7 @@ bail: goto out; } -static void deinit_mregion(struct hfi1_mregion *mr) +static void deinit_mregion(struct rvt_mregion *mr) { int i = mr->mapsz; @@ -159,7 +159,7 @@ static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd) int m; /* Allocate struct plus pointers to first level page tables. */ - m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ; + m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); if (!mr) goto bail; @@ -245,7 +245,7 @@ struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mr->mr.map[m]->segs[n].vaddr = vaddr; mr->mr.map[m]->segs[n].length = umem->page_size; n++; - if (n == HFI1_SEGSZ) { + if (n == RVT_SEGSZ) { m++; n = 0; } @@ -333,7 +333,7 @@ struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags, int rval = -ENOMEM; /* Allocate struct plus pointers to first level page tables. */ - m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ; + m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ; fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); if (!fmr) goto bail; @@ -385,7 +385,7 @@ int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int list_len, u64 iova) { struct hfi1_fmr *fmr = to_ifmr(ibfmr); - struct hfi1_lkey_table *rkt; + struct rvt_lkey_table *rkt; unsigned long flags; int m, n, i; u32 ps; @@ -410,7 +410,7 @@ int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, for (i = 0; i < list_len; i++) { fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; fmr->mr.map[m]->segs[n].length = ps; - if (++n == HFI1_SEGSZ) { + if (++n == RVT_SEGSZ) { m++; n = 0; } @@ -431,7 +431,7 @@ bail: int hfi1_unmap_fmr(struct list_head *fmr_list) { struct hfi1_fmr *fmr; - struct hfi1_lkey_table *rkt; + struct rvt_lkey_table *rkt; unsigned long flags; list_for_each_entry(fmr, fmr_list, ibfmr.list) { diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index d255f31ba9fd..ea5efa4da69e 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -101,7 +101,7 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe) { int i, j, ret; struct ib_wc wc; - struct hfi1_lkey_table *rkt; + struct rvt_lkey_table *rkt; struct rvt_pd *pd; struct hfi1_sge_state *ss; @@ -534,7 +534,7 @@ again: if (--sqp->s_sge.num_sge) *sge = *sqp->s_sge.sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= HFI1_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 757017a04d95..fbd0e41be135 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -381,7 +381,7 @@ struct verbs_txreq { struct sdma_txreq txreq; struct hfi1_qp *qp; struct hfi1_swqe *wqe; - struct hfi1_mregion *mr; + struct rvt_mregion *mr; struct hfi1_sge_state *ss; struct sdma_engine *sde; u16 hdr_dwords; diff --git a/drivers/staging/rdma/hfi1/ud.c 
b/drivers/staging/rdma/hfi1/ud.c index 25e6053c38db..970d42ff32bb 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -210,7 +210,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe) if (--ssge.num_sge) *sge = *ssge.sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= HFI1_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index ddfcfafb4002..dc846d55f4d7 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -300,7 +300,7 @@ void hfi1_copy_sge( if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= HFI1_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; @@ -341,7 +341,7 @@ void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release) if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= HFI1_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; @@ -367,7 +367,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) int i; int j; int acc; - struct hfi1_lkey_table *rkt; + struct rvt_lkey_table *rkt; struct rvt_pd *pd; struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_pportdata *ppd; @@ -725,7 +725,7 @@ void update_sge(struct hfi1_sge_state *ss, u32 length) if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= HFI1_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) return; sge->n = 0; @@ -1883,13 +1883,13 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) spin_lock_init(&dev->lk_table.lock); dev->lk_table.max = 1 << hfi1_lkey_table_size; /* ensure generation is at least 4 bits (keys.c) */ - if (hfi1_lkey_table_size > MAX_LKEY_TABLE_BITS) { + if (hfi1_lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) { dd_dev_warn(dd, "lkey bits %u too large, reduced to %u\n", - hfi1_lkey_table_size, MAX_LKEY_TABLE_BITS); - hfi1_lkey_table_size = MAX_LKEY_TABLE_BITS; + hfi1_lkey_table_size, RVT_MAX_LKEY_TABLE_BITS); + hfi1_lkey_table_size = RVT_MAX_LKEY_TABLE_BITS; } lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); - dev->lk_table.table = (struct hfi1_mregion __rcu **) + dev->lk_table.table = (struct rvt_mregion __rcu **) vmalloc(lk_tab_size); if (dev->lk_table.table == NULL) { ret = -ENOMEM; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 30791491d9cc..14aa81c1b11c 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -284,45 +284,12 @@ struct hfi1_cq { struct hfi1_mmap_info *ip; }; -/* - * A segment is a linear region of low physical memory. - * Used by the verbs layer. - */ -struct hfi1_seg { - void *vaddr; - size_t length; -}; - -/* The number of hfi1_segs that fit in a page. 
*/ -#define HFI1_SEGSZ (PAGE_SIZE / sizeof(struct hfi1_seg)) - -struct hfi1_segarray { - struct hfi1_seg segs[HFI1_SEGSZ]; -}; - -struct hfi1_mregion { - struct ib_pd *pd; /* shares refcnt of ibmr.pd */ - u64 user_base; /* User's address for this region */ - u64 iova; /* IB start address of this region */ - size_t length; - u32 lkey; - u32 offset; /* offset (bytes) to start of region */ - int access_flags; - u32 max_segs; /* number of hfi1_segs in all the arrays */ - u32 mapsz; /* size of the map array */ - u8 page_shift; /* 0 - non unform/non powerof2 sizes */ - u8 lkey_published; /* in global table */ - struct completion comp; /* complete when refcount goes to zero */ - atomic_t refcount; - struct hfi1_segarray *map[0]; /* the segments */ -}; - /* * These keep track of the copy progress within a memory region. * Used by the verbs layer. */ struct hfi1_sge { - struct hfi1_mregion *mr; + struct rvt_mregion *mr; void *vaddr; /* kernel virtual address of segment */ u32 sge_length; /* length of the SGE */ u32 length; /* remaining length of the segment */ @@ -334,7 +301,7 @@ struct hfi1_sge { struct hfi1_mr { struct ib_mr ibmr; struct ib_umem *umem; - struct hfi1_mregion mr; /* must be last */ + struct rvt_mregion mr; /* must be last */ }; /* @@ -501,7 +468,7 @@ struct hfi1_qp { u32 s_flags; struct hfi1_swqe *s_wqe; struct hfi1_sge_state s_sge; /* current send request data */ - struct hfi1_mregion *s_rdma_mr; + struct rvt_mregion *s_rdma_mr; u32 s_cur_size; /* size of send packet in bytes */ u32 s_len; /* total length of s_sge */ u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ @@ -655,16 +622,6 @@ static inline struct hfi1_rwqe *get_rwqe_ptr(struct hfi1_rq *rq, unsigned n) rq->max_sge * sizeof(struct ib_sge)) * n); } -#define MAX_LKEY_TABLE_BITS 23 - -struct hfi1_lkey_table { - spinlock_t lock; /* protect changes in this struct */ - u32 next; /* next unused index (speeds search) */ - u32 gen; /* generation count */ - u32 max; /* size of the table */ - struct hfi1_mregion __rcu **table; -}; - struct hfi1_opcode_stats { u64 n_packets; /* number of packets */ u64 n_bytes; /* total number of bytes */ @@ -748,12 +705,12 @@ struct hfi1_ibdev { struct list_head pending_mmaps; spinlock_t mmap_offset_lock; /* protect mmap_offset */ u32 mmap_offset; - struct hfi1_mregion __rcu *dma_mr; + struct rvt_mregion __rcu *dma_mr; struct hfi1_qp_ibdev *qp_dev; /* QP numbers are shared by all IB ports */ - struct hfi1_lkey_table lk_table; + struct rvt_lkey_table lk_table; /* protect wait lists */ seqlock_t iowait_lock; struct list_head txwait; /* list for wait verbs_txreq */ @@ -966,11 +923,11 @@ void hfi1_ud_rcv(struct hfi1_packet *packet); int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey); -int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region); +int hfi1_alloc_lkey(struct rvt_mregion *mr, int dma_region); -void hfi1_free_lkey(struct hfi1_mregion *mr); +void hfi1_free_lkey(struct rvt_mregion *mr); -int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd, +int hfi1_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, struct hfi1_sge *isge, struct ib_sge *sge, int acc); int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, @@ -1035,12 +992,12 @@ int hfi1_unmap_fmr(struct list_head *fmr_list); int hfi1_dealloc_fmr(struct ib_fmr *ibfmr); -static inline void hfi1_get_mr(struct hfi1_mregion *mr) +static inline void hfi1_get_mr(struct rvt_mregion *mr) { atomic_inc(&mr->refcount); } -static inline void hfi1_put_mr(struct hfi1_mregion *mr) +static inline void hfi1_put_mr(struct 
rvt_mregion *mr) { if (unlikely(atomic_dec_and_test(&mr->refcount))) complete(&mr->comp); -- cgit v1.2.3-59-g8ed1b From 4c6829c5c7d6186b76cf0817f9aa8e63831a6a27 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:00 -0800 Subject: staging/rdma/hfi1: Remove driver specific members from hfi1 qp type In preparation for moving the queue pair data structure to rdmavt the members of the driver specific queue pairs which are not common need to be pushed off to a private driver structure. This structure will be available in the queue pair once moved to rdmavt as a void pointer. This patch while not adding a lot of value in and of itself is a prerequisite to move the queue pair out of the drivers and into rdmavt. The driver specific, private queue pair data structure should condense as more of the send side code moves to rdmavt. Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/diag.c | 3 +- drivers/staging/rdma/hfi1/pio.c | 6 ++- drivers/staging/rdma/hfi1/qp.c | 78 +++++++++++++++++++++++++-------------- drivers/staging/rdma/hfi1/qp.h | 15 +++++--- drivers/staging/rdma/hfi1/rc.c | 17 ++++++--- drivers/staging/rdma/hfi1/ruc.c | 48 ++++++++++++------------ drivers/staging/rdma/hfi1/uc.c | 7 ++-- drivers/staging/rdma/hfi1/ud.c | 37 ++++++++++--------- drivers/staging/rdma/hfi1/verbs.c | 57 +++++++++++++++++----------- drivers/staging/rdma/hfi1/verbs.h | 30 +++++++++++---- 10 files changed, 182 insertions(+), 116 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index fbe9b15c9a65..15c616ada4bf 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -1619,7 +1619,8 @@ int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { - struct ahg_ib_header *ahdr = qp->s_hdr; + struct hfi1_qp_priv *priv = qp->priv; + struct ahg_ib_header *ahdr = priv->s_hdr; u32 hdrwords = qp->s_hdrwords; struct hfi1_sge_state *ss = qp->s_cur_sge; u32 len = qp->s_cur_size; diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index b51a4416312b..25d65f9a0b94 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -1528,6 +1528,7 @@ static void sc_piobufavail(struct send_context *sc) struct list_head *list; struct hfi1_qp *qps[PIO_WAIT_BATCH_SIZE]; struct hfi1_qp *qp; + struct hfi1_qp_priv *priv; unsigned long flags; unsigned i, n = 0; @@ -1547,8 +1548,9 @@ static void sc_piobufavail(struct send_context *sc) if (n == ARRAY_SIZE(qps)) goto full; wait = list_first_entry(list, struct iowait, list); - qp = container_of(wait, struct hfi1_qp, s_iowait); - list_del_init(&qp->s_iowait.list); + qp = iowait_to_qp(wait); + priv = qp->priv; + list_del_init(&priv->s_iowait.list); /* refcount held until actual wake up */ qps[n++] = qp; } diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index bb447b56dd6b..cacef55dfb74 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -349,11 +349,12 @@ bail: */ static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type) { + struct hfi1_qp_priv *priv = qp->priv; qp->remote_qpn = 0; qp->qkey = 0; qp->qp_access_flags = 0; iowait_init( - &qp->s_iowait, + &priv->s_iowait, 1, hfi1_do_send, iowait_sleep, @@ -378,7 +379,7 @@ static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type) } 
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; qp->r_nak_state = 0; - qp->r_adefered = 0; + priv->r_adefered = 0; qp->r_aflags = 0; qp->r_flags = 0; qp->s_head = 0; @@ -460,6 +461,7 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err) { struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); + struct hfi1_qp_priv *priv = qp->priv; struct ib_wc wc; int ret = 0; @@ -477,9 +479,9 @@ int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err) qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND; write_seqlock(&dev->iowait_lock); - if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) { + if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) { qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; - list_del_init(&qp->s_iowait.list); + list_del_init(&priv->s_iowait.list); if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } @@ -544,11 +546,13 @@ bail: static void flush_tx_list(struct hfi1_qp *qp) { - while (!list_empty(&qp->s_iowait.tx_head)) { + struct hfi1_qp_priv *priv = qp->priv; + + while (!list_empty(&priv->s_iowait.tx_head)) { struct sdma_txreq *tx; tx = list_first_entry( - &qp->s_iowait.tx_head, + &priv->s_iowait.tx_head, struct sdma_txreq, list); list_del_init(&tx->list); @@ -559,12 +563,13 @@ static void flush_tx_list(struct hfi1_qp *qp) static void flush_iowait(struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); unsigned long flags; write_seqlock_irqsave(&dev->iowait_lock, flags); - if (!list_empty(&qp->s_iowait.list)) { - list_del_init(&qp->s_iowait.list); + if (!list_empty(&priv->s_iowait.list)) { + list_del_init(&priv->s_iowait.list); if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } @@ -612,6 +617,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, { struct hfi1_ibdev *dev = to_idev(ibqp->device); struct hfi1_qp *qp = to_iqp(ibqp); + struct hfi1_qp_priv *priv = qp->priv; enum ib_qp_state cur_state, new_state; struct ib_event ev; int lastwqe = 0; @@ -738,9 +744,9 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, spin_unlock(&qp->s_lock); spin_unlock_irq(&qp->r_lock); /* Stop the sending work queue and retry timer */ - cancel_work_sync(&qp->s_iowait.iowork); + cancel_work_sync(&priv->s_iowait.iowork); del_timer_sync(&qp->s_timer); - iowait_sdma_drain(&qp->s_iowait); + iowait_sdma_drain(&priv->s_iowait); flush_tx_list(qp); remove_qp(dev, qp); wait_event(qp->wait, !atomic_read(&qp->refcount)); @@ -805,8 +811,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp->remote_ah_attr = attr->ah_attr; qp->s_srate = attr->ah_attr.static_rate; qp->srate_mbps = ib_rate_to_mbps(qp->s_srate); - qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); - qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc); + priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); + priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); } if (attr_mask & IB_QP_ALT_PATH) { @@ -821,8 +827,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp->port_num = qp->alt_ah_attr.port_num; qp->s_pkey_index = qp->s_alt_pkey_index; qp->s_flags |= HFI1_S_AHG_CLEAR; - qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); - qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc); + priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); + priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); } } @@ -1031,6 +1037,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, struct ib_udata *udata) { struct hfi1_qp *qp; + struct hfi1_qp_priv *priv; int err; 
struct hfi1_swqe *swq = NULL; struct hfi1_ibdev *dev; @@ -1098,11 +1105,18 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, goto bail_swq; } RCU_INIT_POINTER(qp->next, NULL); - qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL); - if (!qp->s_hdr) { + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = ERR_PTR(-ENOMEM); + goto bail_qp_priv; + } + priv->owner = qp; + priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL); + if (!priv->s_hdr) { ret = ERR_PTR(-ENOMEM); goto bail_qp; } + qp->priv = priv; qp->timeout_jiffies = usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL); @@ -1245,7 +1259,9 @@ bail_ip: vfree(qp->r_rq.wq); free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num); bail_qp: - kfree(qp->s_hdr); + kfree(priv->s_hdr); + kfree(priv); +bail_qp_priv: kfree(qp); bail_swq: vfree(swq); @@ -1266,6 +1282,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) { struct hfi1_qp *qp = to_iqp(ibqp); struct hfi1_ibdev *dev = to_idev(ibqp->device); + struct hfi1_qp_priv *priv = qp->priv; /* Make sure HW and driver activity is stopped. */ spin_lock_irq(&qp->r_lock); @@ -1276,9 +1293,9 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT); spin_unlock(&qp->s_lock); spin_unlock_irq(&qp->r_lock); - cancel_work_sync(&qp->s_iowait.iowork); + cancel_work_sync(&priv->s_iowait.iowork); del_timer_sync(&qp->s_timer); - iowait_sdma_drain(&qp->s_iowait); + iowait_sdma_drain(&priv->s_iowait); flush_tx_list(qp); remove_qp(dev, qp); wait_event(qp->wait, !atomic_read(&qp->refcount)); @@ -1301,7 +1318,8 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) else vfree(qp->r_rq.wq); vfree(qp->s_wq); - kfree(qp->s_hdr); + kfree(priv->s_hdr); + kfree(priv); kfree(qp); return 0; } @@ -1422,11 +1440,13 @@ static int iowait_sleep( { struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq); struct hfi1_qp *qp; + struct hfi1_qp_priv *priv; unsigned long flags; int ret = 0; struct hfi1_ibdev *dev; qp = tx->qp; + priv = qp->priv; spin_lock_irqsave(&qp->s_lock, flags); if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { @@ -1442,13 +1462,13 @@ static int iowait_sleep( write_seqlock(&dev->iowait_lock); if (sdma_progress(sde, seq, stx)) goto eagain; - if (list_empty(&qp->s_iowait.list)) { + if (list_empty(&priv->s_iowait.list)) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); ibp->n_dmawait++; qp->s_flags |= HFI1_S_WAIT_DMA_DESC; - list_add_tail(&qp->s_iowait.list, &sde->dmawait); + list_add_tail(&priv->s_iowait.list, &sde->dmawait); trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC); atomic_inc(&qp->refcount); } @@ -1470,7 +1490,7 @@ eagain: static void iowait_wakeup(struct iowait *wait, int reason) { - struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait); + struct hfi1_qp *qp = iowait_to_qp(wait); WARN_ON(reason != SDMA_AVAIL_REASON); hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC); @@ -1651,9 +1671,10 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) { struct hfi1_swqe *wqe; struct hfi1_qp *qp = iter->qp; + struct hfi1_qp_priv *priv = qp->priv; struct sdma_engine *sde; - sde = qp_to_sdma_engine(qp, qp->s_sc); + sde = qp_to_sdma_engine(qp, priv->s_sc); wqe = get_swqe_ptr(qp, qp->s_last); seq_printf(s, "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n", @@ -1666,8 +1687,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) wqe ? 
wqe->wr.opcode : 0, qp->s_hdrwords, qp->s_flags, - atomic_read(&qp->s_iowait.sdma_busy), - !list_empty(&qp->s_iowait.list), + atomic_read(&priv->s_iowait.sdma_busy), + !list_empty(&priv->s_iowait.list), qp->timeout, wqe ? wqe->ssn : 0, qp->s_lsn, @@ -1706,6 +1727,7 @@ void qp_comm_est(struct hfi1_qp *qp) */ void hfi1_migrate_qp(struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; struct ib_event ev; qp->s_mig_state = IB_MIG_MIGRATED; @@ -1713,8 +1735,8 @@ void hfi1_migrate_qp(struct hfi1_qp *qp) qp->port_num = qp->alt_ah_attr.port_num; qp->s_pkey_index = qp->s_alt_pkey_index; qp->s_flags |= HFI1_S_AHG_CLEAR; - qp->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr); - qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc); + priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr); + priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 19b16a9a99ea..474c838e3b50 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -123,10 +123,12 @@ static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp, */ static inline void clear_ahg(struct hfi1_qp *qp) { - qp->s_hdr->ahgcount = 0; + struct hfi1_qp_priv *priv = qp->priv; + + priv->s_hdr->ahgcount = 0; qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR); - if (qp->s_sde && qp->s_ahgidx >= 0) - sdma_ahg_free(qp->s_sde, qp->s_ahgidx); + if (priv->s_sde && qp->s_ahgidx >= 0) + sdma_ahg_free(priv->s_sde, qp->s_ahgidx); qp->s_ahgidx = -1; } @@ -257,14 +259,15 @@ void qp_comm_est(struct hfi1_qp *qp); */ static inline void _hfi1_schedule_send(struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); - iowait_schedule(&qp->s_iowait, ppd->hfi1_wq, - qp->s_sde ? - qp->s_sde->cpu : + iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, + priv->s_sde ? + priv->s_sde->cpu : cpumask_first(cpumask_of_node(dd->assigned_node_id))); } diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 6f4a155f7931..70d5bd1ec1d2 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -259,6 +259,7 @@ bail: */ int hfi1_make_rc_req(struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); struct hfi1_other_headers *ohdr; struct hfi1_sge_state *ss; @@ -275,9 +276,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp) int middle = 0; int delta; - ohdr = &qp->s_hdr->ibh.u.oth; + ohdr = &priv->s_hdr->ibh.u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) - ohdr = &qp->s_hdr->ibh.u.l.oth; + ohdr = &priv->s_hdr->ibh.u.l.oth; /* * The lock is needed to synchronize between the sending tasklet, @@ -297,7 +298,7 @@ int hfi1_make_rc_req(struct hfi1_qp *qp) if (qp->s_last == qp->s_head) goto bail; /* If DMAs are in progress, we can't flush immediately. 
*/ - if (atomic_read(&qp->s_iowait.sdma_busy)) { + if (atomic_read(&priv->s_iowait.sdma_busy)) { qp->s_flags |= HFI1_S_WAIT_DMA; goto bail; } @@ -1620,7 +1621,9 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd, static inline void rc_cancel_ack(struct hfi1_qp *qp) { - qp->r_adefered = 0; + struct hfi1_qp_priv *priv = qp->priv; + + priv->r_adefered = 0; if (list_empty(&qp->rspwait)) return; list_del_init(&qp->rspwait); @@ -2347,11 +2350,13 @@ send_last: qp->r_nak_state = 0; /* Send an ACK if requested or required. */ if (psn & IB_BTH_REQ_ACK) { + struct hfi1_qp_priv *priv = qp->priv; + if (packet->numpkt == 0) { rc_cancel_ack(qp); goto send_ack; } - if (qp->r_adefered >= HFI1_PSN_CREDIT) { + if (priv->r_adefered >= HFI1_PSN_CREDIT) { rc_cancel_ack(qp); goto send_ack; } @@ -2359,7 +2364,7 @@ send_last: rc_cancel_ack(qp); goto send_ack; } - qp->r_adefered++; + priv->r_adefered++; rc_defered_ack(rcd, qp); } return; diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index ea5efa4da69e..55ed00dd0218 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -692,27 +692,28 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, */ static inline void build_ahg(struct hfi1_qp *qp, u32 npsn) { + struct hfi1_qp_priv *priv = qp->priv; if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR)) clear_ahg(qp); if (!(qp->s_flags & HFI1_S_AHG_VALID)) { /* first middle that needs copy */ if (qp->s_ahgidx < 0) - qp->s_ahgidx = sdma_ahg_alloc(qp->s_sde); + qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde); if (qp->s_ahgidx >= 0) { qp->s_ahgpsn = npsn; - qp->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY; + priv->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY; /* save to protect a change in another thread */ - qp->s_hdr->sde = qp->s_sde; - qp->s_hdr->ahgidx = qp->s_ahgidx; + priv->s_hdr->sde = priv->s_sde; + priv->s_hdr->ahgidx = qp->s_ahgidx; qp->s_flags |= HFI1_S_AHG_VALID; } } else { /* subsequent middle after valid */ if (qp->s_ahgidx >= 0) { - qp->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG; - qp->s_hdr->ahgidx = qp->s_ahgidx; - qp->s_hdr->ahgcount++; - qp->s_hdr->ahgdesc[0] = + priv->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG; + priv->s_hdr->ahgidx = qp->s_ahgidx; + priv->s_hdr->ahgcount++; + priv->s_hdr->ahgdesc[0] = sdma_build_ahg_descriptor( (__force u16)cpu_to_be16((u16)npsn), BTH2_OFFSET, @@ -720,8 +721,8 @@ static inline void build_ahg(struct hfi1_qp *qp, u32 npsn) 16); if ((npsn & 0xffff0000) != (qp->s_ahgpsn & 0xffff0000)) { - qp->s_hdr->ahgcount++; - qp->s_hdr->ahgdesc[1] = + priv->s_hdr->ahgcount++; + priv->s_hdr->ahgdesc[1] = sdma_build_ahg_descriptor( (__force u16)cpu_to_be16( (u16)(npsn >> 16)), @@ -737,6 +738,7 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, u32 bth0, u32 bth2, int middle) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + struct hfi1_qp_priv *priv = qp->priv; u16 lrh0; u32 nwords; u32 extra_bytes; @@ -747,13 +749,13 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, nwords = (qp->s_cur_size + extra_bytes) >> 2; lrh0 = HFI1_LRH_BTH; if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { - qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh, - &qp->remote_ah_attr.grh, - qp->s_hdrwords, nwords); + qp->s_hdrwords += hfi1_make_grh(ibp, &priv->s_hdr->ibh.u.l.grh, + &qp->remote_ah_attr.grh, + qp->s_hdrwords, nwords); lrh0 = HFI1_LRH_GRH; middle = 0; } - lrh0 |= (qp->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4; + lrh0 |= (priv->s_sc & 
0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4; /* * reset s_hdr/AHG fields * @@ -765,10 +767,10 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, * build_ahg() will modify as appropriate * to use the AHG feature. */ - qp->s_hdr->tx_flags = 0; - qp->s_hdr->ahgcount = 0; - qp->s_hdr->ahgidx = 0; - qp->s_hdr->sde = NULL; + priv->s_hdr->tx_flags = 0; + priv->s_hdr->ahgcount = 0; + priv->s_hdr->ahgidx = 0; + priv->s_hdr->sde = NULL; if (qp->s_mig_state == IB_MIG_MIGRATED) bth0 |= IB_BTH_MIG_REQ; else @@ -777,11 +779,11 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, build_ahg(qp, bth2); else qp->s_flags &= ~HFI1_S_AHG_VALID; - qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); - qp->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); - qp->s_hdr->ibh.lrh[2] = + priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); + priv->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); + priv->s_hdr->ibh.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); - qp->s_hdr->ibh.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | + priv->s_hdr->ibh.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | qp->remote_ah_attr.src_path_bits); bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index); bth0 |= extra_bytes << 20; @@ -810,7 +812,7 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, void hfi1_do_send(struct work_struct *work) { struct iowait *wait = container_of(work, struct iowait, iowork); - struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait); + struct hfi1_qp *qp = iowait_to_qp(wait); struct hfi1_pkt_state ps; int (*make_req)(struct hfi1_qp *qp); unsigned long flags; diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 4f2a7889a852..1908a288cfb7 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -63,6 +63,7 @@ */ int hfi1_make_uc_req(struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; struct hfi1_swqe *wqe; unsigned long flags; @@ -82,7 +83,7 @@ int hfi1_make_uc_req(struct hfi1_qp *qp) if (qp->s_last == qp->s_head) goto bail; /* If DMAs are in progress, we can't flush immediately. */ - if (atomic_read(&qp->s_iowait.sdma_busy)) { + if (atomic_read(&priv->s_iowait.sdma_busy)) { qp->s_flags |= HFI1_S_WAIT_DMA; goto bail; } @@ -92,9 +93,9 @@ int hfi1_make_uc_req(struct hfi1_qp *qp) goto done; } - ohdr = &qp->s_hdr->ibh.u.oth; + ohdr = &priv->s_hdr->ibh.u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) - ohdr = &qp->s_hdr->ibh.u.l.oth; + ohdr = &priv->s_hdr->ibh.u.l.oth; /* Get the next send request. */ wqe = get_swqe_ptr(qp, qp->s_cur); diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index 970d42ff32bb..00d1ae757529 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -264,6 +264,7 @@ drop: */ int hfi1_make_ud_req(struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; struct ib_ah_attr *ah_attr; struct hfi1_pportdata *ppd; @@ -288,7 +289,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) if (qp->s_last == qp->s_head) goto bail; /* If DMAs are in progress, we can't flush immediately. */ - if (atomic_read(&qp->s_iowait.sdma_busy)) { + if (atomic_read(&priv->s_iowait.sdma_busy)) { qp->s_flags |= HFI1_S_WAIT_DMA; goto bail; } @@ -322,7 +323,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) * Instead of waiting, we could queue a * zero length descriptor so we get a callback. 
*/ - if (atomic_read(&qp->s_iowait.sdma_busy)) { + if (atomic_read(&priv->s_iowait.sdma_busy)) { qp->s_flags |= HFI1_S_WAIT_DMA; goto bail; } @@ -353,11 +354,11 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) if (ah_attr->ah_flags & IB_AH_GRH) { /* Header size in 32-bit words. */ - qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh, + qp->s_hdrwords += hfi1_make_grh(ibp, &priv->s_hdr->ibh.u.l.grh, &ah_attr->grh, qp->s_hdrwords, nwords); lrh0 = HFI1_LRH_GRH; - ohdr = &qp->s_hdr->ibh.u.l.oth; + ohdr = &priv->s_hdr->ibh.u.l.oth; /* * Don't worry about sending to locally attached multicast * QPs. It is unspecified by the spec. what happens. @@ -365,7 +366,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) } else { /* Header size in 32-bit words. */ lrh0 = HFI1_LRH_BTH; - ohdr = &qp->s_hdr->ibh.u.oth; + ohdr = &priv->s_hdr->ibh.u.oth; } if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { qp->s_hdrwords++; @@ -377,25 +378,25 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) lrh0 |= (ah_attr->sl & 0xf) << 4; if (qp->ibqp.qp_type == IB_QPT_SMI) { lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */ - qp->s_sc = 0xf; + priv->s_sc = 0xf; } else { lrh0 |= (sc5 & 0xf) << 12; - qp->s_sc = sc5; + priv->s_sc = sc5; } - qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc); - qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); - qp->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ - qp->s_hdr->ibh.lrh[2] = + priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); + priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); + priv->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ + priv->s_hdr->ibh.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) - qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE; + priv->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE; else { lid = ppd->lid; if (lid) { lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1); - qp->s_hdr->ibh.lrh[3] = cpu_to_be16(lid); + priv->s_hdr->ibh.lrh[3] = cpu_to_be16(lid); } else - qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE; + priv->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; @@ -415,10 +416,10 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) qp->qkey : wqe->ud_wr.remote_qkey); ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); /* disarm any ahg */ - qp->s_hdr->ahgcount = 0; - qp->s_hdr->ahgidx = 0; - qp->s_hdr->tx_flags = 0; - qp->s_hdr->sde = NULL; + priv->s_hdr->ahgcount = 0; + priv->s_hdr->ahgidx = 0; + priv->s_hdr->tx_flags = 0; + priv->s_hdr->sde = NULL; done: ret = 1; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index dc846d55f4d7..b8c6f742b18f 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -486,6 +486,7 @@ static int post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct hfi1_qp *qp = to_iqp(ibqp); + struct hfi1_qp_priv *priv = qp->priv; int err = 0; int call_send; unsigned long flags; @@ -515,7 +516,7 @@ bail: if (nreq && !call_send) _hfi1_schedule_send(qp); if (nreq && call_send) - hfi1_do_send(&qp->s_iowait.iowork); + hfi1_do_send(&priv->s_iowait.iowork); return err; } @@ -698,12 +699,14 @@ static void mem_timer(unsigned long data) struct hfi1_qp *qp = NULL; struct iowait *wait; unsigned long flags; + struct hfi1_qp_priv *priv; write_seqlock_irqsave(&dev->iowait_lock, flags); if (!list_empty(list)) { wait = list_first_entry(list, struct iowait, list); - qp = container_of(wait, struct hfi1_qp, s_iowait); - list_del_init(&qp->s_iowait.list); + qp = 
iowait_to_qp(wait); + priv = qp->priv; + list_del_init(&priv->s_iowait.list); /* refcount held until actual wake up */ if (!list_empty(list)) mod_timer(&dev->mem_timer, jiffies + 1); @@ -738,6 +741,7 @@ void update_sge(struct hfi1_sge_state *ss, u32 length) static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; struct verbs_txreq *tx; unsigned long flags; @@ -746,10 +750,10 @@ static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, spin_lock_irqsave(&qp->s_lock, flags); write_seqlock(&dev->iowait_lock); if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK && - list_empty(&qp->s_iowait.list)) { + list_empty(&priv->s_iowait.list)) { dev->n_txwait++; qp->s_flags |= HFI1_S_WAIT_TX; - list_add_tail(&qp->s_iowait.list, &dev->txwait); + list_add_tail(&priv->s_iowait.list, &dev->txwait); trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX); atomic_inc(&qp->refcount); } @@ -783,6 +787,7 @@ void hfi1_put_txreq(struct verbs_txreq *tx) struct hfi1_qp *qp; unsigned long flags; unsigned int seq; + struct hfi1_qp_priv *priv; qp = tx->qp; dev = to_idev(qp->ibqp.device); @@ -805,8 +810,9 @@ void hfi1_put_txreq(struct verbs_txreq *tx) /* Wake up first QP wanting a free struct */ wait = list_first_entry(&dev->txwait, struct iowait, list); - qp = container_of(wait, struct hfi1_qp, s_iowait); - list_del_init(&qp->s_iowait.list); + qp = iowait_to_qp(wait); + priv = qp->priv; + list_del_init(&priv->s_iowait.list); /* refcount held until actual wake up */ write_sequnlock_irqrestore(&dev->iowait_lock, flags); hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX); @@ -856,17 +862,18 @@ static void verbs_sdma_complete( static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; unsigned long flags; int ret = 0; spin_lock_irqsave(&qp->s_lock, flags); if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { write_seqlock(&dev->iowait_lock); - if (list_empty(&qp->s_iowait.list)) { + if (list_empty(&priv->s_iowait.list)) { if (list_empty(&dev->memwait)) mod_timer(&dev->mem_timer, jiffies + 1); qp->s_flags |= HFI1_S_WAIT_KMEM; - list_add_tail(&qp->s_iowait.list, &dev->memwait); + list_add_tail(&priv->s_iowait.list, &dev->memwait); trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM); atomic_inc(&qp->refcount); } @@ -1004,7 +1011,8 @@ bail_txadd: int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { - struct ahg_ib_header *ahdr = qp->s_hdr; + struct hfi1_qp_priv *priv = qp->priv; + struct ahg_ib_header *ahdr = priv->s_hdr; u32 hdrwords = qp->s_hdrwords; struct hfi1_sge_state *ss = qp->s_cur_sge; u32 len = qp->s_cur_size; @@ -1014,17 +1022,18 @@ int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, struct verbs_txreq *tx; struct sdma_txreq *stx; u64 pbc_flags = 0; - u8 sc5 = qp->s_sc; + u8 sc5 = priv->s_sc; + int ret; - if (!list_empty(&qp->s_iowait.tx_head)) { + if (!list_empty(&priv->s_iowait.tx_head)) { stx = list_first_entry( - &qp->s_iowait.tx_head, + &priv->s_iowait.tx_head, struct sdma_txreq, list); list_del_init(&stx->list); tx = container_of(stx, struct verbs_txreq, txreq); - ret = sdma_send_txreq(tx->sde, &qp->s_iowait, stx); + ret = sdma_send_txreq(tx->sde, &priv->s_iowait, stx); if (unlikely(ret == -ECOMM)) goto bail_ecomm; return ret; @@ -1034,7 +1043,7 @@ int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, if (IS_ERR(tx)) goto bail_tx; - tx->sde = qp->s_sde; + tx->sde = priv->s_sde; if (likely(pbc == 0)) { u32 vl = 
sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); @@ -1053,7 +1062,7 @@ int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, if (unlikely(ret)) goto bail_build; trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh); - ret = sdma_send_txreq(tx->sde, &qp->s_iowait, &tx->txreq); + ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq); if (unlikely(ret == -ECOMM)) goto bail_ecomm; return ret; @@ -1075,6 +1084,7 @@ bail_tx: */ static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc) { + struct hfi1_qp_priv *priv = qp->priv; struct hfi1_devdata *dd = sc->dd; struct hfi1_ibdev *dev = &dd->verbs_dev; unsigned long flags; @@ -1089,14 +1099,14 @@ static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc) spin_lock_irqsave(&qp->s_lock, flags); if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { write_seqlock(&dev->iowait_lock); - if (list_empty(&qp->s_iowait.list)) { + if (list_empty(&priv->s_iowait.list)) { struct hfi1_ibdev *dev = &dd->verbs_dev; int was_empty; dev->n_piowait++; qp->s_flags |= HFI1_S_WAIT_PIO; was_empty = list_empty(&sc->piowait); - list_add_tail(&qp->s_iowait.list, &sc->piowait); + list_add_tail(&priv->s_iowait.list, &sc->piowait); trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO); atomic_inc(&qp->refcount); /* counting: only call wantpiobuf_intr if first user */ @@ -1126,7 +1136,8 @@ struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5) int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { - struct ahg_ib_header *ahdr = qp->s_hdr; + struct hfi1_qp_priv *priv = qp->priv; + struct ahg_ib_header *ahdr = priv->s_hdr; u32 hdrwords = qp->s_hdrwords; struct hfi1_sge_state *ss = qp->s_cur_sge; u32 len = qp->s_cur_size; @@ -1142,7 +1153,7 @@ int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, int wc_status = IB_WC_SUCCESS; /* vl15 special case taken care of in ud.c */ - sc5 = qp->s_sc; + sc5 = priv->s_sc; sc = qp_to_send_context(qp, sc5); if (!sc) @@ -1249,11 +1260,12 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd, struct hfi1_ib_header *hdr, struct hfi1_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; struct hfi1_devdata *dd; int i = 0; u16 pkey; - u8 lnh, sc5 = qp->s_sc; + u8 lnh, sc5 = priv->s_sc; if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT)) return 0; @@ -1312,7 +1324,8 @@ bad: int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); - struct ahg_ib_header *ahdr = qp->s_hdr; + struct hfi1_qp_priv *priv = qp->priv; + struct ahg_ib_header *ahdr = priv->s_hdr; int ret; int pio = 0; unsigned long flags = 0; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 14aa81c1b11c..dc5aa9aacd02 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -386,6 +386,20 @@ struct hfi1_ack_entry { }; }; +/* + * hfi1 specific data structures that will be hidden from rvt after the queue + * pair is made common + */ +struct hfi1_qp; +struct hfi1_qp_priv { + struct ahg_ib_header *s_hdr; /* next packet header to send */ + struct sdma_engine *s_sde; /* current sde */ + u8 s_sc; /* SC[0..4] for next packet */ + u8 r_adefered; /* number of acks defered */ + struct iowait s_iowait; + struct hfi1_qp *owner; +}; + /* * Variables prefixed with s_ are for the requester (sender). * Variables prefixed with r_ are for the responder (receiver). 
@@ -396,16 +410,13 @@ struct hfi1_ack_entry { */ struct hfi1_qp { struct ib_qp ibqp; + void *priv; /* read mostly fields above and below */ struct ib_ah_attr remote_ah_attr; struct ib_ah_attr alt_ah_attr; struct hfi1_qp __rcu *next; /* link list for QPN hash table */ struct hfi1_swqe *s_wq; /* send work queue */ struct hfi1_mmap_info *ip; - struct ahg_ib_header *s_hdr; /* next packet header to send */ - struct sdma_engine *s_sde; /* current sde */ - /* sc for UC/RC QPs - based on ah for UD */ - u8 s_sc; unsigned long timeout_jiffies; /* computed from timeout */ enum ib_mtu path_mtu; @@ -453,7 +464,6 @@ struct hfi1_qp { u32 r_psn; /* expected rcv packet sequence number */ u32 r_msn; /* message sequence number */ - u8 r_adefered; /* number of acks defered */ u8 r_state; /* opcode of last packet received */ u8 r_flags; u8 r_head_ack_queue; /* index into s_ack_queue[] */ @@ -501,8 +511,6 @@ struct hfi1_qp { struct hfi1_sge_state s_ack_rdma_sge; struct timer_list s_timer; - struct iowait s_iowait; - struct hfi1_sge r_sg_list[0] /* verified SGEs */ ____cacheline_aligned_in_smp; }; @@ -794,6 +802,14 @@ static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev) return container_of(rdi, struct hfi1_ibdev, rdi); } +static inline struct hfi1_qp *iowait_to_qp(struct iowait *s_iowait) +{ + struct hfi1_qp_priv *priv; + + priv = container_of(s_iowait, struct hfi1_qp_priv, s_iowait); + return priv->owner; +} + /* * Send if not busy or waiting for I/O and either * a RC response is pending or we can process send work requests. -- cgit v1.2.3-59-g8ed1b From 49dbb6cf1739e1eefa7ed3849430144bb817fdc6 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:06 -0800 Subject: staging/rdma/hfi1: Add device specific info prints Implement get_card_name and get_pci_dev helper functions for rdmavt for hfi1. Reviewed-by: Mike Marciniszyn Reviewed-by: Ira Weiny Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/driver.c | 16 ++++++++++++++++ drivers/staging/rdma/hfi1/hfi.h | 2 ++ drivers/staging/rdma/hfi1/verbs.c | 2 ++ 3 files changed, 20 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index d096f11c0baa..62ee03ee4d9e 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -162,6 +162,22 @@ const char *get_unit_name(int unit) return iname; } +const char *get_card_name(struct rvt_dev_info *rdi) +{ + struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi); + struct hfi1_devdata *dd = container_of(ibdev, + struct hfi1_devdata, verbs_dev); + return get_unit_name(dd->unit); +} + +struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi) +{ + struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi); + struct hfi1_devdata *dd = container_of(ibdev, + struct hfi1_devdata, verbs_dev); + return dd->pcidev; +} + /* * Return count of units with at least one port ACTIVE. 
*/ diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index dbea286cde72..cff966e5938f 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1660,6 +1660,8 @@ int get_platform_config_field(struct hfi1_devdata *dd, int table_index, int field_index, u32 *data, u32 len); const char *get_unit_name(int unit); +const char *get_card_name(struct rvt_dev_info *rdi); +struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi); /* * Flush write combining store buffers (if present) and perform a write diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index b8c6f742b18f..ccd91da1868b 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -2031,6 +2031,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) * Fill in rvt info object. */ dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files; + dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name; + dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; dd->verbs_dev.rdi.dparms.props.max_pd = hfi1_max_pds; dd->verbs_dev.rdi.flags = (RVT_FLAG_MR_INIT_DRIVER | RVT_FLAG_QP_INIT_DRIVER | -- cgit v1.2.3-59-g8ed1b From 8859b4a6d08bcbd804459274c6f4134aaf6ace8a Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:11 -0800 Subject: staging/rdma/hfi1: Use correct rdmavt header files after move. Rdmavt split the header files to be based on ibta object. This patch makes changes in hfi1 to account for the move. Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/driver.c | 3 ++- drivers/staging/rdma/hfi1/mad.c | 4 ++-- drivers/staging/rdma/hfi1/qp.c | 5 +++-- drivers/staging/rdma/hfi1/ud.c | 14 +++++++------- drivers/staging/rdma/hfi1/verbs.c | 4 ++-- 5 files changed, 16 insertions(+), 14 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 62ee03ee4d9e..aa309a5fde8f 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -56,6 +56,7 @@ #include #include #include +#include #include "hfi.h" #include "trace.h" @@ -316,7 +317,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, /* Get the destination QP number. */ qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; - if (lid < HFI1_MULTICAST_LID_BASE) { + if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) { struct hfi1_qp *qp; unsigned long flags; diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index ed88a5aab140..13994850396c 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -1097,7 +1097,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, /* Must be a valid unicast LID address. */ if ((lid == 0 && ls_old > IB_PORT_INIT) || - lid >= HFI1_MULTICAST_LID_BASE) { + lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { smp->status |= IB_SMP_INVALID_FIELD; pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n", lid); @@ -1130,7 +1130,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, /* Must be a valid unicast LID address. 
*/ if ((smlid == 0 && ls_old > IB_PORT_INIT) || - smlid >= HFI1_MULTICAST_LID_BASE) { + smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { smp->status |= IB_SMP_INVALID_FIELD; pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid); } else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index cacef55dfb74..735253b7d3e8 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -640,7 +640,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_AV) { u8 sc; - if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE) + if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) goto inval; if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr)) goto inval; @@ -653,7 +653,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_ALT_PATH) { u8 sc; - if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE) + if (attr->alt_ah_attr.dlid >= + be16_to_cpu(IB_MULTICAST_LID_BASE)) goto inval; if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) goto inval; diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index 00d1ae757529..55a4eec37331 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -242,7 +242,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe) wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1)); /* Check for loopback when the port lid is not set */ if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI) - wc.slid = HFI1_PERMISSIVE_LID; + wc.slid = be16_to_cpu(IB_LID_PERMISSIVE); wc.sl = ah_attr->sl; wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); wc.port_num = qp->port_num; @@ -310,11 +310,11 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) ibp = to_iport(qp->ibqp.device, qp->port_num); ppd = ppd_from_ibp(ibp); ah_attr = &to_iah(wqe->ud_wr.ah)->attr; - if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE || - ah_attr->dlid == HFI1_PERMISSIVE_LID) { + if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) || + ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) { lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); if (unlikely(!loopback && (lid == ppd->lid || - (lid == HFI1_PERMISSIVE_LID && + (lid == be16_to_cpu(IB_LID_PERMISSIVE) && qp->ibqp.qp_type == IB_QPT_GSI)))) { /* * If DMAs are in progress, we can't generate @@ -666,8 +666,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) qkey = be32_to_cpu(ohdr->u.ud.deth[0]); src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK; dlid = be16_to_cpu(hdr->lrh[1]); - is_mcast = (dlid > HFI1_MULTICAST_LID_BASE) && - (dlid != HFI1_PERMISSIVE_LID); + is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && + (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); bth1 = be32_to_cpu(ohdr->bth[1]); if (unlikely(bth1 & HFI1_BECN_SMASK)) { /* @@ -874,7 +874,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) /* * Save the LMC lower bits if the destination LID is a unicast LID. */ - wc.dlid_path_bits = dlid >= HFI1_MULTICAST_LID_BASE ? 0 : + wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 : dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1); wc.port_num = qp->port_num; /* Signal completion event if the solicited bit is set. 
*/ diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index ccd91da1868b..5c952d82a023 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -646,8 +646,8 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) /* Get the destination QP number. */ qp_num = be32_to_cpu(packet->ohdr->bth[1]) & HFI1_QPN_MASK; lid = be16_to_cpu(hdr->lrh[1]); - if (unlikely((lid >= HFI1_MULTICAST_LID_BASE) && - (lid != HFI1_PERMISSIVE_LID))) { + if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && + (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) { struct hfi1_mcast *mcast; struct hfi1_mcast_qp *p; -- cgit v1.2.3-59-g8ed1b From 15723f06fb9d80cbfd895c32c6023881c7d0e0b4 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:17 -0800 Subject: staging/rdma/hfi1: Use address handle in rdmavt and remove from hfi1 Original patch from Kamal Heib, split apart from the original and modified to accommodate recent changes in rdmavt. Remove AH from hfi1 and use the rdmavt version. Signed-off-by: Kamal Heib Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/common.h | 2 - drivers/staging/rdma/hfi1/mad.c | 2 +- drivers/staging/rdma/hfi1/qp.c | 6 +- drivers/staging/rdma/hfi1/ruc.c | 2 +- drivers/staging/rdma/hfi1/ud.c | 4 +- drivers/staging/rdma/hfi1/verbs.c | 131 +++---------------------------------- drivers/staging/rdma/hfi1/verbs.h | 20 +----- 7 files changed, 18 insertions(+), 149 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h index e4b1dc6d0328..cb5ca794ac08 100644 --- a/drivers/staging/rdma/hfi1/common.h +++ b/drivers/staging/rdma/hfi1/common.h @@ -341,7 +341,6 @@ struct hfi1_message_header { #define FULL_MGMT_P_KEY 0xFFFF #define DEFAULT_P_KEY LIM_MGMT_P_KEY -#define HFI1_PERMISSIVE_LID 0xFFFF #define HFI1_AETH_CREDIT_SHIFT 24 #define HFI1_AETH_CREDIT_MASK 0x1F #define HFI1_AETH_CREDIT_INVAL 0x1F @@ -353,7 +352,6 @@ struct hfi1_message_header { #define HFI1_BECN_SHIFT 30 #define HFI1_BECN_MASK 1 #define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT) -#define HFI1_MULTICAST_LID_BASE 0xC000 static inline __u64 rhf_to_cpu(const __le32 *rbuf) { diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 13994850396c..1190f8dd2629 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -137,7 +137,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) ret = PTR_ERR(ah); else { send_buf->ah = ah; - ibp->sm_ah = to_iah(ah); + ibp->sm_ah = ibah_to_rvtah(ah); ret = 0; } } else diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 735253b7d3e8..9fcf052b62bd 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -424,7 +424,7 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) if (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) - atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); + atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); if (++qp->s_last >= qp->s_size) qp->s_last = 0; } @@ -642,7 +642,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) goto inval; - if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr)) + if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr)) goto inval; sc = ah_to_sc(ibqp->device, &attr->ah_attr); if (!qp_to_sdma_engine(qp, sc) && @@ -656,7 +656,7 @@
int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr->alt_ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) goto inval; - if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) + if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) goto inval; if (attr->alt_pkey_index >= hfi1_get_npkeys(dd)) goto inval; diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 55ed00dd0218..33bcfe5bfd13 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -893,7 +893,7 @@ void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe, if (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) - atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); + atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); /* See ch. 11.2.4.1 and 10.7.3.1 */ if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index 55a4eec37331..820fef211edf 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -98,7 +98,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe) goto drop; } - ah_attr = &to_iah(swqe->ud_wr.ah)->attr; + ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr; ppd = ppd_from_ibp(ibp); if (qp->ibqp.qp_num > 1) { @@ -309,7 +309,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp) /* Construct the header. */ ibp = to_iport(qp->ibqp.device, qp->port_num); ppd = ppd_from_ibp(ibp); - ah_attr = &to_iah(wqe->ud_wr.ah)->attr; + ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr; if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) || ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) { lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 5c952d82a023..021e21153b90 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -455,9 +455,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) if (wqe->length > 0x80000000U) goto bail_inval_free; } else { - struct hfi1_ah *ah = to_iah(ud_wr(wr)->ah); - - atomic_inc(&ah->refcount); + atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount); } wqe->ssn = qp->s_ssn++; qp->s_head = next; @@ -1615,88 +1613,21 @@ u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah) return ibp->sl_to_sc[ah->sl]; } -int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) +static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) { struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; u8 sc5; - /* A multicast address requires a GRH (see ch. 8.4.1). 
*/ - if (ah_attr->dlid >= HFI1_MULTICAST_LID_BASE && - ah_attr->dlid != HFI1_PERMISSIVE_LID && - !(ah_attr->ah_flags & IB_AH_GRH)) - goto bail; - if ((ah_attr->ah_flags & IB_AH_GRH) && - ah_attr->grh.sgid_index >= HFI1_GUIDS_PER_PORT) - goto bail; - if (ah_attr->dlid == 0) - goto bail; - if (ah_attr->port_num < 1 || - ah_attr->port_num > ibdev->phys_port_cnt) - goto bail; - if (ah_attr->static_rate != IB_RATE_PORT_CURRENT && - ib_rate_to_mbps(ah_attr->static_rate) < 0) - goto bail; - if (ah_attr->sl >= OPA_MAX_SLS) - goto bail; /* test the mapping for validity */ ibp = to_iport(ibdev, ah_attr->port_num); ppd = ppd_from_ibp(ibp); sc5 = ibp->sl_to_sc[ah_attr->sl]; dd = dd_from_ppd(ppd); if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) - goto bail; + return -EINVAL; return 0; -bail: - return -EINVAL; -} - -/** - * create_ah - create an address handle - * @pd: the protection domain - * @ah_attr: the attributes of the AH - * - * This may be called from interrupt context. - */ -static struct ib_ah *create_ah(struct ib_pd *pd, - struct ib_ah_attr *ah_attr) -{ - struct hfi1_ah *ah; - struct ib_ah *ret; - struct hfi1_ibdev *dev = to_idev(pd->device); - unsigned long flags; - - if (hfi1_check_ah(pd->device, ah_attr)) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - - ah = kmalloc(sizeof(*ah), GFP_ATOMIC); - if (!ah) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - spin_lock_irqsave(&dev->n_ahs_lock, flags); - if (dev->n_ahs_allocated == hfi1_max_ahs) { - spin_unlock_irqrestore(&dev->n_ahs_lock, flags); - kfree(ah); - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - dev->n_ahs_allocated++; - spin_unlock_irqrestore(&dev->n_ahs_lock, flags); - - /* ib_create_ah() will initialize ah->ibah. */ - ah->attr = *ah_attr; - atomic_set(&ah->refcount, 0); - - ret = &ah->ibah; - -bail: - return ret; } struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid) @@ -1716,51 +1647,6 @@ struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid) return ah; } -/** - * destroy_ah - destroy an address handle - * @ibah: the AH to destroy - * - * This may be called from interrupt context. - */ -static int destroy_ah(struct ib_ah *ibah) -{ - struct hfi1_ibdev *dev = to_idev(ibah->device); - struct hfi1_ah *ah = to_iah(ibah); - unsigned long flags; - - if (atomic_read(&ah->refcount) != 0) - return -EBUSY; - - spin_lock_irqsave(&dev->n_ahs_lock, flags); - dev->n_ahs_allocated--; - spin_unlock_irqrestore(&dev->n_ahs_lock, flags); - - kfree(ah); - - return 0; -} - -static int modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) -{ - struct hfi1_ah *ah = to_iah(ibah); - - if (hfi1_check_ah(ibah->device, ah_attr)) - return -EINVAL; - - ah->attr = *ah_attr; - - return 0; -} - -static int query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) -{ - struct hfi1_ah *ah = to_iah(ibah); - - *ah_attr = ah->attr; - - return 0; -} - /** * hfi1_get_npkeys - return the size of the PKEY table for context 0 * @dd: the hfi1_ib device @@ -1879,7 +1765,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) /* Only need to initialize non-zero fields. 
*/ - spin_lock_init(&dev->n_ahs_lock); spin_lock_init(&dev->n_cqs_lock); spin_lock_init(&dev->n_qps_lock); spin_lock_init(&dev->n_srqs_lock); @@ -1989,10 +1874,10 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->dealloc_ucontext = dealloc_ucontext; ibdev->alloc_pd = NULL; ibdev->dealloc_pd = NULL; - ibdev->create_ah = create_ah; - ibdev->destroy_ah = destroy_ah; - ibdev->modify_ah = modify_ah; - ibdev->query_ah = query_ah; + ibdev->create_ah = NULL; + ibdev->destroy_ah = NULL; + ibdev->modify_ah = NULL; + ibdev->query_ah = NULL; ibdev->create_srq = hfi1_create_srq; ibdev->modify_srq = hfi1_modify_srq; ibdev->query_srq = hfi1_query_srq; @@ -2033,6 +1918,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files; dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name; dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; + dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; + dd->verbs_dev.rdi.dparms.props.max_ah = hfi1_max_ahs; dd->verbs_dev.rdi.dparms.props.max_pd = hfi1_max_pds; dd->verbs_dev.rdi.flags = (RVT_FLAG_MR_INIT_DRIVER | RVT_FLAG_QP_INIT_DRIVER | diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index dc5aa9aacd02..4db6136f0384 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -234,13 +234,6 @@ struct hfi1_mcast { int n_attached; }; -/* Address Handle */ -struct hfi1_ah { - struct ib_ah ibah; - struct ib_ah_attr attr; - atomic_t refcount; -}; - /* * This structure is used by hfi1_mmap() to validate an offset * when an mmap() request is made. The vm_area_struct then uses @@ -652,8 +645,8 @@ static inline void inc_opstats( struct hfi1_ibport { struct hfi1_qp __rcu *qp[2]; struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ - struct hfi1_ah *sm_ah; - struct hfi1_ah *smi_ah; + struct rvt_ah *sm_ah; + struct rvt_ah *smi_ah; struct rb_root mcast_tree; spinlock_t lock; /* protect changes in this struct */ @@ -735,8 +728,6 @@ struct hfi1_ibdev { u64 n_kmem_wait; u64 n_send_schedule; - u32 n_ahs_allocated; /* number of AHs allocated for device */ - spinlock_t n_ahs_lock; u32 n_cqs_allocated; /* number of CQs allocated for device */ spinlock_t n_cqs_lock; u32 n_qps_allocated; /* number of QPs allocated for device */ @@ -774,11 +765,6 @@ static inline struct hfi1_mr *to_imr(struct ib_mr *ibmr) return container_of(ibmr, struct hfi1_mr, ibmr); } -static inline struct hfi1_ah *to_iah(struct ib_ah *ibah) -{ - return container_of(ibah, struct hfi1_ah, ibah); -} - static inline struct hfi1_cq *to_icq(struct ib_cq *ibcq) { return container_of(ibcq, struct hfi1_cq, ibcq); @@ -925,8 +911,6 @@ void hfi1_rc_hdrerr( u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); -int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); - struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid); void hfi1_rc_rnr_retry(unsigned long arg); -- cgit v1.2.3-59-g8ed1b From 8f1764fa2ba5a39c651316998f40631e8492081d Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:22 -0800 Subject: staging/rdma/hfi1: Implement hfi1 support for AH notification For OPA devices additional work is required to create an AH. This patch adds support to set the VL correctly. 
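The hook added below, hfi1_notify_new_ah(), gives the driver a chance to derive send parameters that rdmavt itself cannot know: the SL carried in the address-handle attributes is mapped to an SC via the port's sl_to_sc table, the SC to a VL via the device's SC-to-VLT mapping, and the per-VL MTU is cached as a log2 value. The following is a minimal user-space sketch of that mapping pipeline; the struct names and table contents here are illustrative stand-ins, not the driver's real types (those appear in the diff below).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's per-port and per-device state. */
struct port_state {
	uint8_t sl_to_sc[32];   /* SL -> SC mapping, normally configured via MAD */
};

struct dev_state {
	uint8_t sc_to_vlt[32];  /* SC -> VL table, normally programmed into hw */
	uint32_t vld_mtu[16];   /* per-VL MTU, assumed a power of two */
	unsigned num_vls;
};

/* Portable equivalent of the kernel's ilog2() for power-of-two values. */
static uint8_t ilog2_u32(uint32_t v)
{
	uint8_t r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Model of the notification hook: once the core has created the AH, derive
 * the VL (and the log2 of the per-VL MTU) from the service level. */
static void notify_new_ah(const struct port_state *ibp,
			  const struct dev_state *dd,
			  uint8_t sl, uint8_t *vl, uint8_t *log_pmtu)
{
	uint8_t sc5 = ibp->sl_to_sc[sl];

	*vl = dd->sc_to_vlt[sc5];
	if (*vl < dd->num_vls || *vl == 15)	/* 15 is the management VL */
		*log_pmtu = ilog2_u32(dd->vld_mtu[*vl]);
}

int main(void)
{
	struct port_state ibp = { .sl_to_sc = { [0] = 3 } };
	struct dev_state dd = { .sc_to_vlt = { [3] = 1 },
				.vld_mtu = { [1] = 4096 }, .num_vls = 8 };
	uint8_t vl = 0, log_pmtu = 0;

	notify_new_ah(&ibp, &dd, 0, &vl, &log_pmtu);
	printf("SL 0 -> VL %u, log_pmtu %u\n", vl, log_pmtu);
	return 0;
}

With a 4096-byte MTU on VL 1, the sketch prints "SL 0 -> VL 1, log_pmtu 12", matching what ilog2(4096) yields in the real hook.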
Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 021e21153b90..657efd3bd75c 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1630,6 +1630,29 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) return 0; } +static void hfi1_notify_new_ah(struct ib_device *ibdev, + struct ib_ah_attr *ah_attr, + struct rvt_ah *ah) +{ + struct hfi1_ibport *ibp; + struct hfi1_pportdata *ppd; + struct hfi1_devdata *dd; + u8 sc5; + + /* + * Do not trust reading anything from rvt_ah at this point as it is not + * done being setup. We can however modify things which we need to set. + */ + + ibp = to_iport(ibdev, ah_attr->port_num); + ppd = ppd_from_ibp(ibp); + sc5 = ibp->sl_to_sc[ah->attr.sl]; + dd = dd_from_ppd(ppd); + ah->vl = sc_to_vlt(dd, sc5); + if (ah->vl < num_vls || ah->vl == 15) + ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu); +} + struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid) { struct ib_ah_attr attr; @@ -1919,6 +1942,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files; dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name; dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; + dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; dd->verbs_dev.rdi.dparms.props.max_ah = hfi1_max_ahs; dd->verbs_dev.rdi.dparms.props.max_pd = hfi1_max_pds; dd->verbs_dev.rdi.flags = (RVT_FLAG_MR_INIT_DRIVER | RVT_FLAG_QP_INIT_DRIVER | -- cgit v1.2.3-59-g8ed1b From 895420ddc8b35099ddd25132f5707306e70f0d6a Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:28 -0800 Subject: staging/rdma/hfi1: Remove hfi1 MR and hfi1 specific qp type This patch does the actual removal of the queue pair from the hfi1 driver along with a number of dependent data structures. These were moved to rvt. It also removes the MR functions to use those in rdmavt. These two pieces cannot reasonably be split apart because they depend on each other.
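The void-pointer plumbing this series relies on is easy to model in isolation: the common QP carries an opaque priv, the driver's private struct embeds the objects (such as the iowait) that common code hands back, and an owner backpointer lets container_of() recover the QP, which is exactly the shape of the iowait_to_qp() helper added earlier in the series. The sketch below is a self-contained user-space model using simplified stand-in types, not the kernel structures themselves.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins: rvt_qp is the common structure, qp_priv holds
 * what only this driver needs (here, a toy wait object). */
struct iowait { int busy; };

struct rvt_qp {
	void *priv;		/* opaque driver-private data */
	unsigned qp_num;
};

struct qp_priv {
	struct iowait s_iowait;	/* embedded, so container_of() works */
	struct rvt_qp *owner;	/* backpointer to the common QP */
};

/* User-space equivalent of the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Recover the QP from the embedded wait structure via container_of(),
 * then follow the backpointer, mirroring the driver's iowait_to_qp(). */
static struct rvt_qp *iowait_to_qp(struct iowait *w)
{
	struct qp_priv *priv = container_of(w, struct qp_priv, s_iowait);

	return priv->owner;
}

int main(void)
{
	struct rvt_qp qp = { .qp_num = 7 };
	struct qp_priv *priv = calloc(1, sizeof(*priv));

	if (!priv)
		return 1;
	priv->owner = &qp;
	qp.priv = priv;
	printf("recovered QP%u\n", iowait_to_qp(&priv->s_iowait)->qp_num);
	free(priv);
	return 0;
}

The design choice this models is the one the commit message describes: common code only ever sees the embedded iowait, yet the driver can still get back to its QP without the common structure knowing anything about the private layout.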
Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/cq.c | 2 +- drivers/staging/rdma/hfi1/diag.c | 8 +- drivers/staging/rdma/hfi1/driver.c | 10 +- drivers/staging/rdma/hfi1/hfi.h | 16 +- drivers/staging/rdma/hfi1/keys.c | 356 ------------------------ drivers/staging/rdma/hfi1/mmap.c | 24 +- drivers/staging/rdma/hfi1/mr.c | 473 -------------------------------- drivers/staging/rdma/hfi1/pio.c | 4 +- drivers/staging/rdma/hfi1/qp.c | 88 +++--- drivers/staging/rdma/hfi1/qp.h | 33 ++- drivers/staging/rdma/hfi1/rc.c | 116 ++++---- drivers/staging/rdma/hfi1/ruc.c | 75 +++-- drivers/staging/rdma/hfi1/sdma.h | 6 +- drivers/staging/rdma/hfi1/srq.c | 28 +- drivers/staging/rdma/hfi1/trace.h | 22 +- drivers/staging/rdma/hfi1/uc.c | 10 +- drivers/staging/rdma/hfi1/ud.c | 18 +- drivers/staging/rdma/hfi1/verbs.c | 141 ++++------ drivers/staging/rdma/hfi1/verbs.h | 366 ++++-------------------- drivers/staging/rdma/hfi1/verbs_mcast.c | 8 +- 21 files changed, 337 insertions(+), 1469 deletions(-) delete mode 100644 drivers/staging/rdma/hfi1/keys.c delete mode 100644 drivers/staging/rdma/hfi1/mr.c (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index 69fb10f6a111..d82d9dc97c39 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o hfi1-y := chip.o cq.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ - init.o intr.o keys.o mad.o mmap.o mr.o pcie.o pio.o pio_copy.o \ + init.o intr.o mad.o mmap.o pcie.o pio.o pio_copy.o \ qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs_mcast.o verbs.o hfi1-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/staging/rdma/hfi1/cq.c b/drivers/staging/rdma/hfi1/cq.c index 4f046ffe7e60..ffd0e7abca00 100644 --- a/drivers/staging/rdma/hfi1/cq.c +++ b/drivers/staging/rdma/hfi1/cq.c @@ -479,7 +479,7 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) if (cq->ip) { struct hfi1_ibdev *dev = to_idev(ibcq->device); - struct hfi1_mmap_info *ip = cq->ip; + struct rvt_mmap_info *ip = cq->ip; hfi1_update_mmap_info(dev, ip, sz, wc); diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index 15c616ada4bf..d9889d430698 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -1603,7 +1603,7 @@ int snoop_recv_handler(struct hfi1_packet *packet) /* * Handle snooping and capturing packets when sdma is being used. */ -int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, +int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n"); @@ -1616,13 +1616,13 @@ int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, * bypass packets. The only way to send a bypass packet currently is to use the * diagpkt interface. When that interface is enable snoop/capture is not. 
*/ -int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, +int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { struct hfi1_qp_priv *priv = qp->priv; struct ahg_ib_header *ahdr = priv->s_hdr; u32 hdrwords = qp->s_hdrwords; - struct hfi1_sge_state *ss = qp->s_cur_sge; + struct rvt_sge_state *ss = qp->s_cur_sge; u32 len = qp->s_cur_size; u32 dwords = (len + 3) >> 2; u32 plen = hdrwords + dwords + 2; /* includes pbc */ @@ -1630,7 +1630,7 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, struct snoop_packet *s_packet = NULL; u32 *hdr = (u32 *)&ahdr->ibh; u32 length = 0; - struct hfi1_sge_state temp_ss; + struct rvt_sge_state temp_ss; void *data = NULL; void *data_start = NULL; int ret; diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index aa309a5fde8f..eaed692ba575 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -318,7 +318,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, /* Get the destination QP number. */ qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) { - struct hfi1_qp *qp; + struct rvt_qp *qp; unsigned long flags; rcu_read_lock(); @@ -387,7 +387,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, * Only in pre-B0 h/w is the CNP_OPCODE handled * via this code path. */ - struct hfi1_qp *qp = NULL; + struct rvt_qp *qp = NULL; u32 lqpn, rqpn; u16 rlid; u8 svc_type, sl, sc5; @@ -456,7 +456,7 @@ static void prescan_rxq(struct hfi1_packet *packet) {} #else /* !CONFIG_PRESCAN_RXQ */ static int prescan_receive_queue; -static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr, +static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr, struct hfi1_other_headers *ohdr, u64 rhf, u32 bth1, struct ib_grh *grh) { @@ -595,7 +595,7 @@ static void prescan_rxq(struct hfi1_packet *packet) struct hfi1_ibport *ibp = &rcd->ppd->ibport_data; __le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head + dd->rhf_offset; - struct hfi1_qp *qp; + struct rvt_qp *qp; struct hfi1_ib_header *hdr; struct hfi1_other_headers *ohdr; struct ib_grh *grh = NULL; @@ -770,7 +770,7 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd; - struct hfi1_qp *qp, *nqp; + struct rvt_qp *qp, *nqp; rcd = packet->rcd; rcd->head = packet->rhqoff; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index cff966e5938f..d52dbdaacad7 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -334,7 +334,7 @@ struct hfi1_packet { void *hdr; struct hfi1_ctxtdata *rcd; __le32 *rhf_addr; - struct hfi1_qp *qp; + struct rvt_qp *qp; struct hfi1_other_headers *ohdr; u64 rhf; u32 maxcnt; @@ -374,7 +374,7 @@ struct hfi1_snoop_data { #define HFI1_PORT_SNOOP_MODE 1U #define HFI1_PORT_CAPTURE_MODE 2U -struct hfi1_sge_state; +struct rvt_sge_state; /* * Get/Set IB link-level config parameters for f_get/set_ib_cfg() @@ -1091,9 +1091,9 @@ struct hfi1_devdata { * Handlers for outgoing data so that snoop/capture does not * have to have its hooks in the send path */ - int (*process_pio_send)(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, + int (*process_pio_send)(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); - int (*process_dma_send)(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, + int (*process_dma_send)(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); void 
(*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, const void *from, size_t count); @@ -1276,7 +1276,7 @@ static inline u32 egress_cycles(u32 len, u32 rate) void set_link_ipg(struct hfi1_pportdata *ppd); void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn, u32 rqpn, u8 svc_type); -void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn, +void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh); @@ -1468,9 +1468,9 @@ void reset_link_credits(struct hfi1_devdata *dd); void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); int snoop_recv_handler(struct hfi1_packet *packet); -int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, +int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); -int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, +int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, const void *from, size_t count); @@ -1682,7 +1682,7 @@ int process_receive_invalid(struct hfi1_packet *packet); extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8]; -void update_sge(struct hfi1_sge_state *ss, u32 length); +void update_sge(struct rvt_sge_state *ss, u32 length); /* global module parameter variables */ extern unsigned int hfi1_max_mtu; diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c deleted file mode 100644 index ffaaa6fd7a1f..000000000000 --- a/drivers/staging/rdma/hfi1/keys.c +++ /dev/null @@ -1,356 +0,0 @@ -/* - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include "hfi.h" - -/** - * hfi1_alloc_lkey - allocate an lkey - * @mr: memory region that this lkey protects - * @dma_region: 0->normal key, 1->restricted DMA key - * - * Returns 0 if successful, otherwise returns -errno. - * - * Increments mr reference count as required. - * - * Sets the lkey field mr for non-dma regions. - * - */ - -int hfi1_alloc_lkey(struct rvt_mregion *mr, int dma_region) -{ - unsigned long flags; - u32 r; - u32 n; - int ret = 0; - struct hfi1_ibdev *dev = to_idev(mr->pd->device); - struct rvt_lkey_table *rkt = &dev->lk_table; - - hfi1_get_mr(mr); - spin_lock_irqsave(&rkt->lock, flags); - - /* special case for dma_mr lkey == 0 */ - if (dma_region) { - struct rvt_mregion *tmr; - - tmr = rcu_access_pointer(dev->dma_mr); - if (!tmr) { - rcu_assign_pointer(dev->dma_mr, mr); - mr->lkey_published = 1; - } else { - hfi1_put_mr(mr); - } - goto success; - } - - /* Find the next available LKEY */ - r = rkt->next; - n = r; - for (;;) { - if (!rcu_access_pointer(rkt->table[r])) - break; - r = (r + 1) & (rkt->max - 1); - if (r == n) - goto bail; - } - rkt->next = (r + 1) & (rkt->max - 1); - /* - * Make sure lkey is never zero which is reserved to indicate an - * unrestricted LKEY. - */ - rkt->gen++; - /* - * bits are capped in verbs.c to ensure enough bits for - * generation number - */ - mr->lkey = (r << (32 - hfi1_lkey_table_size)) | - ((((1 << (24 - hfi1_lkey_table_size)) - 1) & rkt->gen) - << 8); - if (mr->lkey == 0) { - mr->lkey |= 1 << 8; - rkt->gen++; - } - rcu_assign_pointer(rkt->table[r], mr); - mr->lkey_published = 1; -success: - spin_unlock_irqrestore(&rkt->lock, flags); -out: - return ret; -bail: - hfi1_put_mr(mr); - spin_unlock_irqrestore(&rkt->lock, flags); - ret = -ENOMEM; - goto out; -} - -/** - * hfi1_free_lkey - free an lkey - * @mr: mr to free from tables - */ -void hfi1_free_lkey(struct rvt_mregion *mr) -{ - unsigned long flags; - u32 lkey = mr->lkey; - u32 r; - struct hfi1_ibdev *dev = to_idev(mr->pd->device); - struct rvt_lkey_table *rkt = &dev->lk_table; - int freed = 0; - - spin_lock_irqsave(&rkt->lock, flags); - if (!mr->lkey_published) - goto out; - if (lkey == 0) - RCU_INIT_POINTER(dev->dma_mr, NULL); - else { - r = lkey >> (32 - hfi1_lkey_table_size); - RCU_INIT_POINTER(rkt->table[r], NULL); - } - mr->lkey_published = 0; - freed++; -out: - spin_unlock_irqrestore(&rkt->lock, flags); - if (freed) { - synchronize_rcu(); - hfi1_put_mr(mr); - } -} - -/** - * hfi1_lkey_ok - check IB SGE for validity and initialize - * @rkt: table containing lkey to check SGE against - * @pd: protection domain - * @isge: outgoing internal SGE - * @sge: SGE to check - * @acc: access flags - * - * Return 1 if valid and successful, otherwise returns 0. - * - * increments the reference count upon success - * - * Check the IB SGE for validity and initialize our internal version - * of it. 
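- *
- * A minimal sketch of the lookup performed below, assuming the table
- * holds 2^hfi1_lkey_table_size entries indexed by the top bits of the
- * key (as the allocation side arranges):
- *
- *      r  = sge->lkey >> (32 - hfi1_lkey_table_size);
- *      mr = rcu_dereference(rkt->table[r]);
- *      if (!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)
- *              return 0;       /* stale, reused or foreign key */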
- */ -int hfi1_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, - struct hfi1_sge *isge, struct ib_sge *sge, int acc) -{ - struct rvt_mregion *mr; - unsigned n, m; - size_t off; - - /* - * We use LKEY == zero for kernel virtual addresses - * (see hfi1_get_dma_mr and dma.c). - */ - rcu_read_lock(); - if (sge->lkey == 0) { - struct hfi1_ibdev *dev = to_idev(pd->ibpd.device); - - if (pd->user) - goto bail; - mr = rcu_dereference(dev->dma_mr); - if (!mr) - goto bail; - atomic_inc(&mr->refcount); - rcu_read_unlock(); - - isge->mr = mr; - isge->vaddr = (void *) sge->addr; - isge->length = sge->length; - isge->sge_length = sge->length; - isge->m = 0; - isge->n = 0; - goto ok; - } - mr = rcu_dereference( - rkt->table[(sge->lkey >> (32 - hfi1_lkey_table_size))]); - if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) - goto bail; - - off = sge->addr - mr->user_base; - if (unlikely(sge->addr < mr->user_base || - off + sge->length > mr->length || - (mr->access_flags & acc) != acc)) - goto bail; - atomic_inc(&mr->refcount); - rcu_read_unlock(); - - off += mr->offset; - if (mr->page_shift) { - /* - page sizes are uniform power of 2 so no loop is necessary - entries_spanned_by_off is the number of times the loop below - would have executed. - */ - size_t entries_spanned_by_off; - - entries_spanned_by_off = off >> mr->page_shift; - off -= (entries_spanned_by_off << mr->page_shift); - m = entries_spanned_by_off / RVT_SEGSZ; - n = entries_spanned_by_off % RVT_SEGSZ; - } else { - m = 0; - n = 0; - while (off >= mr->map[m]->segs[n].length) { - off -= mr->map[m]->segs[n].length; - n++; - if (n >= RVT_SEGSZ) { - m++; - n = 0; - } - } - } - isge->mr = mr; - isge->vaddr = mr->map[m]->segs[n].vaddr + off; - isge->length = mr->map[m]->segs[n].length - off; - isge->sge_length = sge->length; - isge->m = m; - isge->n = n; -ok: - return 1; -bail: - rcu_read_unlock(); - return 0; -} - -/** - * hfi1_rkey_ok - check the IB virtual address, length, and RKEY - * @qp: qp for validation - * @sge: SGE state - * @len: length of data - * @vaddr: virtual address to place data - * @rkey: rkey to check - * @acc: access flags - * - * Return 1 if successful, otherwise 0. - * - * increments the reference count upon success - */ -int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, - u32 len, u64 vaddr, u32 rkey, int acc) -{ - struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; - struct rvt_mregion *mr; - unsigned n, m; - size_t off; - - /* - * We use RKEY == zero for kernel virtual addresses - * (see hfi1_get_dma_mr and dma.c). - */ - rcu_read_lock(); - if (rkey == 0) { - struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd); - struct hfi1_ibdev *dev = to_idev(pd->ibpd.device); - - if (pd->user) - goto bail; - mr = rcu_dereference(dev->dma_mr); - if (!mr) - goto bail; - atomic_inc(&mr->refcount); - rcu_read_unlock(); - - sge->mr = mr; - sge->vaddr = (void *) vaddr; - sge->length = len; - sge->sge_length = len; - sge->m = 0; - sge->n = 0; - goto ok; - } - - mr = rcu_dereference( - rkt->table[(rkey >> (32 - hfi1_lkey_table_size))]); - if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) - goto bail; - - off = vaddr - mr->iova; - if (unlikely(vaddr < mr->iova || off + len > mr->length || - (mr->access_flags & acc) == 0)) - goto bail; - atomic_inc(&mr->refcount); - rcu_read_unlock(); - - off += mr->offset; - if (mr->page_shift) { - /* - page sizes are uniform power of 2 so no loop is necessary - entries_spanned_by_off is the number of times the loop below - would have executed. 
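-
- Worked example (page_shift == 12, i.e. 4 KiB pages; RVT_SEGSZ taken
- as 8 purely for illustration): for off == 0x5000,
-
-       entries_spanned_by_off = 0x5000 >> 12 = 5
-       off -= 5 << 12;                 /* off is now 0       */
-       m = 5 / 8 = 0;  n = 5 % 8 = 5;  /* map 0, segment 5   */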
- */ - size_t entries_spanned_by_off; - - entries_spanned_by_off = off >> mr->page_shift; - off -= (entries_spanned_by_off << mr->page_shift); - m = entries_spanned_by_off / RVT_SEGSZ; - n = entries_spanned_by_off % RVT_SEGSZ; - } else { - m = 0; - n = 0; - while (off >= mr->map[m]->segs[n].length) { - off -= mr->map[m]->segs[n].length; - n++; - if (n >= RVT_SEGSZ) { - m++; - n = 0; - } - } - } - sge->mr = mr; - sge->vaddr = mr->map[m]->segs[n].vaddr + off; - sge->length = mr->map[m]->segs[n].length - off; - sge->sge_length = len; - sge->m = m; - sge->n = n; -ok: - return 1; -bail: - rcu_read_unlock(); - return 0; -} diff --git a/drivers/staging/rdma/hfi1/mmap.c b/drivers/staging/rdma/hfi1/mmap.c index 5173b1c60b3d..4ce6be6af17c 100644 --- a/drivers/staging/rdma/hfi1/mmap.c +++ b/drivers/staging/rdma/hfi1/mmap.c @@ -59,12 +59,12 @@ /** * hfi1_release_mmap_info - free mmap info structure - * @ref: a pointer to the kref within struct hfi1_mmap_info + * @ref: a pointer to the kref within struct rvt_mmap_info */ void hfi1_release_mmap_info(struct kref *ref) { - struct hfi1_mmap_info *ip = - container_of(ref, struct hfi1_mmap_info, ref); + struct rvt_mmap_info *ip = + container_of(ref, struct rvt_mmap_info, ref); struct hfi1_ibdev *dev = to_idev(ip->context->device); spin_lock_irq(&dev->pending_lock); @@ -81,14 +81,14 @@ void hfi1_release_mmap_info(struct kref *ref) */ static void hfi1_vma_open(struct vm_area_struct *vma) { - struct hfi1_mmap_info *ip = vma->vm_private_data; + struct rvt_mmap_info *ip = vma->vm_private_data; kref_get(&ip->ref); } static void hfi1_vma_close(struct vm_area_struct *vma) { - struct hfi1_mmap_info *ip = vma->vm_private_data; + struct rvt_mmap_info *ip = vma->vm_private_data; kref_put(&ip->ref, hfi1_release_mmap_info); } @@ -109,7 +109,7 @@ int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) struct hfi1_ibdev *dev = to_idev(context->device); unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long size = vma->vm_end - vma->vm_start; - struct hfi1_mmap_info *ip, *pp; + struct rvt_mmap_info *ip, *pp; int ret = -EINVAL; /* @@ -146,11 +146,11 @@ done: /* * Allocate information for hfi1_mmap */ -struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, - u32 size, - struct ib_ucontext *context, - void *obj) { - struct hfi1_mmap_info *ip; +struct rvt_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, + u32 size, + struct ib_ucontext *context, + void *obj) { + struct rvt_mmap_info *ip; ip = kmalloc(sizeof(*ip), GFP_KERNEL); if (!ip) @@ -175,7 +175,7 @@ bail: return ip; } -void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip, +void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct rvt_mmap_info *ip, u32 size, void *obj) { size = PAGE_ALIGN(size); diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c deleted file mode 100644 index 7e14965a02cd..000000000000 --- a/drivers/staging/rdma/hfi1/mr.c +++ /dev/null @@ -1,473 +0,0 @@ -/* - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include - -#include "hfi.h" - -/* Fast memory region */ -struct hfi1_fmr { - struct ib_fmr ibfmr; - struct rvt_mregion mr; /* must be last */ -}; - -static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr) -{ - return container_of(ibfmr, struct hfi1_fmr, ibfmr); -} - -static int init_mregion(struct rvt_mregion *mr, struct ib_pd *pd, - int count) -{ - int m, i = 0; - int rval = 0; - - m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; - for (; i < m; i++) { - mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); - if (!mr->map[i]) - goto bail; - } - mr->mapsz = m; - init_completion(&mr->comp); - /* count returning the ptr to user */ - atomic_set(&mr->refcount, 1); - mr->pd = pd; - mr->max_segs = count; -out: - return rval; -bail: - while (i) - kfree(mr->map[--i]); - rval = -ENOMEM; - goto out; -} - -static void deinit_mregion(struct rvt_mregion *mr) -{ - int i = mr->mapsz; - - mr->mapsz = 0; - while (i) - kfree(mr->map[--i]); -} - - -/** - * hfi1_get_dma_mr - get a DMA memory region - * @pd: protection domain for this memory region - * @acc: access flags - * - * Returns the memory region on success, otherwise returns an errno. - * Note that all DMA addresses should be created via the - * struct ib_dma_mapping_ops functions (see dma.c). 
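- *
- * Hedged consumer-side sketch (core verbs API of this era, not this
- * file): kernel ULPs reach this through the device's get_dma_mr hook
- * and then use the returned lkey, which is zero here, for kernel
- * virtual addresses:
- *
- *      mr = pd->device->get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
- *      sge.lkey = mr->lkey;    /* the unrestricted (zero) LKEY */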
- */ -struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc) -{ - struct hfi1_mr *mr = NULL; - struct ib_mr *ret; - int rval; - - if (ibpd_to_rvtpd(pd)->user) { - ret = ERR_PTR(-EPERM); - goto bail; - } - - mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - rval = init_mregion(&mr->mr, pd, 0); - if (rval) { - ret = ERR_PTR(rval); - goto bail; - } - - - rval = hfi1_alloc_lkey(&mr->mr, 1); - if (rval) { - ret = ERR_PTR(rval); - goto bail_mregion; - } - - mr->mr.access_flags = acc; - ret = &mr->ibmr; -done: - return ret; - -bail_mregion: - deinit_mregion(&mr->mr); -bail: - kfree(mr); - goto done; -} - -static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd) -{ - struct hfi1_mr *mr; - int rval = -ENOMEM; - int m; - - /* Allocate struct plus pointers to first level page tables. */ - m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ; - mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); - if (!mr) - goto bail; - - rval = init_mregion(&mr->mr, pd, count); - if (rval) - goto bail; - - rval = hfi1_alloc_lkey(&mr->mr, 0); - if (rval) - goto bail_mregion; - mr->ibmr.lkey = mr->mr.lkey; - mr->ibmr.rkey = mr->mr.lkey; -done: - return mr; - -bail_mregion: - deinit_mregion(&mr->mr); -bail: - kfree(mr); - mr = ERR_PTR(rval); - goto done; -} - -/** - * hfi1_reg_user_mr - register a userspace memory region - * @pd: protection domain for this memory region - * @start: starting userspace address - * @length: length of region to register - * @mr_access_flags: access flags for this memory region - * @udata: unused by the driver - * - * Returns the memory region on success, otherwise returns an errno. - */ -struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, - u64 virt_addr, int mr_access_flags, - struct ib_udata *udata) -{ - struct hfi1_mr *mr; - struct ib_umem *umem; - struct scatterlist *sg; - int n, m, entry; - struct ib_mr *ret; - - if (length == 0) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - - umem = ib_umem_get(pd->uobject->context, start, length, - mr_access_flags, 0); - if (IS_ERR(umem)) - return (void *) umem; - - n = umem->nmap; - - mr = alloc_mr(n, pd); - if (IS_ERR(mr)) { - ret = (struct ib_mr *)mr; - ib_umem_release(umem); - goto bail; - } - - mr->mr.user_base = start; - mr->mr.iova = virt_addr; - mr->mr.length = length; - mr->mr.offset = ib_umem_offset(umem); - mr->mr.access_flags = mr_access_flags; - mr->umem = umem; - - if (is_power_of_2(umem->page_size)) - mr->mr.page_shift = ilog2(umem->page_size); - m = 0; - n = 0; - for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { - void *vaddr; - - vaddr = page_address(sg_page(sg)); - if (!vaddr) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - mr->mr.map[m]->segs[n].vaddr = vaddr; - mr->mr.map[m]->segs[n].length = umem->page_size; - n++; - if (n == RVT_SEGSZ) { - m++; - n = 0; - } - } - ret = &mr->ibmr; - -bail: - return ret; -} - -/** - * hfi1_dereg_mr - unregister and free a memory region - * @ibmr: the memory region to free - * - * Returns 0 on success. - * - * Note that this is called to free MRs created by hfi1_get_dma_mr() - * or hfi1_reg_user_mr(). 
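- *
- * The wait below pairs with the mregion refcount set up in
- * init_mregion(); a sketch of the put side (assumed to match the
- * rdmavt helper this series converts to):
- *
- *      static inline void rvt_put_mr(struct rvt_mregion *mr)
- *      {
- *              if (unlikely(atomic_dec_and_test(&mr->refcount)))
- *                      complete(&mr->comp);
- *      }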
- */ -int hfi1_dereg_mr(struct ib_mr *ibmr) -{ - struct hfi1_mr *mr = to_imr(ibmr); - int ret = 0; - unsigned long timeout; - - hfi1_free_lkey(&mr->mr); - - hfi1_put_mr(&mr->mr); /* will set completion if last */ - timeout = wait_for_completion_timeout(&mr->mr.comp, - 5 * HZ); - if (!timeout) { - dd_dev_err( - dd_from_ibdev(mr->mr.pd->device), - "hfi1_dereg_mr timeout mr %p pd %p refcount %u\n", - mr, mr->mr.pd, atomic_read(&mr->mr.refcount)); - hfi1_get_mr(&mr->mr); - ret = -EBUSY; - goto out; - } - deinit_mregion(&mr->mr); - if (mr->umem) - ib_umem_release(mr->umem); - kfree(mr); -out: - return ret; -} - -/* - * Allocate a memory region usable with the - * IB_WR_REG_MR send work request. - * - * Return the memory region on success, otherwise return an errno. - * FIXME: IB_WR_REG_MR is not supported - */ -struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd, - enum ib_mr_type mr_type, - u32 max_num_sg) -{ - struct hfi1_mr *mr; - - if (mr_type != IB_MR_TYPE_MEM_REG) - return ERR_PTR(-EINVAL); - - mr = alloc_mr(max_num_sg, pd); - if (IS_ERR(mr)) - return (struct ib_mr *)mr; - - return &mr->ibmr; -} - -/** - * hfi1_alloc_fmr - allocate a fast memory region - * @pd: the protection domain for this memory region - * @mr_access_flags: access flags for this memory region - * @fmr_attr: fast memory region attributes - * - * Returns the memory region on success, otherwise returns an errno. - */ -struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr) -{ - struct hfi1_fmr *fmr; - int m; - struct ib_fmr *ret; - int rval = -ENOMEM; - - /* Allocate struct plus pointers to first level page tables. */ - m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ; - fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); - if (!fmr) - goto bail; - - rval = init_mregion(&fmr->mr, pd, fmr_attr->max_pages); - if (rval) - goto bail; - - /* - * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & - * rkey. - */ - rval = hfi1_alloc_lkey(&fmr->mr, 0); - if (rval) - goto bail_mregion; - fmr->ibfmr.rkey = fmr->mr.lkey; - fmr->ibfmr.lkey = fmr->mr.lkey; - /* - * Resources are allocated but no valid mapping (RKEY can't be - * used). - */ - fmr->mr.access_flags = mr_access_flags; - fmr->mr.max_segs = fmr_attr->max_pages; - fmr->mr.page_shift = fmr_attr->page_shift; - - ret = &fmr->ibfmr; -done: - return ret; - -bail_mregion: - deinit_mregion(&fmr->mr); -bail: - kfree(fmr); - ret = ERR_PTR(rval); - goto done; -} - -/** - * hfi1_map_phys_fmr - set up a fast memory region - * @ibmfr: the fast memory region to set up - * @page_list: the list of pages to associate with the fast memory region - * @list_len: the number of pages to associate with the fast memory region - * @iova: the virtual address of the start of the fast memory region - * - * This may be called from interrupt context. 
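- *
- * Hedged usage sketch via the core verbs wrapper of this era:
- *
- *      u64 pages[2] = { dma0, dma1 };  /* page-aligned DMA addresses */
- *      ret = ib_map_phys_fmr(fmr, pages, 2, iova);
- *      /* on success the FMR maps 2 << page_shift bytes at iova */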
- */ - -int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, - int list_len, u64 iova) -{ - struct hfi1_fmr *fmr = to_ifmr(ibfmr); - struct rvt_lkey_table *rkt; - unsigned long flags; - int m, n, i; - u32 ps; - int ret; - - i = atomic_read(&fmr->mr.refcount); - if (i > 2) - return -EBUSY; - - if (list_len > fmr->mr.max_segs) { - ret = -EINVAL; - goto bail; - } - rkt = &to_idev(ibfmr->device)->lk_table; - spin_lock_irqsave(&rkt->lock, flags); - fmr->mr.user_base = iova; - fmr->mr.iova = iova; - ps = 1 << fmr->mr.page_shift; - fmr->mr.length = list_len * ps; - m = 0; - n = 0; - for (i = 0; i < list_len; i++) { - fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; - fmr->mr.map[m]->segs[n].length = ps; - if (++n == RVT_SEGSZ) { - m++; - n = 0; - } - } - spin_unlock_irqrestore(&rkt->lock, flags); - ret = 0; - -bail: - return ret; -} - -/** - * hfi1_unmap_fmr - unmap fast memory regions - * @fmr_list: the list of fast memory regions to unmap - * - * Returns 0 on success. - */ -int hfi1_unmap_fmr(struct list_head *fmr_list) -{ - struct hfi1_fmr *fmr; - struct rvt_lkey_table *rkt; - unsigned long flags; - - list_for_each_entry(fmr, fmr_list, ibfmr.list) { - rkt = &to_idev(fmr->ibfmr.device)->lk_table; - spin_lock_irqsave(&rkt->lock, flags); - fmr->mr.user_base = 0; - fmr->mr.iova = 0; - fmr->mr.length = 0; - spin_unlock_irqrestore(&rkt->lock, flags); - } - return 0; -} - -/** - * hfi1_dealloc_fmr - deallocate a fast memory region - * @ibfmr: the fast memory region to deallocate - * - * Returns 0 on success. - */ -int hfi1_dealloc_fmr(struct ib_fmr *ibfmr) -{ - struct hfi1_fmr *fmr = to_ifmr(ibfmr); - int ret = 0; - unsigned long timeout; - - hfi1_free_lkey(&fmr->mr); - hfi1_put_mr(&fmr->mr); /* will set completion if last */ - timeout = wait_for_completion_timeout(&fmr->mr.comp, - 5 * HZ); - if (!timeout) { - hfi1_get_mr(&fmr->mr); - ret = -EBUSY; - goto out; - } - deinit_mregion(&fmr->mr); - kfree(fmr); -out: - return ret; -} diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 25d65f9a0b94..8ee7ed8e0fb7 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -1526,8 +1526,8 @@ static void sc_piobufavail(struct send_context *sc) struct hfi1_devdata *dd = sc->dd; struct hfi1_ibdev *dev = &dd->verbs_dev; struct list_head *list; - struct hfi1_qp *qps[PIO_WAIT_BATCH_SIZE]; - struct hfi1_qp *qp; + struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE]; + struct rvt_qp *qp; struct hfi1_qp_priv *priv; unsigned long flags; unsigned i, n = 0; diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 9fcf052b62bd..5a6845509d16 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -67,7 +67,7 @@ static unsigned int hfi1_qp_table_size = 256; module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); -static void flush_tx_list(struct hfi1_qp *qp); +static void flush_tx_list(struct rvt_qp *qp); static int iowait_sleep( struct sdma_engine *sde, struct iowait *wait, @@ -229,7 +229,7 @@ static void free_qpn(struct hfi1_qpn_table *qpt, u32 qpn) * Put the QP into the hash table. * The hash table holds a reference to the QP. 
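 *
 * Bucket selection sketch (assumed to match the qpn_hash() declared
 * in qp.h, hashing the QPN into qp_table_bits):
 *
 *      static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn)
 *      {
 *              return hash_32(qpn, dev->qp_table_bits);
 *      }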
*/ -static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp) +static void insert_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); unsigned long flags; @@ -254,7 +254,7 @@ static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp) * Remove the QP from the table so it can't be found asynchronously by * the receive interrupt routine. */ -static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp) +static void remove_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num); @@ -270,8 +270,8 @@ static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp) lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) { RCU_INIT_POINTER(ibp->qp[1], NULL); } else { - struct hfi1_qp *q; - struct hfi1_qp __rcu **qpp; + struct rvt_qp *q; + struct rvt_qp __rcu **qpp; removed = 0; qpp = &dev->qp_dev->qp_table[n]; @@ -308,7 +308,7 @@ static unsigned free_all_qps(struct hfi1_devdata *dd) { struct hfi1_ibdev *dev = &dd->verbs_dev; unsigned long flags; - struct hfi1_qp *qp; + struct rvt_qp *qp; unsigned n, qp_inuse = 0; for (n = 0; n < dd->num_pports; n++) { @@ -347,7 +347,7 @@ bail: * @qp: the QP to reset * @type: the QP type */ -static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type) +static void reset_qp(struct rvt_qp *qp, enum ib_qp_type type) { struct hfi1_qp_priv *priv = qp->priv; qp->remote_qpn = 0; @@ -402,7 +402,7 @@ static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type) qp->r_sge.num_sge = 0; } -static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) +static void clear_mr_refs(struct rvt_qp *qp, int clr_sends) { unsigned n; @@ -413,13 +413,13 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) if (clr_sends) { while (qp->s_last != qp->s_head) { - struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last); + struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last); unsigned i; for (i = 0; i < wqe->wr.num_sge; i++) { - struct hfi1_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); } if (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_SMI || @@ -429,7 +429,7 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) qp->s_last = 0; } if (qp->s_rdma_mr) { - hfi1_put_mr(qp->s_rdma_mr); + rvt_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } } @@ -438,11 +438,11 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) return; for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { - struct hfi1_ack_entry *e = &qp->s_ack_queue[n]; + struct rvt_ack_entry *e = &qp->s_ack_queue[n]; if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && e->rdma_sge.mr) { - hfi1_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } } @@ -458,7 +458,7 @@ static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends) * The QP r_lock and s_lock should be held and interrupts disabled. * If we are already in error state, just return. 
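 *
 * Typical caller shape (a sketch, not lifted verbatim from this file;
 * the event raising is elided):
 *
 *      spin_lock_irqsave(&qp->r_lock, flags);
 *      spin_lock(&qp->s_lock);
 *      lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 *      spin_unlock(&qp->s_lock);
 *      spin_unlock_irqrestore(&qp->r_lock, flags);
 *      if (lastwqe)
 *              /* raise IB_EVENT_QP_LAST_WQE_REACHED to the consumer */;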
*/ -int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err) +int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err) { struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); struct hfi1_qp_priv *priv = qp->priv; @@ -490,7 +490,7 @@ int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err) if (!(qp->s_flags & HFI1_S_BUSY)) { qp->s_hdrwords = 0; if (qp->s_rdma_mr) { - hfi1_put_mr(qp->s_rdma_mr); + rvt_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } flush_tx_list(qp); @@ -514,7 +514,7 @@ int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err) wc.status = IB_WC_WR_FLUSH_ERR; if (qp->r_rq.wq) { - struct hfi1_rwq *wq; + struct rvt_rwq *wq; u32 head; u32 tail; @@ -544,7 +544,7 @@ bail: return ret; } -static void flush_tx_list(struct hfi1_qp *qp) +static void flush_tx_list(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; @@ -561,7 +561,7 @@ static void flush_tx_list(struct hfi1_qp *qp) } } -static void flush_iowait(struct hfi1_qp *qp) +static void flush_iowait(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); @@ -616,7 +616,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct hfi1_ibdev *dev = to_idev(ibqp->device); - struct hfi1_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = to_iqp(ibqp); struct hfi1_qp_priv *priv = qp->priv; enum ib_qp_state cur_state, new_state; struct ib_event ev; @@ -915,7 +915,7 @@ bail: int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { - struct hfi1_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = to_iqp(ibqp); attr->qp_state = qp->state; attr->cur_qp_state = attr->qp_state; @@ -968,7 +968,7 @@ int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, * * Returns the AETH. 
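 *
 * Layout recap (an 8-bit syndrome over a 24-bit MSN, matching the
 * HFI1_MSN_MASK / HFI1_AETH_CREDIT_SHIFT constants used below):
 *
 *      aeth[31:24] = credit syndrome (encoded RWQE headroom)
 *      aeth[23:0]  = qp->r_msn & HFI1_MSN_MASK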
*/ -__be32 hfi1_compute_aeth(struct hfi1_qp *qp) +__be32 hfi1_compute_aeth(struct rvt_qp *qp) { u32 aeth = qp->r_msn & HFI1_MSN_MASK; @@ -981,7 +981,7 @@ __be32 hfi1_compute_aeth(struct hfi1_qp *qp) } else { u32 min, max, x; u32 credits; - struct hfi1_rwq *wq = qp->r_rq.wq; + struct rvt_rwq *wq = qp->r_rq.wq; u32 head; u32 tail; @@ -1037,10 +1037,10 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { - struct hfi1_qp *qp; + struct rvt_qp *qp; struct hfi1_qp_priv *priv; int err; - struct hfi1_swqe *swq = NULL; + struct rvt_swqe *swq = NULL; struct hfi1_ibdev *dev; struct hfi1_devdata *dd; size_t sz; @@ -1081,9 +1081,9 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, case IB_QPT_UC: case IB_QPT_RC: case IB_QPT_UD: - sz = sizeof(struct hfi1_sge) * + sz = sizeof(struct rvt_sge) * init_attr->cap.max_send_sge + - sizeof(struct hfi1_swqe); + sizeof(struct rvt_swqe); swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz); if (swq == NULL) { ret = ERR_PTR(-ENOMEM); @@ -1127,8 +1127,8 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, qp->r_rq.size = init_attr->cap.max_recv_wr + 1; qp->r_rq.max_sge = init_attr->cap.max_recv_sge; sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + - sizeof(struct hfi1_rwqe); - qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + + sizeof(struct rvt_rwqe); + qp->r_rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + qp->r_rq.size * sz); if (!qp->r_rq.wq) { ret = ERR_PTR(-ENOMEM); @@ -1192,7 +1192,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, goto bail_ip; } } else { - u32 s = sizeof(struct hfi1_rwq) + qp->r_rq.size * sz; + u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; qp->ip = hfi1_create_mmap_info(dev, s, ibpd->uobject->context, @@ -1281,7 +1281,7 @@ bail: */ int hfi1_destroy_qp(struct ib_qp *ibqp) { - struct hfi1_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = to_iqp(ibqp); struct hfi1_ibdev *dev = to_idev(ibqp->device); struct hfi1_qp_priv *priv = qp->priv; @@ -1387,7 +1387,7 @@ static void free_qpn_table(struct hfi1_qpn_table *qpt) * * The QP s_lock should be held. */ -void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth) +void hfi1_get_credit(struct rvt_qp *qp, u32 aeth) { u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK; @@ -1417,7 +1417,7 @@ void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth) } } -void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag) +void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag) { unsigned long flags; @@ -1440,7 +1440,7 @@ static int iowait_sleep( unsigned seq) { struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq); - struct hfi1_qp *qp; + struct rvt_qp *qp; struct hfi1_qp_priv *priv; unsigned long flags; int ret = 0; @@ -1491,7 +1491,7 @@ eagain: static void iowait_wakeup(struct iowait *wait, int reason) { - struct hfi1_qp *qp = iowait_to_qp(wait); + struct rvt_qp *qp = iowait_to_qp(wait); WARN_ON(reason != SDMA_AVAIL_REASON); hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC); @@ -1558,7 +1558,7 @@ void hfi1_qp_exit(struct hfi1_ibdev *dev) * Return: * A send engine for the qp or NULL for SMI type qp. 
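 *
 * Usage sketch from the send path (the sc5 derivation here is an
 * assumption):
 *
 *      sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
 *      priv->s_sde = qp_to_sdma_engine(qp, sc5);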
*/ -struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5) +struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct sdma_engine *sde; @@ -1577,7 +1577,7 @@ struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5) struct qp_iter { struct hfi1_ibdev *dev; - struct hfi1_qp *qp; + struct rvt_qp *qp; int specials; int n; }; @@ -1605,8 +1605,8 @@ int qp_iter_next(struct qp_iter *iter) struct hfi1_ibdev *dev = iter->dev; int n = iter->n; int ret = 1; - struct hfi1_qp *pqp = iter->qp; - struct hfi1_qp *qp; + struct rvt_qp *pqp = iter->qp; + struct rvt_qp *qp; /* * The approach is to consider the special qps @@ -1659,7 +1659,7 @@ static const char * const qp_type_str[] = { "SMI", "GSI", "RC", "UC", "UD", }; -static int qp_idle(struct hfi1_qp *qp) +static int qp_idle(struct rvt_qp *qp) { return qp->s_last == qp->s_acked && @@ -1670,8 +1670,8 @@ static int qp_idle(struct hfi1_qp *qp) void qp_iter_print(struct seq_file *s, struct qp_iter *iter) { - struct hfi1_swqe *wqe; - struct hfi1_qp *qp = iter->qp; + struct rvt_swqe *wqe; + struct rvt_qp *qp = iter->qp; struct hfi1_qp_priv *priv = qp->priv; struct sdma_engine *sde; @@ -1709,7 +1709,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) sde ? sde->this_idx : 0); } -void qp_comm_est(struct hfi1_qp *qp) +void qp_comm_est(struct rvt_qp *qp) { qp->r_flags |= HFI1_R_COMM_EST; if (qp->ibqp.event_handler) { @@ -1726,7 +1726,7 @@ void qp_comm_est(struct hfi1_qp *qp) * Switch to alternate path. * The QP s_lock should be held and interrupts disabled. */ -void hfi1_migrate_qp(struct hfi1_qp *qp) +void hfi1_migrate_qp(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct ib_event ev; diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 474c838e3b50..3dd31e9e2c6b 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -80,7 +80,7 @@ struct hfi1_qpn_table { struct hfi1_qp_ibdev { u32 qp_table_size; u32 qp_table_bits; - struct hfi1_qp __rcu **qp_table; + struct rvt_qp __rcu **qp_table; spinlock_t qpt_lock; struct hfi1_qpn_table qpn_table; }; @@ -98,10 +98,10 @@ static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn) * The caller must hold the rcu_read_lock(), and keep the lock until * the returned qp is no longer in use. */ -static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp, - u32 qpn) __must_hold(RCU) +static inline struct rvt_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp, + u32 qpn) __must_hold(RCU) { - struct hfi1_qp *qp = NULL; + struct rvt_qp *qp = NULL; if (unlikely(qpn <= 1)) { qp = rcu_dereference(ibp->qp[qpn]); @@ -117,11 +117,10 @@ static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp, return qp; } -/** - * clear_ahg - reset ahg status in qp - * @qp - qp pointer +/* + * free_ahg - clear ahg from QP */ -static inline void clear_ahg(struct hfi1_qp *qp) +static inline void clear_ahg(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; @@ -142,7 +141,7 @@ static inline void clear_ahg(struct hfi1_qp *qp) * The QP r_lock and s_lock should be held and interrupts disabled. * If we are already in error state, just return. */ -int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err); +int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err); /** * hfi1_modify_qp - modify the attributes of a queue pair @@ -165,7 +164,7 @@ int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, * * Returns the AETH. 
*/ -__be32 hfi1_compute_aeth(struct hfi1_qp *qp); +__be32 hfi1_compute_aeth(struct rvt_qp *qp); /** * hfi1_create_qp - create a queue pair for a device @@ -198,7 +197,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp); * * The QP s_lock should be held. */ -void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth); +void hfi1_get_credit(struct rvt_qp *qp, u32 aeth); /** * hfi1_qp_init - allocate QP tables @@ -217,9 +216,9 @@ void hfi1_qp_exit(struct hfi1_ibdev *dev); * @qp: the QP * @flag: flag the qp on which the qp is stalled */ -void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag); +void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag); -struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5); +struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5); struct qp_iter; @@ -246,7 +245,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter); * qp_comm_est - handle trap with QP established * @qp: the QP */ -void qp_comm_est(struct hfi1_qp *qp); +void qp_comm_est(struct rvt_qp *qp); /** * _hfi1_schedule_send - schedule progress @@ -257,7 +256,7 @@ void qp_comm_est(struct hfi1_qp *qp); * It is only used in the post send, which doesn't hold * the s_lock. */ -static inline void _hfi1_schedule_send(struct hfi1_qp *qp) +static inline void _hfi1_schedule_send(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibport *ibp = @@ -278,12 +277,12 @@ static inline void _hfi1_schedule_send(struct hfi1_qp *qp) * This schedules qp progress and caller should hold * the s_lock. */ -static inline void hfi1_schedule_send(struct hfi1_qp *qp) +static inline void hfi1_schedule_send(struct rvt_qp *qp) { if (hfi1_send_ok(qp)) _hfi1_schedule_send(qp); } -void hfi1_migrate_qp(struct hfi1_qp *qp); +void hfi1_migrate_qp(struct rvt_qp *qp); #endif /* _QP_H */ diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 70d5bd1ec1d2..4b8518ac9e7f 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -60,7 +60,7 @@ static void rc_timeout(unsigned long arg); -static u32 restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe, +static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 psn, u32 pmtu) { u32 len; @@ -74,7 +74,7 @@ static u32 restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe, return wqe->length - len; } -static void start_timer(struct hfi1_qp *qp) +static void start_timer(struct rvt_qp *qp) { qp->s_flags |= HFI1_S_TIMER; qp->s_timer.function = rc_timeout; @@ -94,10 +94,10 @@ static void start_timer(struct hfi1_qp *qp) * Note that we are in the responder's side of the QP context. * Note the QP s_lock must be held. 
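 *
 * Call-site sketch (responder work rides on the requester's
 * make-request path; the flag name is an assumption here):
 *
 *      if ((qp->s_flags & HFI1_S_RESP_PENDING) &&
 *          make_rc_ack(dev, qp, ohdr, pmtu))
 *              goto done;      /* an ack/read-response was built */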
*/ -static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp, +static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, struct hfi1_other_headers *ohdr, u32 pmtu) { - struct hfi1_ack_entry *e; + struct rvt_ack_entry *e; u32 hwords; u32 len; u32 bth0; @@ -116,7 +116,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp, case OP(RDMA_READ_RESPONSE_ONLY): e = &qp->s_ack_queue[qp->s_tail_ack_queue]; if (e->rdma_sge.mr) { - hfi1_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } /* FALLTHROUGH */ @@ -154,7 +154,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp, /* Copy SGE state in case we need to resend */ qp->s_rdma_mr = e->rdma_sge.mr; if (qp->s_rdma_mr) - hfi1_get_mr(qp->s_rdma_mr); + rvt_get_mr(qp->s_rdma_mr); qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.num_sge = 1; qp->s_cur_sge = &qp->s_ack_rdma_sge; @@ -193,7 +193,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp, qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; if (qp->s_rdma_mr) - hfi1_get_mr(qp->s_rdma_mr); + rvt_get_mr(qp->s_rdma_mr); len = qp->s_ack_rdma_sge.sge.sge_length; if (len > pmtu) { len = pmtu; @@ -257,13 +257,13 @@ bail: * * Return 1 if constructed; otherwise, return 0. */ -int hfi1_make_rc_req(struct hfi1_qp *qp) +int hfi1_make_rc_req(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); struct hfi1_other_headers *ohdr; - struct hfi1_sge_state *ss; - struct hfi1_swqe *wqe; + struct rvt_sge_state *ss; + struct rvt_swqe *wqe; /* header size in 32-bit words LRH+BTH = (8+12)/4. */ u32 hwords = 5; u32 len; @@ -683,7 +683,7 @@ unlock: * Note that RDMA reads and atomics are handled in the * send side QP state and tasklet. */ -void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct hfi1_qp *qp, +void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, int is_fecn) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); @@ -794,10 +794,10 @@ queue_ack: * for the given QP. * Called at interrupt level with the QP s_lock held. */ -static void reset_psn(struct hfi1_qp *qp, u32 psn) +static void reset_psn(struct rvt_qp *qp, u32 psn) { u32 n = qp->s_acked; - struct hfi1_swqe *wqe = get_swqe_ptr(qp, n); + struct rvt_swqe *wqe = get_swqe_ptr(qp, n); u32 opcode; qp->s_cur = n; @@ -880,9 +880,9 @@ done: * Back up requester to resend the last un-ACKed request. * The QP r_lock and s_lock should be held and interrupts disabled. */ -static void restart_rc(struct hfi1_qp *qp, u32 psn, int wait) +static void restart_rc(struct rvt_qp *qp, u32 psn, int wait) { - struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); + struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); struct hfi1_ibport *ibp; if (qp->s_retry == 0) { @@ -917,7 +917,7 @@ static void restart_rc(struct hfi1_qp *qp, u32 psn, int wait) */ static void rc_timeout(unsigned long arg) { - struct hfi1_qp *qp = (struct hfi1_qp *)arg; + struct rvt_qp *qp = (struct rvt_qp *)arg; struct hfi1_ibport *ibp; unsigned long flags; @@ -941,7 +941,7 @@ static void rc_timeout(unsigned long arg) */ void hfi1_rc_rnr_retry(unsigned long arg) { - struct hfi1_qp *qp = (struct hfi1_qp *)arg; + struct rvt_qp *qp = (struct rvt_qp *)arg; unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); @@ -957,9 +957,9 @@ void hfi1_rc_rnr_retry(unsigned long arg) * Set qp->s_sending_psn to the next PSN after the given one. * This would be psn+1 except when RDMA reads are present. 
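 *
 * Example: a WQE for a 3-packet RDMA read occupies psn..psn+2 on the
 * wire, so the scan advances past the whole request (a sketch of the
 * per-WQE step):
 *
 *      if (cmp_psn(psn, wqe->lpsn) <= 0) {
 *              if (wqe->wr.opcode == IB_WR_RDMA_READ)
 *                      qp->s_sending_psn = wqe->lpsn + 1;
 *              else
 *                      qp->s_sending_psn = psn + 1;
 *      }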
*/ -static void reset_sending_psn(struct hfi1_qp *qp, u32 psn) +static void reset_sending_psn(struct rvt_qp *qp, u32 psn) { - struct hfi1_swqe *wqe; + struct rvt_swqe *wqe; u32 n = qp->s_last; /* Find the work request corresponding to the given PSN. */ @@ -982,10 +982,10 @@ static void reset_sending_psn(struct hfi1_qp *qp, u32 psn) /* * This should be called with the QP s_lock held and interrupts disabled. */ -void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr) +void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) { struct hfi1_other_headers *ohdr; - struct hfi1_swqe *wqe; + struct rvt_swqe *wqe; struct ib_wc wc; unsigned i; u32 opcode; @@ -1027,9 +1027,9 @@ void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr) cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; for (i = 0; i < wqe->wr.num_sge; i++) { - struct hfi1_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); } /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || @@ -1059,7 +1059,7 @@ void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr) } } -static inline void update_last_psn(struct hfi1_qp *qp, u32 psn) +static inline void update_last_psn(struct rvt_qp *qp, u32 psn) { qp->s_last_psn = psn; } @@ -1069,9 +1069,9 @@ static inline void update_last_psn(struct hfi1_qp *qp, u32 psn) * This is similar to hfi1_send_complete but has to check to be sure * that the SGEs are not being referenced if the SWQE is being resent. */ -static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp, - struct hfi1_swqe *wqe, - struct hfi1_ibport *ibp) +static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, + struct rvt_swqe *wqe, + struct hfi1_ibport *ibp) { struct ib_wc wc; unsigned i; @@ -1084,9 +1084,9 @@ static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp, if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { for (i = 0; i < wqe->wr.num_sge; i++) { - struct hfi1_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); } /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || @@ -1158,12 +1158,12 @@ static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp, * May be called at interrupt level, with the QP s_lock held. * Returns 1 if OK, 0 if current operation should be aborted (NAK). */ -static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode, +static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val, struct hfi1_ctxtdata *rcd) { struct hfi1_ibport *ibp; enum ib_wc_status status; - struct hfi1_swqe *wqe; + struct rvt_swqe *wqe; int ret = 0; u32 ack_psn; int diff; @@ -1381,10 +1381,10 @@ bail: * We have seen an out of sequence RDMA read middle or last packet. * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. 
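 *
 * Sketch of the completion walk (everything before the first read or
 * atomic is completed, then that request is restarted):
 *
 *      while (cmp_psn(psn, wqe->lpsn) > 0) {
 *              if (wqe->wr.opcode == IB_WR_RDMA_READ ||
 *                  wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
 *                  wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
 *                      break;
 *              wqe = do_rc_completion(qp, wqe, ibp);
 *      }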
*/ -static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn, +static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, struct hfi1_ctxtdata *rcd) { - struct hfi1_swqe *wqe; + struct rvt_swqe *wqe; /* Remove QP from retry timer */ if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) { @@ -1430,11 +1430,11 @@ static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn, */ static void rc_rcv_resp(struct hfi1_ibport *ibp, struct hfi1_other_headers *ohdr, - void *data, u32 tlen, struct hfi1_qp *qp, + void *data, u32 tlen, struct rvt_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct hfi1_ctxtdata *rcd) { - struct hfi1_swqe *wqe; + struct rvt_swqe *wqe; enum ib_wc_status status; unsigned long flags; int diff; @@ -1610,7 +1610,7 @@ bail: } static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd, - struct hfi1_qp *qp) + struct rvt_qp *qp) { if (list_empty(&qp->rspwait)) { qp->r_flags |= HFI1_R_RSP_DEFERED_ACK; @@ -1619,7 +1619,7 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd, } } -static inline void rc_cancel_ack(struct hfi1_qp *qp) +static inline void rc_cancel_ack(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; @@ -1648,11 +1648,11 @@ static inline void rc_cancel_ack(struct hfi1_qp *qp) * schedule a response to be sent. */ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, - struct hfi1_qp *qp, u32 opcode, u32 psn, int diff, + struct rvt_qp *qp, u32 opcode, u32 psn, int diff, struct hfi1_ctxtdata *rcd) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - struct hfi1_ack_entry *e; + struct rvt_ack_entry *e; unsigned long flags; u8 i, prev; int old_req; @@ -1750,7 +1750,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, if (unlikely(offset + len != e->rdma_sge.sge_length)) goto unlock_done; if (e->rdma_sge.mr) { - hfi1_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } if (len != 0) { @@ -1758,8 +1758,8 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, u64 vaddr = be64_to_cpu(reth->vaddr); int ok; - ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, - IB_ACCESS_REMOTE_READ); + ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, + IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto unlock_done; } else { @@ -1826,7 +1826,7 @@ send_ack: return 0; } -void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err) +void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err) { unsigned long flags; int lastwqe; @@ -1845,7 +1845,7 @@ void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err) } } -static inline void update_ack_queue(struct hfi1_qp *qp, unsigned n) +static inline void update_ack_queue(struct rvt_qp *qp, unsigned n) { unsigned next; @@ -1960,7 +1960,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) u32 rcv_flags = packet->rcv_flags; void *data = packet->ebuf; u32 tlen = packet->tlen; - struct hfi1_qp *qp = packet->qp; + struct rvt_qp *qp = packet->qp; struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_other_headers *ohdr = packet->ohdr; @@ -2177,8 +2177,8 @@ send_last: int ok; /* Check rkey & NAK */ - ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, - rkey, IB_ACCESS_REMOTE_WRITE); + ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, + rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) goto nack_acc; qp->r_sge.num_sge = 1; @@ -2203,7 +2203,7 @@ send_last: goto send_last; 
case OP(RDMA_READ_REQUEST): { - struct hfi1_ack_entry *e; + struct rvt_ack_entry *e; u32 len; u8 next; @@ -2221,7 +2221,7 @@ send_last: } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { - hfi1_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } reth = &ohdr->u.rc.reth; @@ -2232,8 +2232,8 @@ send_last: int ok; /* Check rkey & NAK */ - ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr, - rkey, IB_ACCESS_REMOTE_READ); + ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, + rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto nack_acc_unlck; /* @@ -2276,7 +2276,7 @@ send_last: case OP(COMPARE_SWAP): case OP(FETCH_ADD): { struct ib_atomic_eth *ateth; - struct hfi1_ack_entry *e; + struct rvt_ack_entry *e; u64 vaddr; atomic64_t *maddr; u64 sdata; @@ -2296,7 +2296,7 @@ send_last: } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { - hfi1_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } ateth = &ohdr->u.atomic_eth; @@ -2306,9 +2306,9 @@ send_last: goto nack_inv_unlck; rkey = be32_to_cpu(ateth->rkey); /* Check rkey & NAK */ - if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), - vaddr, rkey, - IB_ACCESS_REMOTE_ATOMIC))) + if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), + vaddr, rkey, + IB_ACCESS_REMOTE_ATOMIC))) goto nack_acc_unlck; /* Perform atomic OP and save result. */ maddr = (atomic64_t *) qp->r_sge.sge.vaddr; @@ -2318,7 +2318,7 @@ send_last: (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, be64_to_cpu(ateth->compare_data), sdata); - hfi1_put_mr(qp->r_sge.sge.mr); + rvt_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; e->opcode = opcode; e->sent = 0; @@ -2408,7 +2408,7 @@ void hfi1_rc_hdrerr( struct hfi1_ctxtdata *rcd, struct hfi1_ib_header *hdr, u32 rcv_flags, - struct hfi1_qp *qp) + struct rvt_qp *qp) { int has_grh = rcv_flags & HFI1_HAS_GRH; struct hfi1_other_headers *ohdr; diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 33bcfe5bfd13..762fca9d9ad4 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -97,15 +97,15 @@ const u32 ib_hfi1_rnr_table[32] = { * Validate a RWQE and fill in the SGE state. * Return 1 if OK. */ -static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe) +static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) { int i, j, ret; struct ib_wc wc; struct rvt_lkey_table *rkt; struct rvt_pd *pd; - struct hfi1_sge_state *ss; + struct rvt_sge_state *ss; - rkt = &to_idev(qp->ibqp.device)->lk_table; + rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table; pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); ss = &qp->r_sge; ss->sg_list = qp->r_sg_list; @@ -114,8 +114,8 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe) if (wqe->sg_list[i].length == 0) continue; /* Check LKEY */ - if (!hfi1_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, - &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) + if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, + &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) goto bad_lkey; qp->r_len += wqe->sg_list[i].length; j++; @@ -127,9 +127,9 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe) bad_lkey: while (j) { - struct hfi1_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; + struct rvt_sge *sge = --j ? 
&ss->sg_list[j - 1] : &ss->sge; - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); } ss->num_sge = 0; memset(&wc, 0, sizeof(wc)); @@ -154,13 +154,13 @@ bail: * * Can be called from interrupt level. */ -int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only) +int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only) { unsigned long flags; - struct hfi1_rq *rq; - struct hfi1_rwq *wq; + struct rvt_rq *rq; + struct rvt_rwq *wq; struct hfi1_srq *srq; - struct hfi1_rwqe *wqe; + struct rvt_rwqe *wqe; void (*handler)(struct ib_event *, void *); u32 tail; int ret; @@ -265,7 +265,7 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) * The s_lock will be acquired around the hfi1_migrate_qp() call. */ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, - int has_grh, struct hfi1_qp *qp, u32 bth0) + int has_grh, struct rvt_qp *qp, u32 bth0) { __be64 guid; unsigned long flags; @@ -355,12 +355,12 @@ err: * receive interrupts since this is a connected protocol and all packets * will pass through here. */ -static void ruc_loopback(struct hfi1_qp *sqp) +static void ruc_loopback(struct rvt_qp *sqp) { struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); - struct hfi1_qp *qp; - struct hfi1_swqe *wqe; - struct hfi1_sge *sge; + struct rvt_qp *qp; + struct rvt_swqe *wqe; + struct rvt_sge *sge; unsigned long flags; struct ib_wc wc; u64 sdata; @@ -461,11 +461,10 @@ again: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto inv_err; if (wqe->length == 0) - break; - if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length, - wqe->rdma_wr.remote_addr, - wqe->rdma_wr.rkey, - IB_ACCESS_REMOTE_WRITE))) + if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length, + wqe->rdma_wr.remote_addr, + wqe->rdma_wr.rkey, + IB_ACCESS_REMOTE_WRITE))) goto acc_err; qp->r_sge.sg_list = NULL; qp->r_sge.num_sge = 1; @@ -475,10 +474,10 @@ again: case IB_WR_RDMA_READ: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) goto inv_err; - if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, - wqe->rdma_wr.remote_addr, - wqe->rdma_wr.rkey, - IB_ACCESS_REMOTE_READ))) + if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, + wqe->rdma_wr.remote_addr, + wqe->rdma_wr.rkey, + IB_ACCESS_REMOTE_READ))) goto acc_err; release = 0; sqp->s_sge.sg_list = NULL; @@ -493,10 +492,10 @@ again: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; - if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), - wqe->atomic_wr.remote_addr, - wqe->atomic_wr.rkey, - IB_ACCESS_REMOTE_ATOMIC))) + if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), + wqe->atomic_wr.remote_addr, + wqe->atomic_wr.rkey, + IB_ACCESS_REMOTE_ATOMIC))) goto acc_err; /* Perform atomic OP and save result. */ maddr = (atomic64_t *) qp->r_sge.sge.vaddr; @@ -506,7 +505,7 @@ again: (u64) atomic64_add_return(sdata, maddr) - sdata : (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, sdata, wqe->atomic_wr.swap); - hfi1_put_mr(qp->r_sge.sge.mr); + rvt_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; goto send_comp; @@ -530,7 +529,7 @@ again: sge->sge_length -= len; if (sge->sge_length == 0) { if (!release) - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); if (--sqp->s_sge.num_sge) *sge = *sqp->s_sge.sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { @@ -690,7 +689,7 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, * Subsequent middles use the copied entry, editing the * PSN with 1 or 2 edits. 
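 *
 * Each "edit" is a (field offset, width, value) tuple the hardware
 * applies to the copied header; ahg_edit() and BTH2_OFFSET below are
 * hypothetical, shown only to illustrate the PSN bump in BTH2:
 *
 *      /* hypothetical helper: rewrite 16 bits of BTH2 with npsn */
 *      ahg_edit(priv->s_hdr, BTH2_OFFSET, 16, cpu_to_be16(npsn & 0xffff));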
*/ -static inline void build_ahg(struct hfi1_qp *qp, u32 npsn) +static inline void build_ahg(struct rvt_qp *qp, u32 npsn) { struct hfi1_qp_priv *priv = qp->priv; if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR)) @@ -734,7 +733,7 @@ static inline void build_ahg(struct hfi1_qp *qp, u32 npsn) } } -void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, +void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, u32 bth0, u32 bth2, int middle) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); @@ -812,9 +811,9 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, void hfi1_do_send(struct work_struct *work) { struct iowait *wait = container_of(work, struct iowait, iowork); - struct hfi1_qp *qp = iowait_to_qp(wait); + struct rvt_qp *qp = iowait_to_qp(wait); struct hfi1_pkt_state ps; - int (*make_req)(struct hfi1_qp *qp); + int (*make_req)(struct rvt_qp *qp); unsigned long flags; unsigned long timeout; @@ -876,7 +875,7 @@ void hfi1_do_send(struct work_struct *work) /* * This should be called with s_lock held. */ -void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe, +void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status) { u32 old_last, last; @@ -886,9 +885,9 @@ void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe, return; for (i = 0; i < wqe->wr.num_sge; i++) { - struct hfi1_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); } if (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_SMI || diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index fbd0e41be135..0f51c45869d5 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -379,10 +379,10 @@ struct sdma_txreq { struct verbs_txreq { struct hfi1_pio_header phdr; struct sdma_txreq txreq; - struct hfi1_qp *qp; - struct hfi1_swqe *wqe; + struct rvt_qp *qp; + struct rvt_swqe *wqe; struct rvt_mregion *mr; - struct hfi1_sge_state *ss; + struct rvt_sge_state *ss; struct sdma_engine *sde; u16 hdr_dwords; u16 hdr_inx; diff --git a/drivers/staging/rdma/hfi1/srq.c b/drivers/staging/rdma/hfi1/srq.c index 67786d417493..932bd96073ca 100644 --- a/drivers/staging/rdma/hfi1/srq.c +++ b/drivers/staging/rdma/hfi1/srq.c @@ -66,12 +66,12 @@ int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct hfi1_srq *srq = to_isrq(ibsrq); - struct hfi1_rwq *wq; + struct rvt_rwq *wq; unsigned long flags; int ret; for (; wr; wr = wr->next) { - struct hfi1_rwqe *wqe; + struct rvt_rwqe *wqe; u32 next; int i; @@ -149,8 +149,8 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd, srq->rq.size = srq_init_attr->attr.max_wr + 1; srq->rq.max_sge = srq_init_attr->attr.max_sge; sz = sizeof(struct ib_sge) * srq->rq.max_sge + - sizeof(struct hfi1_rwqe); - srq->rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + srq->rq.size * sz); + sizeof(struct rvt_rwqe); + srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz); if (!srq->rq.wq) { ret = ERR_PTR(-ENOMEM); goto bail_srq; @@ -162,7 +162,7 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd, */ if (udata && udata->outlen >= sizeof(__u64)) { int err; - u32 s = sizeof(struct hfi1_rwq) + srq->rq.size * sz; + u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; srq->ip = hfi1_create_mmap_info(dev, s, ibpd->uobject->context, @@ -230,12 +230,12 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr 
*attr, struct ib_udata *udata) { struct hfi1_srq *srq = to_isrq(ibsrq); - struct hfi1_rwq *wq; + struct rvt_rwq *wq; int ret = 0; if (attr_mask & IB_SRQ_MAX_WR) { - struct hfi1_rwq *owq; - struct hfi1_rwqe *p; + struct rvt_rwq *owq; + struct rvt_rwqe *p; u32 sz, size, n, head, tail; /* Check that the requested sizes are below the limits. */ @@ -246,10 +246,10 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, goto bail; } - sz = sizeof(struct hfi1_rwqe) + + sz = sizeof(struct rvt_rwqe) + srq->rq.max_sge * sizeof(struct ib_sge); size = attr->max_wr + 1; - wq = vmalloc_user(sizeof(struct hfi1_rwq) + size * sz); + wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz); if (!wq) { ret = -ENOMEM; goto bail; @@ -296,7 +296,7 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, n = 0; p = wq->wq; while (tail != head) { - struct hfi1_rwqe *wqe; + struct rvt_rwqe *wqe; int i; wqe = get_rwqe_ptr(&srq->rq, tail); @@ -305,7 +305,7 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, for (i = 0; i < wqe->num_sge; i++) p->sg_list[i] = wqe->sg_list[i]; n++; - p = (struct hfi1_rwqe *)((char *)p + sz); + p = (struct rvt_rwqe *)((char *)p + sz); if (++tail >= srq->rq.size) tail = 0; } @@ -320,9 +320,9 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, vfree(owq); if (srq->ip) { - struct hfi1_mmap_info *ip = srq->ip; + struct rvt_mmap_info *ip = srq->ip; struct hfi1_ibdev *dev = to_idev(srq->ibsrq.device); - u32 s = sizeof(struct hfi1_rwq) + size * sz; + u32 s = sizeof(struct rvt_rwq) + size * sz; hfi1_update_mmap_info(dev, ip, s, wq); diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h index 1e435675335f..14601d788c19 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/staging/rdma/hfi1/trace.h @@ -332,7 +332,7 @@ TRACE_EVENT(hfi1_wantpiointr, ); DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template, - TP_PROTO(struct hfi1_qp *qp, u32 flags), + TP_PROTO(struct rvt_qp *qp, u32 flags), TP_ARGS(qp, flags), TP_STRUCT__entry( DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) @@ -356,17 +356,17 @@ DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template, ); DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup, - TP_PROTO(struct hfi1_qp *qp, u32 flags), + TP_PROTO(struct rvt_qp *qp, u32 flags), TP_ARGS(qp, flags)); DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep, - TP_PROTO(struct hfi1_qp *qp, u32 flags), + TP_PROTO(struct rvt_qp *qp, u32 flags), TP_ARGS(qp, flags)); #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_qphash DECLARE_EVENT_CLASS(hfi1_qphash_template, - TP_PROTO(struct hfi1_qp *qp, u32 bucket), + TP_PROTO(struct rvt_qp *qp, u32 bucket), TP_ARGS(qp, bucket), TP_STRUCT__entry( DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) @@ -387,11 +387,11 @@ DECLARE_EVENT_CLASS(hfi1_qphash_template, ); DEFINE_EVENT(hfi1_qphash_template, hfi1_qpinsert, - TP_PROTO(struct hfi1_qp *qp, u32 bucket), + TP_PROTO(struct rvt_qp *qp, u32 bucket), TP_ARGS(qp, bucket)); DEFINE_EVENT(hfi1_qphash_template, hfi1_qpremove, - TP_PROTO(struct hfi1_qp *qp, u32 bucket), + TP_PROTO(struct rvt_qp *qp, u32 bucket), TP_ARGS(qp, bucket)); #undef TRACE_SYSTEM @@ -1292,7 +1292,7 @@ TRACE_EVENT(hfi1_sdma_state, #define TRACE_SYSTEM hfi1_rc DECLARE_EVENT_CLASS(hfi1_rc_template, - TP_PROTO(struct hfi1_qp *qp, u32 psn), + TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn), TP_STRUCT__entry( DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) @@ -1331,22 +1331,22 @@ DECLARE_EVENT_CLASS(hfi1_rc_template, ); DEFINE_EVENT(hfi1_rc_template, 
hfi1_rc_sendcomplete, - TP_PROTO(struct hfi1_qp *qp, u32 psn), + TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(hfi1_rc_template, hfi1_rc_ack, - TP_PROTO(struct hfi1_qp *qp, u32 psn), + TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(hfi1_rc_template, hfi1_rc_timeout, - TP_PROTO(struct hfi1_qp *qp, u32 psn), + TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error, - TP_PROTO(struct hfi1_qp *qp, u32 psn), + TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 1908a288cfb7..6686331943b9 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -61,11 +61,11 @@ * * Return 1 if constructed; otherwise, return 0. */ -int hfi1_make_uc_req(struct hfi1_qp *qp) +int hfi1_make_uc_req(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; - struct hfi1_swqe *wqe; + struct rvt_swqe *wqe; unsigned long flags; u32 hwords = 5; u32 bth0 = 0; @@ -267,7 +267,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) u32 rcv_flags = packet->rcv_flags; void *data = packet->ebuf; u32 tlen = packet->tlen; - struct hfi1_qp *qp = packet->qp; + struct rvt_qp *qp = packet->qp; struct hfi1_other_headers *ohdr = packet->ohdr; u32 bth0, opcode; u32 hdrsize = packet->hlen; @@ -492,8 +492,8 @@ rdma_first: int ok; /* Check rkey */ - ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, - vaddr, rkey, IB_ACCESS_REMOTE_WRITE); + ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, + vaddr, rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) goto drop; qp->r_sge.num_sge = 1; diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index 820fef211edf..d54d56d833b2 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -65,15 +65,15 @@ * Note that the receive interrupt handler may be calling hfi1_ud_rcv() * while this is being called. */ -static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe) +static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) { struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); struct hfi1_pportdata *ppd; - struct hfi1_qp *qp; + struct rvt_qp *qp; struct ib_ah_attr *ah_attr; unsigned long flags; - struct hfi1_sge_state ssge; - struct hfi1_sge *sge; + struct rvt_sge_state ssge; + struct rvt_sge *sge; struct ib_wc wc; u32 length; enum ib_qp_type sqptype, dqptype; @@ -262,14 +262,14 @@ drop: * * Return 1 if constructed; otherwise, return 0. */ -int hfi1_make_ud_req(struct hfi1_qp *qp) +int hfi1_make_ud_req(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; struct ib_ah_attr *ah_attr; struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; - struct hfi1_swqe *wqe; + struct rvt_swqe *wqe; unsigned long flags; u32 nwords; u32 extra_bytes; @@ -477,7 +477,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey) return -1; } -void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn, +void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn, u32 pkey, u32 slid, u32 dlid, u8 sc5, const struct ib_grh *old_grh) { @@ -551,7 +551,7 @@ void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn, * opa_smp_check() returns 0 if all checks succeed, 1 otherwise. 
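The uc.c hunk above converts the rdma_first rkey check to rvt_rkey_ok(), and the contract is unchanged: the remote key must name a registered region that covers the requested window with the required access rights. A standalone userspace sketch of that style of check (all names and layouts invented for illustration; this is not the rdmavt implementation):

    #include <stdio.h>
    #include <stdint.h>

    struct mregion {
            uint32_t rkey;
            uint64_t iova;
            uint64_t length;
            int access;             /* bitmask of allowed operations */
    };

    #define ACCESS_REMOTE_WRITE 0x2

    static int rkey_ok(const struct mregion *mr, uint32_t rkey,
                       uint64_t vaddr, uint64_t len, int acc)
    {
            if (mr->rkey != rkey || (mr->access & acc) != acc)
                    return 0;
            if (vaddr < mr->iova || len > mr->length ||
                vaddr - mr->iova > mr->length - len)
                    return 0;       /* overflow-safe range check */
            return 1;
    }

    int main(void)
    {
            struct mregion mr = { 0x1234, 0x1000, 4096, ACCESS_REMOTE_WRITE };

            /* 512 bytes at 0x1800 fits; 512 bytes at 0x1f00 runs past the end */
            printf("%d\n", rkey_ok(&mr, 0x1234, 0x1800, 512, ACCESS_REMOTE_WRITE));
            printf("%d\n", rkey_ok(&mr, 0x1234, 0x1f00, 512, ACCESS_REMOTE_WRITE));
            return 0;
    }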
*/ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5, - struct hfi1_qp *qp, u16 slid, struct opa_smp *smp) + struct rvt_qp *qp, u16 slid, struct opa_smp *smp) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); @@ -655,7 +655,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) u32 rcv_flags = packet->rcv_flags; void *data = packet->ebuf; u32 tlen = packet->tlen; - struct hfi1_qp *qp = packet->qp; + struct rvt_qp *qp = packet->qp; bool has_grh = rcv_flags & HFI1_HAS_GRH; bool sc4_bit = has_sc4_bit(packet); u8 sc; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 657efd3bd75c..10d6547037d0 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -65,7 +65,7 @@ #include "qp.h" #include "sdma.h" -unsigned int hfi1_lkey_table_size = 16; +static unsigned int hfi1_lkey_table_size = 16; module_param_named(lkey_table_size, hfi1_lkey_table_size, uint, S_IRUGO); MODULE_PARM_DESC(lkey_table_size, @@ -162,7 +162,7 @@ static inline struct hfi1_ucontext *to_iucontext(struct ib_ucontext return container_of(ibucontext, struct hfi1_ucontext, ibucontext); } -static inline void _hfi1_schedule_send(struct hfi1_qp *qp); +static inline void _hfi1_schedule_send(struct rvt_qp *qp); /* * Translate ib_wr_opcode into ib_wc_opcode. @@ -276,11 +276,11 @@ __be64 ib_hfi1_sys_image_guid; * @length: the length of the data */ void hfi1_copy_sge( - struct hfi1_sge_state *ss, + struct rvt_sge_state *ss, void *data, u32 length, int release) { - struct hfi1_sge *sge = &ss->sge; + struct rvt_sge *sge = &ss->sge; while (length) { u32 len = sge->length; @@ -296,7 +296,7 @@ void hfi1_copy_sge( sge->sge_length -= len; if (sge->sge_length == 0) { if (release) - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { @@ -320,9 +320,9 @@ void hfi1_copy_sge( * @ss: the SGE state * @length: the number of bytes to skip */ -void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release) +void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release) { - struct hfi1_sge *sge = &ss->sge; + struct rvt_sge *sge = &ss->sge; while (length) { u32 len = sge->length; @@ -337,7 +337,7 @@ void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release) sge->sge_length -= len; if (sge->sge_length == 0) { if (release) - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { @@ -360,9 +360,9 @@ void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release) * @qp: the QP to post on * @wr: the work request to send */ -static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) +static int post_one_send(struct rvt_qp *qp, struct ib_send_wr *wr) { - struct hfi1_swqe *wqe; + struct rvt_swqe *wqe; u32 next; int i; int j; @@ -412,7 +412,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) if (next == qp->s_last) return -ENOMEM; - rkt = &to_idev(qp->ibqp.device)->lk_table; + rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table; pd = ibpd_to_rvtpd(qp->ibqp.pd); wqe = get_swqe_ptr(qp, qp->s_head); @@ -441,8 +441,8 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) if (length == 0) continue; - ok = hfi1_lkey_ok(rkt, pd, &wqe->sg_list[j], - &wr->sg_list[i], acc); + ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], + &wr->sg_list[i], acc); if (!ok) goto bail_inval_free; wqe->length += length; @@ -465,9 +465,9 @@ static int post_one_send(struct hfi1_qp *qp, struct 
ib_send_wr *wr) bail_inval_free: /* release mr holds */ while (j) { - struct hfi1_sge *sge = &wqe->sg_list[--j]; + struct rvt_sge *sge = &wqe->sg_list[--j]; - hfi1_put_mr(sge->mr); + rvt_put_mr(sge->mr); } return -EINVAL; } @@ -483,7 +483,7 @@ bail_inval_free: static int post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { - struct hfi1_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = to_iqp(ibqp); struct hfi1_qp_priv *priv = qp->priv; int err = 0; int call_send; @@ -529,8 +529,8 @@ bail: static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { - struct hfi1_qp *qp = to_iqp(ibqp); - struct hfi1_rwq *wq = qp->r_rq.wq; + struct rvt_qp *qp = to_iqp(ibqp); + struct rvt_rwq *wq = qp->r_rq.wq; unsigned long flags; int ret; @@ -542,7 +542,7 @@ static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, } for (; wr; wr = wr->next) { - struct hfi1_rwqe *wqe; + struct rvt_rwqe *wqe; u32 next; int i; @@ -694,7 +694,7 @@ static void mem_timer(unsigned long data) { struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data; struct list_head *list = &dev->memwait; - struct hfi1_qp *qp = NULL; + struct rvt_qp *qp = NULL; struct iowait *wait; unsigned long flags; struct hfi1_qp_priv *priv; @@ -715,9 +715,9 @@ static void mem_timer(unsigned long data) hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM); } -void update_sge(struct hfi1_sge_state *ss, u32 length) +void update_sge(struct rvt_sge_state *ss, u32 length) { - struct hfi1_sge *sge = &ss->sge; + struct rvt_sge *sge = &ss->sge; sge->vaddr += length; sge->length -= length; @@ -737,7 +737,7 @@ void update_sge(struct hfi1_sge_state *ss, u32 length) } static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, - struct hfi1_qp *qp) + struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct verbs_txreq *tx; @@ -764,7 +764,7 @@ static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, } static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, - struct hfi1_qp *qp) + struct rvt_qp *qp) { struct verbs_txreq *tx; @@ -782,7 +782,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, void hfi1_put_txreq(struct verbs_txreq *tx) { struct hfi1_ibdev *dev; - struct hfi1_qp *qp; + struct rvt_qp *qp; unsigned long flags; unsigned int seq; struct hfi1_qp_priv *priv; @@ -791,7 +791,7 @@ void hfi1_put_txreq(struct verbs_txreq *tx) dev = to_idev(qp->ibqp.device); if (tx->mr) { - hfi1_put_mr(tx->mr); + rvt_put_mr(tx->mr); tx->mr = NULL; } sdma_txclean(dd_from_dev(dev), &tx->txreq); @@ -830,7 +830,7 @@ static void verbs_sdma_complete( { struct verbs_txreq *tx = container_of(cookie, struct verbs_txreq, txreq); - struct hfi1_qp *qp = tx->qp; + struct rvt_qp *qp = tx->qp; spin_lock(&qp->s_lock); if (tx->wqe) @@ -858,7 +858,7 @@ static void verbs_sdma_complete( hfi1_put_txreq(tx); } -static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp) +static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; unsigned long flags; @@ -891,12 +891,12 @@ static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp) */ static int build_verbs_ulp_payload( struct sdma_engine *sde, - struct hfi1_sge_state *ss, + struct rvt_sge_state *ss, u32 length, struct verbs_txreq *tx) { - struct hfi1_sge *sg_list = ss->sg_list; - struct hfi1_sge sge = ss->sge; + struct rvt_sge *sg_list = ss->sg_list; + struct rvt_sge sge = ss->sge; u8 num_sge = ss->num_sge; u32 len; int ret = 0; @@ -939,7 +939,7 @@ bail_txadd: /* New API */ static int 
build_verbs_tx_desc( struct sdma_engine *sde, - struct hfi1_sge_state *ss, + struct rvt_sge_state *ss, u32 length, struct verbs_txreq *tx, struct ahg_ib_header *ahdr, @@ -1006,13 +1006,13 @@ bail_txadd: return ret; } -int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, +int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { struct hfi1_qp_priv *priv = qp->priv; struct ahg_ib_header *ahdr = priv->s_hdr; u32 hdrwords = qp->s_hdrwords; - struct hfi1_sge_state *ss = qp->s_cur_sge; + struct rvt_sge_state *ss = qp->s_cur_sge; u32 len = qp->s_cur_size; u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */ struct hfi1_ibdev *dev = ps->dev; @@ -1080,7 +1080,7 @@ bail_tx: * If we are now in the error state, return zero to flush the * send work request. */ -static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc) +static int no_bufs_available(struct rvt_qp *qp, struct send_context *sc) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_devdata *dd = sc->dd; @@ -1119,7 +1119,7 @@ static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc) return ret; } -struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5) +struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_pportdata *ppd = dd->pport + (qp->port_num - 1); @@ -1131,13 +1131,13 @@ struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5) return dd->vld[vl].sc; } -int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, +int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { struct hfi1_qp_priv *priv = qp->priv; struct ahg_ib_header *ahdr = priv->s_hdr; u32 hdrwords = qp->s_hdrwords; - struct hfi1_sge_state *ss = qp->s_cur_sge; + struct rvt_sge_state *ss = qp->s_cur_sge; u32 len = qp->s_cur_size; u32 dwords = (len + 3) >> 2; u32 plen = hdrwords + dwords + 2; /* includes pbc */ @@ -1209,7 +1209,7 @@ int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh); if (qp->s_rdma_mr) { - hfi1_put_mr(qp->s_rdma_mr); + rvt_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } @@ -1256,7 +1256,7 @@ static inline int egress_pkey_matches_entry(u16 pkey, u16 ent) */ static inline int egress_pkey_check(struct hfi1_pportdata *ppd, struct hfi1_ib_header *hdr, - struct hfi1_qp *qp) + struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; @@ -1319,7 +1319,7 @@ bad: * Return zero if packet is sent or queued OK. * Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise. 
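In both hfi1_verbs_send_dma() and hfi1_verbs_send_pio() above, plen is computed as hdrwords + ((len + 3) >> 2) + 2: the payload length in bytes rounded up to 32-bit dwords, plus the header dwords, plus two dwords for the PBC (per the driver's own "includes pbc" comment). A minimal demonstration of the rounding (hdrwords value invented for the example):

    #include <stdio.h>

    /* Payload bytes rounded up to 32-bit dwords, as in the plen
     * computation above. */
    static unsigned int payload_dwords(unsigned int len)
    {
            return (len + 3) >> 2;
    }

    int main(void)
    {
            unsigned int hdrwords = 7;      /* example header size */
            unsigned int len = 13;          /* 13 bytes -> 4 dwords */

            printf("len=%u -> %u dwords, plen=%u\n",
                   len, payload_dwords(len),
                   hdrwords + payload_dwords(len) + 2);
            return 0;
    }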
*/ -int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps) +int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_qp_priv *priv = qp->priv; @@ -1402,8 +1402,8 @@ static int query_device(struct ib_device *ibdev, props->max_cq = hfi1_max_cqs; props->max_ah = hfi1_max_ahs; props->max_cqe = hfi1_max_cqes; - props->max_mr = dev->lk_table.max; - props->max_fmr = dev->lk_table.max; + props->max_mr = dev->rdi.lkey_table.max; + props->max_fmr = dev->rdi.lkey_table.max; props->max_map_per_fmr = 32767; props->max_pd = dev->rdi.dparms.props.max_pd; props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; @@ -1657,7 +1657,7 @@ struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid) { struct ib_ah_attr attr; struct ib_ah *ah = ERR_PTR(-EINVAL); - struct hfi1_qp *qp0; + struct rvt_qp *qp0; memset(&attr, 0, sizeof(attr)); attr.dlid = dlid; @@ -1772,7 +1772,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) struct hfi1_ibdev *dev = &dd->verbs_dev; struct ib_device *ibdev = &dev->rdi.ibdev; struct hfi1_pportdata *ppd = dd->pport; - unsigned i, lk_tab_size; + unsigned i; int ret; size_t lcpysz = IB_DEVICE_NAME_MAX; u16 descq_cnt; @@ -1796,29 +1796,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dev->mem_timer.function = mem_timer; dev->mem_timer.data = (unsigned long) dev; - /* - * The top hfi1_lkey_table_size bits are used to index the - * table. The lower 8 bits can be owned by the user (copied from - * the LKEY). The remaining bits act as a generation number or tag. - */ - spin_lock_init(&dev->lk_table.lock); - dev->lk_table.max = 1 << hfi1_lkey_table_size; - /* ensure generation is at least 4 bits (keys.c) */ - if (hfi1_lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) { - dd_dev_warn(dd, "lkey bits %u too large, reduced to %u\n", - hfi1_lkey_table_size, RVT_MAX_LKEY_TABLE_BITS); - hfi1_lkey_table_size = RVT_MAX_LKEY_TABLE_BITS; - } - lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); - dev->lk_table.table = (struct rvt_mregion __rcu **) - vmalloc(lk_tab_size); - if (dev->lk_table.table == NULL) { - ret = -ENOMEM; - goto err_lk; - } - RCU_INIT_POINTER(dev->dma_mr, NULL); - for (i = 0; i < dev->lk_table.max; i++) - RCU_INIT_POINTER(dev->lk_table.table[i], NULL); INIT_LIST_HEAD(&dev->pending_mmaps); spin_lock_init(&dev->pending_lock); seqlock_init(&dev->iowait_lock); @@ -1917,14 +1894,15 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->resize_cq = hfi1_resize_cq; ibdev->poll_cq = hfi1_poll_cq; ibdev->req_notify_cq = hfi1_req_notify_cq; - ibdev->get_dma_mr = hfi1_get_dma_mr; - ibdev->reg_user_mr = hfi1_reg_user_mr; - ibdev->dereg_mr = hfi1_dereg_mr; - ibdev->alloc_mr = hfi1_alloc_mr; - ibdev->alloc_fmr = hfi1_alloc_fmr; - ibdev->map_phys_fmr = hfi1_map_phys_fmr; - ibdev->unmap_fmr = hfi1_unmap_fmr; - ibdev->dealloc_fmr = hfi1_dealloc_fmr; + ibdev->get_dma_mr = NULL; + ibdev->reg_user_mr = NULL; + ibdev->dereg_mr = NULL; + ibdev->alloc_mr = NULL; + ibdev->map_mr_sg = NULL; + ibdev->alloc_fmr = NULL; + ibdev->map_phys_fmr = NULL; + ibdev->unmap_fmr = NULL; + ibdev->dealloc_fmr = NULL; ibdev->attach_mcast = hfi1_multicast_attach; ibdev->detach_mcast = hfi1_multicast_detach; ibdev->process_mad = hfi1_process_mad; @@ -1945,9 +1923,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; dd->verbs_dev.rdi.dparms.props.max_ah = hfi1_max_ahs; dd->verbs_dev.rdi.dparms.props.max_pd = hfi1_max_pds; - dd->verbs_dev.rdi.flags 
= (RVT_FLAG_MR_INIT_DRIVER | - RVT_FLAG_QP_INIT_DRIVER | + dd->verbs_dev.rdi.flags = (RVT_FLAG_QP_INIT_DRIVER | RVT_FLAG_CQ_INIT_DRIVER); + dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size; ret = rvt_register_device(&dd->verbs_dev.rdi); if (ret) @@ -1970,8 +1948,6 @@ err_agents: err_reg: err_verbs_txreq: kmem_cache_destroy(dev->verbs_txreq_cache); - vfree(dev->lk_table.table); -err_lk: hfi1_qp_exit(dev); err_qp_init: dd_dev_err(dd, "cannot register verbs: %d!\n", -ret); @@ -1993,13 +1969,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd) dd_dev_err(dd, "txwait list not empty!\n"); if (!list_empty(&dev->memwait)) dd_dev_err(dd, "memwait list not empty!\n"); - if (dev->dma_mr) - dd_dev_err(dd, "DMA MR not NULL!\n"); hfi1_qp_exit(dev); del_timer_sync(&dev->mem_timer); kmem_cache_destroy(dev->verbs_txreq_cache); - vfree(dev->lk_table.table); } void hfi1_cnp_rcv(struct hfi1_packet *packet) @@ -2007,7 +1980,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet) struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data; struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_ib_header *hdr = packet->hdr; - struct hfi1_qp *qp = packet->qp; + struct rvt_qp *qp = packet->qp; u32 lqpn, rqpn = 0; u16 rlid = 0; u8 sl, sc5, sc4_bit, svc_type; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 4db6136f0384..0782a85f5d28 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -222,7 +222,7 @@ struct tx_pio_header { */ struct hfi1_mcast_qp { struct list_head list; - struct hfi1_qp *qp; + struct rvt_qp *qp; }; struct hfi1_mcast { @@ -234,20 +234,6 @@ struct hfi1_mcast { int n_attached; }; -/* - * This structure is used by hfi1_mmap() to validate an offset - * when an mmap() request is made. The vm_area_struct then uses - * this as its vm_private_data. - */ -struct hfi1_mmap_info { - struct list_head pending_mmaps; - struct ib_ucontext *context; - void *obj; - __u64 offset; - struct kref ref; - unsigned size; -}; - /* * This structure is used to contain the head pointer, tail pointer, * and completion queue entries as a single memory allocation so @@ -274,238 +260,28 @@ struct hfi1_cq { u8 notify; u8 triggered; struct hfi1_cq_wc *queue; - struct hfi1_mmap_info *ip; -}; - -/* - * These keep track of the copy progress within a memory region. - * Used by the verbs layer. - */ -struct hfi1_sge { - struct rvt_mregion *mr; - void *vaddr; /* kernel virtual address of segment */ - u32 sge_length; /* length of the SGE */ - u32 length; /* remaining length of the segment */ - u16 m; /* current index: mr->map[m] */ - u16 n; /* current index: mr->map[m]->segs[n] */ -}; - -/* Memory region */ -struct hfi1_mr { - struct ib_mr ibmr; - struct ib_umem *umem; - struct rvt_mregion mr; /* must be last */ -}; - -/* - * Send work request queue entry. - * The size of the sg_list is determined when the QP is created and stored - * in qp->s_max_sge. - */ -struct hfi1_swqe { - union { - struct ib_send_wr wr; /* don't use wr.sg_list */ - struct ib_rdma_wr rdma_wr; - struct ib_atomic_wr atomic_wr; - struct ib_ud_wr ud_wr; - }; - u32 psn; /* first packet sequence number */ - u32 lpsn; /* last packet sequence number */ - u32 ssn; /* send sequence number */ - u32 length; /* total length of data in sg_list */ - struct hfi1_sge sg_list[0]; -}; - -/* - * Receive work request queue entry. - * The size of the sg_list is determined when the QP (or SRQ) is created - * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). 
- */ -struct hfi1_rwqe { - u64 wr_id; - u8 num_sge; - struct ib_sge sg_list[0]; -}; - -/* - * This structure is used to contain the head pointer, tail pointer, - * and receive work queue entries as a single memory allocation so - * it can be mmap'ed into user space. - * Note that the wq array elements are variable size so you can't - * just index into the array to get the N'th element; - * use get_rwqe_ptr() instead. - */ -struct hfi1_rwq { - u32 head; /* new work requests posted to the head */ - u32 tail; /* receives pull requests from here. */ - struct hfi1_rwqe wq[0]; -}; - -struct hfi1_rq { - struct hfi1_rwq *wq; - u32 size; /* size of RWQE array */ - u8 max_sge; - /* protect changes in this struct */ - spinlock_t lock ____cacheline_aligned_in_smp; + struct rvt_mmap_info *ip; }; struct hfi1_srq { struct ib_srq ibsrq; - struct hfi1_rq rq; - struct hfi1_mmap_info *ip; + struct rvt_rq rq; + struct rvt_mmap_info *ip; /* send signal when number of RWQEs < limit */ u32 limit; }; -struct hfi1_sge_state { - struct hfi1_sge *sg_list; /* next SGE to be used if any */ - struct hfi1_sge sge; /* progress state for the current SGE */ - u32 total_len; - u8 num_sge; -}; - -/* - * This structure holds the information that the send tasklet needs - * to send a RDMA read response or atomic operation. - */ -struct hfi1_ack_entry { - u8 opcode; - u8 sent; - u32 psn; - u32 lpsn; - union { - struct hfi1_sge rdma_sge; - u64 atomic_data; - }; -}; - /* * hfi1 specific data structures that will be hidden from rvt after the queue * pair is made common */ -struct hfi1_qp; struct hfi1_qp_priv { struct ahg_ib_header *s_hdr; /* next packet header to send */ struct sdma_engine *s_sde; /* current sde */ u8 s_sc; /* SC[0..4] for next packet */ u8 r_adefered; /* number of acks defered */ struct iowait s_iowait; - struct hfi1_qp *owner; -}; - -/* - * Variables prefixed with s_ are for the requester (sender). - * Variables prefixed with r_ are for the responder (receiver). - * Variables prefixed with ack_ are for responder replies. - * - * Common variables are protected by both r_rq.lock and s_lock in that order - * which only happens in modify_qp() or changing the QP 'state'. 
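The hfi1_swqe and hfi1_rwqe structures being deleted here (like their rvt_* replacements) end in a zero-length sg_list array, so consecutive queue entries are laid out with a stride of the header size plus max_sge SGEs. That is why the comment above warns you cannot simply index the wq array and must use get_rwqe_ptr()/get_swqe_ptr(). A self-contained userspace sketch of the same pointer arithmetic (toy types, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    struct sge {
            unsigned long addr;
            unsigned int length;
            unsigned int key;
    };

    struct rwqe {
            unsigned long wr_id;
            unsigned char num_sge;
            struct sge sg_list[];   /* flexible array member */
    };

    /* Same idea as get_rwqe_ptr(): stride = header + max_sge * sizeof(SGE) */
    static struct rwqe *rwqe_ptr(void *wq, unsigned int max_sge, unsigned int n)
    {
            size_t sz = sizeof(struct rwqe) + max_sge * sizeof(struct sge);

            return (struct rwqe *)((char *)wq + sz * n);
    }

    int main(void)
    {
            unsigned int max_sge = 4, size = 8;
            size_t sz = sizeof(struct rwqe) + max_sge * sizeof(struct sge);
            void *wq = calloc(size, sz);

            if (!wq)
                    return 1;
            rwqe_ptr(wq, max_sge, 3)->wr_id = 42;
            printf("wr_id of entry 3: %lu\n", rwqe_ptr(wq, max_sge, 3)->wr_id);
            free(wq);
            return 0;
    }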
- */ -struct hfi1_qp { - struct ib_qp ibqp; - void *priv; - /* read mostly fields above and below */ - struct ib_ah_attr remote_ah_attr; - struct ib_ah_attr alt_ah_attr; - struct hfi1_qp __rcu *next; /* link list for QPN hash table */ - struct hfi1_swqe *s_wq; /* send work queue */ - struct hfi1_mmap_info *ip; - unsigned long timeout_jiffies; /* computed from timeout */ - - enum ib_mtu path_mtu; - int srate_mbps; /* s_srate (below) converted to Mbit/s */ - u32 remote_qpn; - u32 pmtu; /* decoded from path_mtu */ - u32 qkey; /* QKEY for this QP (for UD or RD) */ - u32 s_size; /* send work queue size */ - u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ - u32 s_ahgpsn; /* set to the psn in the copy of the header */ - - u8 state; /* QP state */ - u8 allowed_ops; /* high order bits of allowed opcodes */ - u8 qp_access_flags; - u8 alt_timeout; /* Alternate path timeout for this QP */ - u8 timeout; /* Timeout for this QP */ - u8 s_srate; - u8 s_mig_state; - u8 port_num; - u8 s_pkey_index; /* PKEY index to use */ - u8 s_alt_pkey_index; /* Alternate path PKEY index to use */ - u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ - u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */ - u8 s_retry_cnt; /* number of times to retry */ - u8 s_rnr_retry_cnt; - u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ - u8 s_max_sge; /* size of s_wq->sg_list */ - u8 s_draining; - - /* start of read/write fields */ - atomic_t refcount ____cacheline_aligned_in_smp; - wait_queue_head_t wait; - - - struct hfi1_ack_entry s_ack_queue[HFI1_MAX_RDMA_ATOMIC + 1] - ____cacheline_aligned_in_smp; - struct hfi1_sge_state s_rdma_read_sge; - - spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */ - unsigned long r_aflags; - u64 r_wr_id; /* ID for current receive WQE */ - u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ - u32 r_len; /* total length of r_sge */ - u32 r_rcv_len; /* receive data len processed */ - u32 r_psn; /* expected rcv packet sequence number */ - u32 r_msn; /* message sequence number */ - - u8 r_state; /* opcode of last packet received */ - u8 r_flags; - u8 r_head_ack_queue; /* index into s_ack_queue[] */ - - struct list_head rspwait; /* link for waiting to respond */ - - struct hfi1_sge_state r_sge; /* current receive data */ - struct hfi1_rq r_rq; /* receive work queue */ - - spinlock_t s_lock ____cacheline_aligned_in_smp; - struct hfi1_sge_state *s_cur_sge; - u32 s_flags; - struct hfi1_swqe *s_wqe; - struct hfi1_sge_state s_sge; /* current send request data */ - struct rvt_mregion *s_rdma_mr; - u32 s_cur_size; /* size of send packet in bytes */ - u32 s_len; /* total length of s_sge */ - u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ - u32 s_next_psn; /* PSN for next request */ - u32 s_last_psn; /* last response PSN processed */ - u32 s_sending_psn; /* lowest PSN that is being sent */ - u32 s_sending_hpsn; /* highest PSN that is being sent */ - u32 s_psn; /* current packet sequence number */ - u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ - u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ - u32 s_head; /* new entries added here */ - u32 s_tail; /* next entry to process */ - u32 s_cur; /* current work queue entry */ - u32 s_acked; /* last un-ACK'ed entry */ - u32 s_last; /* last completed entry */ - u32 s_ssn; /* SSN of tail entry */ - u32 s_lsn; /* limit sequence number (credit) */ - u16 s_hdrwords; /* size of s_hdr in 32 bit words */ - u16 s_rdma_ack_cnt; - s8 s_ahgidx; - u8 s_state; /* opcode of last 
packet sent */ - u8 s_ack_state; /* opcode of packet to ACK */ - u8 s_nak_state; /* non-zero if NAK is pending */ - u8 r_nak_state; /* non-zero if NAK is pending */ - u8 s_retry; /* requester retry counter */ - u8 s_rnr_retry; /* requester RNR retry counter */ - u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ - u8 s_tail_ack_queue; /* index into s_ack_queue[] */ - - struct hfi1_sge_state s_ack_rdma_sge; - struct timer_list s_timer; - - struct hfi1_sge r_sg_list[0] /* verified SGEs */ - ____cacheline_aligned_in_smp; + struct rvt_qp *owner; }; /* @@ -599,27 +375,27 @@ struct hfi1_pkt_state { #define HFI1_PSN_CREDIT 16 /* - * Since struct hfi1_swqe is not a fixed size, we can't simply index into + * Since struct rvt_swqe is not a fixed size, we can't simply index into * struct hfi1_qp.s_wq. This function does the array index computation. */ -static inline struct hfi1_swqe *get_swqe_ptr(struct hfi1_qp *qp, - unsigned n) +static inline struct rvt_swqe *get_swqe_ptr(struct rvt_qp *qp, + unsigned n) { - return (struct hfi1_swqe *)((char *)qp->s_wq + - (sizeof(struct hfi1_swqe) + + return (struct rvt_swqe *)((char *)qp->s_wq + + (sizeof(struct rvt_swqe) + qp->s_max_sge * - sizeof(struct hfi1_sge)) * n); + sizeof(struct rvt_sge)) * n); } /* - * Since struct hfi1_rwqe is not a fixed size, we can't simply index into - * struct hfi1_rwq.wq. This function does the array index computation. + * Since struct rvt_rwqe is not a fixed size, we can't simply index into + * struct rvt_rwq.wq. This function does the array index computation. */ -static inline struct hfi1_rwqe *get_rwqe_ptr(struct hfi1_rq *rq, unsigned n) +static inline struct rvt_rwqe *get_rwqe_ptr(struct rvt_rq *rq, unsigned n) { - return (struct hfi1_rwqe *) + return (struct rvt_rwqe *) ((char *) rq->wq->wq + - (sizeof(struct hfi1_rwqe) + + (sizeof(struct rvt_rwqe) + rq->max_sge * sizeof(struct ib_sge)) * n); } @@ -643,7 +419,7 @@ static inline void inc_opstats( } struct hfi1_ibport { - struct hfi1_qp __rcu *qp[2]; + struct rvt_qp __rcu *qp[2]; struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ struct rvt_ah *sm_ah; struct rvt_ah *smi_ah; @@ -706,12 +482,10 @@ struct hfi1_ibdev { struct list_head pending_mmaps; spinlock_t mmap_offset_lock; /* protect mmap_offset */ u32 mmap_offset; - struct rvt_mregion __rcu *dma_mr; struct hfi1_qp_ibdev *qp_dev; /* QP numbers are shared by all IB ports */ - struct rvt_lkey_table lk_table; /* protect wait lists */ seqlock_t iowait_lock; struct list_head txwait; /* list for wait verbs_txreq */ @@ -760,11 +534,6 @@ struct hfi1_verbs_counters { u32 vl15_dropped; }; -static inline struct hfi1_mr *to_imr(struct ib_mr *ibmr) -{ - return container_of(ibmr, struct hfi1_mr, ibmr); -} - static inline struct hfi1_cq *to_icq(struct ib_cq *ibcq) { return container_of(ibcq, struct hfi1_cq, ibcq); @@ -775,9 +544,9 @@ static inline struct hfi1_srq *to_isrq(struct ib_srq *ibsrq) return container_of(ibsrq, struct hfi1_srq, ibsrq); } -static inline struct hfi1_qp *to_iqp(struct ib_qp *ibqp) +static inline struct rvt_qp *to_iqp(struct ib_qp *ibqp) { - return container_of(ibqp, struct hfi1_qp, ibqp); + return container_of(ibqp, struct rvt_qp, ibqp); } static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev) @@ -788,7 +557,7 @@ static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev) return container_of(rdi, struct hfi1_ibdev, rdi); } -static inline struct hfi1_qp *iowait_to_qp(struct iowait *s_iowait) +static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait) { struct hfi1_qp_priv 
*priv; @@ -800,7 +569,7 @@ static inline struct hfi1_qp *iowait_to_qp(struct iowait *s_iowait) * Send if not busy or waiting for I/O and either * a RC response is pending or we can process send work requests. */ -static inline int hfi1_send_ok(struct hfi1_qp *qp) +static inline int hfi1_send_ok(struct rvt_qp *qp) { return !(qp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO)) && (qp->s_hdrwords || (qp->s_flags & HFI1_S_RESP_PENDING) || @@ -890,12 +659,12 @@ int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp); struct verbs_txreq; void hfi1_put_txreq(struct verbs_txreq *tx); -int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps); +int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps); -void hfi1_copy_sge(struct hfi1_sge_state *ss, void *data, u32 length, +void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release); -void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release); +void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release); void hfi1_cnp_rcv(struct hfi1_packet *packet); @@ -907,7 +676,7 @@ void hfi1_rc_hdrerr( struct hfi1_ctxtdata *rcd, struct hfi1_ib_header *hdr, u32 rcv_flags, - struct hfi1_qp *qp); + struct rvt_qp *qp); u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); @@ -915,24 +684,14 @@ struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid); void hfi1_rc_rnr_retry(unsigned long arg); -void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr); +void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr); -void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err); +void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err); void hfi1_ud_rcv(struct hfi1_packet *packet); int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey); -int hfi1_alloc_lkey(struct rvt_mregion *mr, int dma_region); - -void hfi1_free_lkey(struct rvt_mregion *mr); - -int hfi1_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, - struct hfi1_sge *isge, struct ib_sge *sge, int acc); - -int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, - u32 len, u64 vaddr, u32 rkey, int acc); - int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr); @@ -970,43 +729,10 @@ int hfi1_req_notify_cq( int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); -struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc); - -struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, - u64 virt_addr, int mr_access_flags, - struct ib_udata *udata); - -int hfi1_dereg_mr(struct ib_mr *ibmr); - -struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd, - enum ib_mr_type mr_type, - u32 max_entries); - -struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr); - -int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, - int list_len, u64 iova); - -int hfi1_unmap_fmr(struct list_head *fmr_list); - -int hfi1_dealloc_fmr(struct ib_fmr *ibfmr); - -static inline void hfi1_get_mr(struct rvt_mregion *mr) -{ - atomic_inc(&mr->refcount); -} - -static inline void hfi1_put_mr(struct rvt_mregion *mr) -{ - if (unlikely(atomic_dec_and_test(&mr->refcount))) - complete(&mr->comp); -} - -static inline void hfi1_put_ss(struct hfi1_sge_state *ss) +static inline void hfi1_put_ss(struct rvt_sge_state *ss) { while (ss->num_sge) { - hfi1_put_mr(ss->sge.mr); + rvt_put_mr(ss->sge.mr); if (--ss->num_sge) ss->sge = *ss->sg_list++; } @@ -1014,38 +740,40 @@ static inline void hfi1_put_ss(struct hfi1_sge_state *ss) void 
hfi1_release_mmap_info(struct kref *ref); -struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, u32 size, - struct ib_ucontext *context, - void *obj); +struct rvt_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, u32 size, + struct ib_ucontext *context, + void *obj); -void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip, +void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct rvt_mmap_info *ip, u32 size, void *obj); int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); -int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only); +int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only); + +void hfi1_migrate_qp(struct rvt_qp *qp); int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, - int has_grh, struct hfi1_qp *qp, u32 bth0); + int has_grh, struct rvt_qp *qp, u32 bth0); u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, struct ib_global_route *grh, u32 hwords, u32 nwords); -void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr, +void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, u32 bth0, u32 bth2, int middle); void hfi1_do_send(struct work_struct *work); -void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe, +void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status); -void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct hfi1_qp *qp, int is_fecn); +void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct rvt_qp *qp, int is_fecn); -int hfi1_make_rc_req(struct hfi1_qp *qp); +int hfi1_make_rc_req(struct rvt_qp *qp); -int hfi1_make_uc_req(struct hfi1_qp *qp); +int hfi1_make_uc_req(struct rvt_qp *qp); -int hfi1_make_ud_req(struct hfi1_qp *qp); +int hfi1_make_ud_req(struct rvt_qp *qp); int hfi1_register_ib_device(struct hfi1_devdata *); @@ -1055,13 +783,13 @@ void hfi1_ib_rcv(struct hfi1_packet *packet); unsigned hfi1_get_npkeys(struct hfi1_devdata *); -int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, +int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); -int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps, +int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); -struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5); +struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5); extern const enum ib_wc_opcode ib_hfi1_wc_opcode[]; @@ -1071,8 +799,6 @@ extern const int ib_hfi1_state_ops[]; extern __be64 ib_hfi1_sys_image_guid; /* in network order */ -extern unsigned int hfi1_lkey_table_size; - extern unsigned int hfi1_max_cqes; extern unsigned int hfi1_max_cqs; diff --git a/drivers/staging/rdma/hfi1/verbs_mcast.c b/drivers/staging/rdma/hfi1/verbs_mcast.c index afc6b4c61a1d..49954b9b9e36 100644 --- a/drivers/staging/rdma/hfi1/verbs_mcast.c +++ b/drivers/staging/rdma/hfi1/verbs_mcast.c @@ -56,7 +56,7 @@ * mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct * @qp: the QP to link */ -static struct hfi1_mcast_qp *mcast_qp_alloc(struct hfi1_qp *qp) +static struct hfi1_mcast_qp *mcast_qp_alloc(struct rvt_qp *qp) { struct hfi1_mcast_qp *mqp; @@ -73,7 +73,7 @@ bail: static void mcast_qp_free(struct hfi1_mcast_qp *mqp) { - struct hfi1_qp *qp = mqp->qp; + struct rvt_qp *qp = mqp->qp; /* Notify hfi1_destroy_qp() if it is waiting. 
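Both the hfi1_put_mr() helper removed above (now rvt_put_mr()) and this mcast_qp_free() path use the same lifetime idiom: atomically drop a reference and, when the count reaches zero, wake whatever is blocked waiting for the object to quiesce. A userspace sketch of the idea with C11 atomics (names invented; the kernel uses struct completion and wait queues, not a flag):

    #include <stdatomic.h>
    #include <stdio.h>

    struct object {
            atomic_int refcount;
            int done;       /* stands in for complete() / wake_up() */
    };

    static void get_ref(struct object *o)
    {
            atomic_fetch_add(&o->refcount, 1);
    }

    static void put_ref(struct object *o)
    {
            /* fetch_sub returns the previous value; 1 means last reference */
            if (atomic_fetch_sub(&o->refcount, 1) == 1)
                    o->done = 1;
    }

    int main(void)
    {
            struct object o = { .refcount = 1, .done = 0 };

            get_ref(&o);
            put_ref(&o);
            put_ref(&o);
            printf("all references dropped: %s\n", o.done ? "yes" : "no");
            return 0;
    }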
*/ if (atomic_dec_and_test(&qp->refcount)) @@ -241,7 +241,7 @@ bail: int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { - struct hfi1_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = to_iqp(ibqp); struct hfi1_ibdev *dev = to_idev(ibqp->device); struct hfi1_ibport *ibp; struct hfi1_mcast *mcast; @@ -299,7 +299,7 @@ bail: int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { - struct hfi1_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = to_iqp(ibqp); struct hfi1_ibdev *dev = to_idev(ibqp->device); struct hfi1_ibport *ibp = to_iport(ibqp->device, qp->port_num); struct hfi1_mcast *mcast = NULL; -- cgit v1.2.3-59-g8ed1b From 39db3e66fa5f7d489f3eb9b0359d6d7e7bf0cd45 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:33 -0800 Subject: staging/rdma/hfi1: Remove srq from hfi1 SRQ data structure has been moved to rdmavt. Make use of it. Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 2 +- drivers/staging/rdma/hfi1/ruc.c | 4 ++-- drivers/staging/rdma/hfi1/srq.c | 10 +++++----- drivers/staging/rdma/hfi1/verbs.h | 13 ------------- 4 files changed, 8 insertions(+), 21 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 5a6845509d16..0f00365f899d 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -1092,7 +1092,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, sz = sizeof(*qp); sg_list_sz = 0; if (init_attr->srq) { - struct hfi1_srq *srq = to_isrq(init_attr->srq); + struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq); if (srq->rq.max_sge > 1) sg_list_sz = sizeof(*qp->r_sg_list) * diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 762fca9d9ad4..3b2f032b9dea 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -159,14 +159,14 @@ int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only) unsigned long flags; struct rvt_rq *rq; struct rvt_rwq *wq; - struct hfi1_srq *srq; + struct rvt_srq *srq; struct rvt_rwqe *wqe; void (*handler)(struct ib_event *, void *); u32 tail; int ret; if (qp->ibqp.srq) { - srq = to_isrq(qp->ibqp.srq); + srq = ibsrq_to_rvtsrq(qp->ibqp.srq); handler = srq->ibsrq.event_handler; rq = &srq->rq; } else { diff --git a/drivers/staging/rdma/hfi1/srq.c b/drivers/staging/rdma/hfi1/srq.c index 932bd96073ca..78f190a266a5 100644 --- a/drivers/staging/rdma/hfi1/srq.c +++ b/drivers/staging/rdma/hfi1/srq.c @@ -65,7 +65,7 @@ int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { - struct hfi1_srq *srq = to_isrq(ibsrq); + struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); struct rvt_rwq *wq; unsigned long flags; int ret; @@ -120,7 +120,7 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd, struct ib_udata *udata) { struct hfi1_ibdev *dev = to_idev(ibpd->device); - struct hfi1_srq *srq; + struct rvt_srq *srq; u32 sz; struct ib_srq *ret; @@ -229,7 +229,7 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { - struct hfi1_srq *srq = to_isrq(ibsrq); + struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); struct rvt_rwq *wq; int ret = 0; @@ -367,7 +367,7 @@ bail: int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) { - struct hfi1_srq *srq = to_isrq(ibsrq); + struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); attr->max_wr = srq->rq.size - 1; attr->max_sge = srq->rq.max_sge; 
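The conversion running through this srq commit swaps the driver-local to_isrq() for rdmavt's ibsrq_to_rvtsrq(); both are thin container_of() wrappers that recover the enclosing structure from a pointer to an embedded member, the same trick the verbs.h hunks use for to_iqp(). For readers unfamiliar with the idiom, a minimal standalone illustration (toy struct layouts invented for this sketch):

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal container_of(), as used by to_iqp()/ibsrq_to_rvtsrq()
     * style helpers (the kernel version adds type checking). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ib_qp { int qp_num; };
    struct rvt_qp { int state; struct ib_qp ibqp; };

    static struct rvt_qp *to_iqp(struct ib_qp *ibqp)
    {
            return container_of(ibqp, struct rvt_qp, ibqp);
    }

    int main(void)
    {
            struct rvt_qp qp = { .state = 3, .ibqp = { .qp_num = 7 } };

            printf("state via embedded ib_qp: %d\n", to_iqp(&qp.ibqp)->state);
            return 0;
    }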
@@ -381,7 +381,7 @@ int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) */ int hfi1_destroy_srq(struct ib_srq *ibsrq) { - struct hfi1_srq *srq = to_isrq(ibsrq); + struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); struct hfi1_ibdev *dev = to_idev(ibsrq->device); spin_lock(&dev->n_srqs_lock); diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 0782a85f5d28..97df555e62b5 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -263,14 +263,6 @@ struct hfi1_cq { struct rvt_mmap_info *ip; }; -struct hfi1_srq { - struct ib_srq ibsrq; - struct rvt_rq rq; - struct rvt_mmap_info *ip; - /* send signal when number of RWQEs < limit */ - u32 limit; -}; - /* * hfi1 specific data structures that will be hidden from rvt after the queue * pair is made common @@ -539,11 +531,6 @@ static inline struct hfi1_cq *to_icq(struct ib_cq *ibcq) return container_of(ibcq, struct hfi1_cq, ibcq); } -static inline struct hfi1_srq *to_isrq(struct ib_srq *ibsrq) -{ - return container_of(ibsrq, struct hfi1_srq, ibsrq); -} - static inline struct rvt_qp *to_iqp(struct ib_qp *ibqp) { return container_of(ibqp, struct rvt_qp, ibqp); -- cgit v1.2.3-59-g8ed1b From 4eb068824abb0fb335f87f268681a55a147a176f Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:39 -0800 Subject: staging/rdma/hfi1: Remove ibport and use rdmavt version Remove most of the ibport members from hfi1 and use the rdmavt version. Also register the port with rdmavt. Reviewed-by: Mike Marciniszyn Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 34 +++---- drivers/staging/rdma/hfi1/driver.c | 2 +- drivers/staging/rdma/hfi1/hfi.h | 8 +- drivers/staging/rdma/hfi1/mad.c | 151 ++++++++++++++++---------------- drivers/staging/rdma/hfi1/qp.c | 23 ++--- drivers/staging/rdma/hfi1/qp.h | 2 +- drivers/staging/rdma/hfi1/rc.c | 32 +++---- drivers/staging/rdma/hfi1/ruc.c | 14 +-- drivers/staging/rdma/hfi1/uc.c | 2 +- drivers/staging/rdma/hfi1/ud.c | 16 ++-- drivers/staging/rdma/hfi1/verbs.c | 61 +++++++------ drivers/staging/rdma/hfi1/verbs.h | 51 +---------- drivers/staging/rdma/hfi1/verbs_mcast.c | 28 +++--- 13 files changed, 197 insertions(+), 227 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index da2718f05f21..93e152dd4228 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -3933,8 +3933,8 @@ static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \ void *context, int vl, int mode, u64 data) \ { \ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \ - return read_write_cpu(ppd->dd, &ppd->ibport_data.z_ ##cntr, \ - ppd->ibport_data.cntr, vl, \ + return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \ + ppd->ibport_data.rvp.cntr, vl, \ mode, data); \ } @@ -3951,7 +3951,7 @@ static u64 access_ibp_##cntr(const struct cntr_entry *entry, \ if (vl != CNTR_INVALID_VL) \ return 0; \ \ - return read_write_sw(ppd->dd, &ppd->ibport_data.n_ ##cntr, \ + return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \ mode, data); \ } @@ -9239,14 +9239,14 @@ static inline int init_cpu_counters(struct hfi1_devdata *dd) ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { - ppd->ibport_data.rc_acks = NULL; - ppd->ibport_data.rc_qacks = NULL; - ppd->ibport_data.rc_acks = alloc_percpu(u64); - ppd->ibport_data.rc_qacks = 
alloc_percpu(u64); - ppd->ibport_data.rc_delayed_comp = alloc_percpu(u64); - if ((ppd->ibport_data.rc_acks == NULL) || - (ppd->ibport_data.rc_delayed_comp == NULL) || - (ppd->ibport_data.rc_qacks == NULL)) + ppd->ibport_data.rvp.rc_acks = NULL; + ppd->ibport_data.rvp.rc_qacks = NULL; + ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); + ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); + ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); + if (!ppd->ibport_data.rvp.rc_acks || + !ppd->ibport_data.rvp.rc_delayed_comp || + !ppd->ibport_data.rvp.rc_qacks) return -ENOMEM; } @@ -11318,14 +11318,14 @@ static void free_cntrs(struct hfi1_devdata *dd) for (i = 0; i < dd->num_pports; i++, ppd++) { kfree(ppd->cntrs); kfree(ppd->scntrs); - free_percpu(ppd->ibport_data.rc_acks); - free_percpu(ppd->ibport_data.rc_qacks); - free_percpu(ppd->ibport_data.rc_delayed_comp); + free_percpu(ppd->ibport_data.rvp.rc_acks); + free_percpu(ppd->ibport_data.rvp.rc_qacks); + free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); ppd->cntrs = NULL; ppd->scntrs = NULL; - ppd->ibport_data.rc_acks = NULL; - ppd->ibport_data.rc_qacks = NULL; - ppd->ibport_data.rc_delayed_comp = NULL; + ppd->ibport_data.rvp.rc_acks = NULL; + ppd->ibport_data.rvp.rc_qacks = NULL; + ppd->ibport_data.rvp.rc_delayed_comp = NULL; } kfree(dd->portcntrnames); dd->portcntrnames = NULL; diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index eaed692ba575..da55e39658fb 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -337,7 +337,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, /* Check for valid receive state. */ if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) { - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; } switch (qp->ibqp.qp_type) { diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index d52dbdaacad7..e5f3451fecc2 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1855,10 +1855,10 @@ static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd) ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { - ppd->ibport_data.z_rc_acks = - get_all_cpu_total(ppd->ibport_data.rc_acks); - ppd->ibport_data.z_rc_qacks = - get_all_cpu_total(ppd->ibport_data.rc_qacks); + ppd->ibport_data.rvp.z_rc_acks = + get_all_cpu_total(ppd->ibport_data.rvp.rc_acks); + ppd->ibport_data.rvp.z_rc_qacks = + get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks); } } diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 1190f8dd2629..6daf2770cc56 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -91,7 +91,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) int pkey_idx; u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp; - agent = ibp->send_agent; + agent = ibp->rvp.send_agent; if (!agent) return; @@ -100,7 +100,8 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) return; /* o14-2 */ - if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout)) + if (ibp->rvp.trap_timeout && time_before(jiffies, + ibp->rvp.trap_timeout)) return; pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY); @@ -121,18 +122,18 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; smp->class_version = OPA_SMI_CLASS_VERSION; smp->method = IB_MGMT_METHOD_TRAP; - ibp->tid++; - smp->tid = 
cpu_to_be64(ibp->tid); + ibp->rvp.tid++; + smp->tid = cpu_to_be64(ibp->rvp.tid); smp->attr_id = IB_SMP_ATTR_NOTICE; /* o14-1: smp->mkey = 0; */ memcpy(smp->route.lid.data, data, len); - spin_lock_irqsave(&ibp->lock, flags); + spin_lock_irqsave(&ibp->rvp.lock, flags); if (!ibp->sm_ah) { - if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { + if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { struct ib_ah *ah; - ah = hfi1_create_qp0_ah(ibp, ibp->sm_lid); + ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid); if (IS_ERR(ah)) ret = PTR_ERR(ah); else { @@ -146,17 +147,17 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) send_buf->ah = &ibp->sm_ah->ibah; ret = 0; } - spin_unlock_irqrestore(&ibp->lock, flags); + spin_unlock_irqrestore(&ibp->rvp.lock, flags); if (!ret) ret = ib_post_send_mad(send_buf, NULL); if (!ret) { /* 4.096 usec. */ - timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000; - ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout); + timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000; + ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout); } else { ib_free_send_mad(send_buf); - ibp->trap_timeout = 0; + ibp->rvp.trap_timeout = 0; } } @@ -174,10 +175,10 @@ void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl, memset(&data, 0, sizeof(data)); if (trap_num == OPA_TRAP_BAD_P_KEY) - ibp->pkey_violations++; + ibp->rvp.pkey_violations++; else - ibp->qkey_violations++; - ibp->n_pkt_drops++; + ibp->rvp.qkey_violations++; + ibp->rvp.n_pkt_drops++; /* Send violation trap */ data.generic_type = IB_NOTICE_TYPE_SECURITY; @@ -245,7 +246,7 @@ void hfi1_cap_mask_chg(struct hfi1_ibport *ibp) data.trap_num = OPA_TRAP_CHANGE_CAPABILITY; data.issuer_lid = cpu_to_be32(lid); data.ntc_144.lid = data.issuer_lid; - data.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); + data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); send_trap(ibp, &data, sizeof(data)); } @@ -407,37 +408,38 @@ static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad, int ret = 0; /* Is the mkey in the process of expiring? */ - if (ibp->mkey_lease_timeout && - time_after_eq(jiffies, ibp->mkey_lease_timeout)) { + if (ibp->rvp.mkey_lease_timeout && + time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) { /* Clear timeout and mkey protection field. 
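check_mkey() above tests the mkey lease with time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout). The kernel's time_after_eq() stays correct when the jiffies counter wraps because it compares via a signed difference. A small userspace demonstration of that property (the macro below is the kernel's core comparison, stripped of its typecheck()):

    #include <stdio.h>

    /* Wrap-safe "a is at or after b" on a free-running counter. */
    #define time_after_eq(a, b) ((long)((a) - (b)) >= 0)

    int main(void)
    {
            unsigned long now = (unsigned long)-5;  /* about to wrap */
            unsigned long deadline = now + 10;      /* wraps past zero */

            printf("before deadline: %d\n", time_after_eq(now, deadline));
            printf("at deadline:     %d\n", time_after_eq(deadline, deadline));
            return 0;
    }

A naive "now >= deadline" on raw unsigned values would report the pre-wrap sample as already expired; the signed-difference form does not.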
*/ - ibp->mkey_lease_timeout = 0; - ibp->mkeyprot = 0; + ibp->rvp.mkey_lease_timeout = 0; + ibp->rvp.mkeyprot = 0; } - if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 || - ibp->mkey == mkey) + if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 || + ibp->rvp.mkey == mkey) valid_mkey = 1; /* Unset lease timeout on any valid Get/Set/TrapRepress */ - if (valid_mkey && ibp->mkey_lease_timeout && + if (valid_mkey && ibp->rvp.mkey_lease_timeout && (mad->method == IB_MGMT_METHOD_GET || mad->method == IB_MGMT_METHOD_SET || mad->method == IB_MGMT_METHOD_TRAP_REPRESS)) - ibp->mkey_lease_timeout = 0; + ibp->rvp.mkey_lease_timeout = 0; if (!valid_mkey) { switch (mad->method) { case IB_MGMT_METHOD_GET: /* Bad mkey not a violation below level 2 */ - if (ibp->mkeyprot < 2) + if (ibp->rvp.mkeyprot < 2) break; case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_TRAP_REPRESS: - if (ibp->mkey_violations != 0xFFFF) - ++ibp->mkey_violations; - if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) - ibp->mkey_lease_timeout = jiffies + - ibp->mkey_lease_period * HZ; + if (ibp->rvp.mkey_violations != 0xFFFF) + ++ibp->rvp.mkey_violations; + if (!ibp->rvp.mkey_lease_timeout && + ibp->rvp.mkey_lease_period) + ibp->rvp.mkey_lease_timeout = jiffies + + ibp->rvp.mkey_lease_period * HZ; /* Generate a trap notice. */ bad_mkey(ibp, mad, mkey, dr_slid, return_path, hop_cnt); @@ -548,14 +550,14 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, /* Only return the mkey if the protection field allows it. */ if (!(smp->method == IB_MGMT_METHOD_GET && - ibp->mkey != smp->mkey && - ibp->mkeyprot == 1)) - pi->mkey = ibp->mkey; - - pi->subnet_prefix = ibp->gid_prefix; - pi->sm_lid = cpu_to_be32(ibp->sm_lid); - pi->ib_cap_mask = cpu_to_be32(ibp->port_cap_flags); - pi->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period); + ibp->rvp.mkey != smp->mkey && + ibp->rvp.mkeyprot == 1)) + pi->mkey = ibp->rvp.mkey; + + pi->subnet_prefix = ibp->rvp.gid_prefix; + pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid); + pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); + pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period); pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp); pi->sa_qp = cpu_to_be32(ppd->sa_qp); @@ -599,7 +601,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, pi->port_states.portphysstate_portstate = (hfi1_ibphys_portstate(ppd) << 4) | state; - pi->mkeyprotect_lmc = (ibp->mkeyprot << 6) | ppd->lmc; + pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc; memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu)); for (i = 0; i < ppd->vls_supported; i++) { @@ -612,7 +614,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, /* don't forget VL 15 */ mtu = mtu_to_enum(dd->vld[15].mtu, 2048); pi->neigh_mtu.pvlx_to_mtu[15/2] |= mtu; - pi->smsl = ibp->sm_sl & OPA_PI_MASK_SMSL; + pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL; pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS); pi->partenforce_filterraw |= (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON); @@ -620,17 +622,17 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN; if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT) pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT; - pi->mkey_violations = cpu_to_be16(ibp->mkey_violations); + pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations); /* P_KeyViolations are counted by hardware. 
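Note how the violation counters converted above saturate rather than wrap: mkey_violations is only incremented while it is below 0xFFFF, matching the 16-bit fields later reported through OPA_PortInfo. A sketch of the idiom:

    #include <stdio.h>
    #include <stdint.h>

    /* Saturating 16-bit counter, as the mkey/pkey/qkey violation
     * counts above are handled. */
    static void count_violation(uint16_t *ctr)
    {
            if (*ctr != 0xFFFF)
                    ++*ctr;
    }

    int main(void)
    {
            uint16_t v = 0xFFFE;

            count_violation(&v);
            count_violation(&v);    /* saturates, does not wrap to 0 */
            printf("violations = 0x%04X\n", v);
            return 0;
    }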
*/ - pi->pkey_violations = cpu_to_be16(ibp->pkey_violations); - pi->qkey_violations = cpu_to_be16(ibp->qkey_violations); + pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations); + pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations); pi->vl.cap = ppd->vls_supported; - pi->vl.high_limit = cpu_to_be16(ibp->vl_high_limit); + pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit); pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP); pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP); - pi->clientrereg_subnettimeout = ibp->subnet_timeout; + pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout; pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 | OPA_PORT_LINK_MODE_OPA << 5 | @@ -1091,9 +1093,9 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, ls_old = driver_lstate(ppd); - ibp->mkey = pi->mkey; - ibp->gid_prefix = pi->subnet_prefix; - ibp->mkey_lease_period = be16_to_cpu(pi->mkey_lease_period); + ibp->rvp.mkey = pi->mkey; + ibp->rvp.gid_prefix = pi->subnet_prefix; + ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period); /* Must be a valid unicast LID address. */ if ((lid == 0 && ls_old > IB_PORT_INIT) || @@ -1133,20 +1135,20 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { smp->status |= IB_SMP_INVALID_FIELD; pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid); - } else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { + } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) { pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid); - spin_lock_irqsave(&ibp->lock, flags); + spin_lock_irqsave(&ibp->rvp.lock, flags); if (ibp->sm_ah) { - if (smlid != ibp->sm_lid) + if (smlid != ibp->rvp.sm_lid) ibp->sm_ah->attr.dlid = smlid; - if (msl != ibp->sm_sl) + if (msl != ibp->rvp.sm_sl) ibp->sm_ah->attr.sl = msl; } - spin_unlock_irqrestore(&ibp->lock, flags); - if (smlid != ibp->sm_lid) - ibp->sm_lid = smlid; - if (msl != ibp->sm_sl) - ibp->sm_sl = msl; + spin_unlock_irqrestore(&ibp->rvp.lock, flags); + if (smlid != ibp->rvp.sm_lid) + ibp->rvp.sm_lid = smlid; + if (msl != ibp->rvp.sm_sl) + ibp->rvp.sm_sl = msl; event.event = IB_EVENT_SM_CHANGE; ib_dispatch_event(&event); } @@ -1198,10 +1200,11 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, smp->status |= IB_SMP_INVALID_FIELD; } - ibp->mkeyprot = (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6; - ibp->vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF; + ibp->rvp.mkeyprot = + (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6; + ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF; (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT, - ibp->vl_high_limit); + ibp->rvp.vl_high_limit); if (ppd->vls_supported/2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || ppd->vls_supported > ARRAY_SIZE(dd->vld)) { @@ -1260,15 +1263,15 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, } if (pi->mkey_violations == 0) - ibp->mkey_violations = 0; + ibp->rvp.mkey_violations = 0; if (pi->pkey_violations == 0) - ibp->pkey_violations = 0; + ibp->rvp.pkey_violations = 0; if (pi->qkey_violations == 0) - ibp->qkey_violations = 0; + ibp->rvp.qkey_violations = 0; - ibp->subnet_timeout = + ibp->rvp.subnet_timeout = pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT; crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode); @@ -3532,9 +3535,9 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, 
resp_len); break; case IB_SMP_ATTR_SM_INFO: - if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) + if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; - if (ibp->port_cap_flags & IB_PORT_SM) + if (ibp->rvp.port_cap_flags & IB_PORT_SM) return IB_MAD_RESULT_SUCCESS; /* FALLTHROUGH */ default: @@ -3602,9 +3605,9 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, resp_len); break; case IB_SMP_ATTR_SM_INFO: - if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) + if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; - if (ibp->port_cap_flags & IB_PORT_SM) + if (ibp->rvp.port_cap_flags & IB_PORT_SM) return IB_MAD_RESULT_SUCCESS; /* FALLTHROUGH */ default: @@ -4180,7 +4183,7 @@ int hfi1_create_agents(struct hfi1_ibdev *dev) goto err; } - ibp->send_agent = agent; + ibp->rvp.send_agent = agent; } return 0; @@ -4188,9 +4191,9 @@ int hfi1_create_agents(struct hfi1_ibdev *dev) err: for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; - if (ibp->send_agent) { - agent = ibp->send_agent; - ibp->send_agent = NULL; + if (ibp->rvp.send_agent) { + agent = ibp->rvp.send_agent; + ibp->rvp.send_agent = NULL; ib_unregister_mad_agent(agent); } } @@ -4207,9 +4210,9 @@ void hfi1_free_agents(struct hfi1_ibdev *dev) for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; - if (ibp->send_agent) { - agent = ibp->send_agent; - ibp->send_agent = NULL; + if (ibp->rvp.send_agent) { + agent = ibp->rvp.send_agent; + ibp->rvp.send_agent = NULL; ib_unregister_mad_agent(agent); } if (ibp->sm_ah) { diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 0f00365f899d..a1dfb718d90b 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -238,7 +238,7 @@ static void insert_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags); if (qp->ibqp.qp_num <= 1) { - rcu_assign_pointer(ibp->qp[qp->ibqp.qp_num], qp); + rcu_assign_pointer(ibp->rvp.qp[qp->ibqp.qp_num], qp); } else { u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num); @@ -263,12 +263,13 @@ static void remove_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags); - if (rcu_dereference_protected(ibp->qp[0], + if (rcu_dereference_protected(ibp->rvp.qp[0], + lockdep_is_held( + &dev->qp_dev->qpt_lock)) == qp) { + RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); + } else if (rcu_dereference_protected(ibp->rvp.qp[1], lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) { - RCU_INIT_POINTER(ibp->qp[0], NULL); - } else if (rcu_dereference_protected(ibp->qp[1], - lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) { - RCU_INIT_POINTER(ibp->qp[1], NULL); + RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); } else { struct rvt_qp *q; struct rvt_qp __rcu **qpp; @@ -317,9 +318,9 @@ static unsigned free_all_qps(struct hfi1_devdata *dd) if (!hfi1_mcast_tree_empty(ibp)) qp_inuse++; rcu_read_lock(); - if (rcu_dereference(ibp->qp[0])) + if (rcu_dereference(ibp->rvp.qp[0])) qp_inuse++; - if (rcu_dereference(ibp->qp[1])) + if (rcu_dereference(ibp->rvp.qp[1])) qp_inuse++; rcu_read_unlock(); } @@ -1467,7 +1468,7 @@ static int iowait_sleep( struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - ibp->n_dmawait++; + ibp->rvp.n_dmawait++; qp->s_flags |= HFI1_S_WAIT_DMA_DESC; list_add_tail(&priv->s_iowait.list, &sde->dmawait); trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC); @@ -1636,9 +1637,9 @@ int qp_iter_next(struct qp_iter *iter) ibp = 
&ppd->ibport_data; if (!(n & 1)) - qp = rcu_dereference(ibp->qp[0]); + qp = rcu_dereference(ibp->rvp.qp[0]); else - qp = rcu_dereference(ibp->qp[1]); + qp = rcu_dereference(ibp->rvp.qp[1]); } else { qp = rcu_dereference( dev->qp_dev->qp_table[ diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 3dd31e9e2c6b..8e665622a93b 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -104,7 +104,7 @@ static inline struct rvt_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp, struct rvt_qp *qp = NULL; if (unlikely(qpn <= 1)) { - qp = rcu_dereference(ibp->qp[qpn]); + qp = rcu_dereference(ibp->rvp.qp[qpn]); } else { struct hfi1_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; u32 n = qpn_hash(dev->qp_dev, qpn); diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 4b8518ac9e7f..d7334f48f8c5 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -772,7 +772,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, return; queue_ack: - this_cpu_inc(*ibp->rc_qacks); + this_cpu_inc(*ibp->rvp.rc_qacks); spin_lock_irqsave(&qp->s_lock, flags); qp->s_flags |= HFI1_S_ACK_PENDING | HFI1_S_RESP_PENDING; qp->s_nak_state = qp->r_nak_state; @@ -900,9 +900,9 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait) ibp = to_iport(qp->ibqp.device, qp->port_num); if (wqe->wr.opcode == IB_WR_RDMA_READ) - ibp->n_rc_resends++; + ibp->rvp.n_rc_resends++; else - ibp->n_rc_resends += delta_psn(qp->s_psn, psn); + ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn); qp->s_flags &= ~(HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_PSN | @@ -925,7 +925,7 @@ static void rc_timeout(unsigned long arg) spin_lock(&qp->s_lock); if (qp->s_flags & HFI1_S_TIMER) { ibp = to_iport(qp->ibqp.device, qp->port_num); - ibp->n_rc_timeouts++; + ibp->rvp.n_rc_timeouts++; qp->s_flags &= ~HFI1_S_TIMER; del_timer(&qp->s_timer); trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1); @@ -1104,7 +1104,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, } else { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - this_cpu_inc(*ibp->rc_delayed_comp); + this_cpu_inc(*ibp->rvp.rc_delayed_comp); /* * If send progress not running attempt to progress * SDMA queue. @@ -1263,7 +1263,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, switch (aeth >> 29) { case 0: /* ACK */ - this_cpu_inc(*ibp->rc_acks); + this_cpu_inc(*ibp->rvp.rc_acks); if (qp->s_acked != qp->s_tail) { /* * We are expecting more ACKs so @@ -1292,7 +1292,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, goto bail; case 1: /* RNR NAK */ - ibp->n_rnr_naks++; + ibp->rvp.n_rnr_naks++; if (qp->s_acked == qp->s_tail) goto bail; if (qp->s_flags & HFI1_S_WAIT_RNR) @@ -1307,7 +1307,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, /* The last valid PSN is the previous PSN. */ update_last_psn(qp, psn - 1); - ibp->n_rc_resends += delta_psn(qp->s_psn, psn); + ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn); reset_psn(qp, psn); @@ -1328,7 +1328,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK) { case 0: /* PSN sequence error */ - ibp->n_seq_naks++; + ibp->rvp.n_seq_naks++; /* * Back up to the responder's expected PSN. 
* Note that we might get a NAK in the middle of an @@ -1341,17 +1341,17 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, case 1: /* Invalid Request */ status = IB_WC_REM_INV_REQ_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; goto class_b; case 2: /* Remote Access Error */ status = IB_WC_REM_ACCESS_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; goto class_b; case 3: /* Remote Operation Error */ status = IB_WC_REM_OP_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; class_b: if (qp->s_last == qp->s_acked) { hfi1_send_complete(qp, wqe, status); @@ -1402,7 +1402,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, wqe = do_rc_completion(qp, wqe, ibp); } - ibp->n_rdma_seq++; + ibp->rvp.n_rdma_seq++; qp->r_flags |= HFI1_R_RDMAR_SEQ; restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { @@ -1665,7 +1665,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, * Don't queue the NAK if we already sent one. */ if (!qp->r_nak_state) { - ibp->n_rc_seqnak++; + ibp->rvp.n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; /* Use the expected PSN. */ qp->r_ack_psn = qp->r_psn; @@ -1697,7 +1697,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, */ e = NULL; old_req = 1; - ibp->n_rc_dupreq++; + ibp->rvp.n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); @@ -2433,7 +2433,7 @@ void hfi1_rc_hdrerr( if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) { diff = delta_psn(psn, qp->r_psn); if (!qp->r_nak_state && diff >= 0) { - ibp->n_rc_seqnak++; + ibp->rvp.n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; /* Use the expected PSN. */ qp->r_ack_psn = qp->r_psn; diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 3b2f032b9dea..98a4798a0ead 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -279,7 +279,8 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH)) goto err; guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index); - if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) + if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix, + guid)) goto err; if (!gid_ok(&hdr->u.l.grh.sgid, qp->alt_ah_attr.grh.dgid.global.subnet_prefix, @@ -312,7 +313,8 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, goto err; guid = get_sguid(ibp, qp->remote_ah_attr.grh.sgid_index); - if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) + if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix, + guid)) goto err; if (!gid_ok(&hdr->u.l.grh.sgid, qp->remote_ah_attr.grh.dgid.global.subnet_prefix, @@ -413,7 +415,7 @@ again: if (!qp || !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) || qp->ibqp.qp_type != sqp->ibqp.qp_type) { - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; /* * For RC, the requester would timeout and retry so * shortcut the timeouts and just signal too many retries. @@ -569,7 +571,7 @@ again: send_comp: spin_lock_irqsave(&sqp->s_lock, flags); - ibp->n_loop_pkts++; + ibp->rvp.n_loop_pkts++; flush_send: sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; hfi1_send_complete(sqp, wqe, send_status); @@ -579,7 +581,7 @@ rnr_nak: /* Handle RNR NAK */ if (qp->ibqp.qp_type == IB_QPT_UC) goto send_comp; - ibp->n_rnr_naks++; + ibp->rvp.n_rnr_naks++; /* * Note: we don't need the s_lock held since the BUSY flag * makes this single threaded. 
@@ -665,7 +667,7 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, hdr->next_hdr = IB_GRH_NEXT_HDR; hdr->hop_limit = grh->hop_limit; /* The SGID is 32-bit aligned. */ - hdr->sgid.global.subnet_prefix = ibp->gid_prefix; + hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix; hdr->sgid.global.interface_id = grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ? ibp->guids[grh->sgid_index - 1] : diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 6686331943b9..cac3724e39d5 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -579,7 +579,7 @@ rewind: set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; drop: - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; return; op_err: diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index d54d56d833b2..e058fd24c60f 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -82,7 +82,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn); if (!qp) { - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; rcu_read_unlock(); return; } @@ -94,7 +94,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) if (dqptype != sqptype || !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) { - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; goto drop; } @@ -173,14 +173,14 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) } if (!ret) { if (qp->ibqp.qp_num == 0) - ibp->n_vl15_dropped++; + ibp->rvp.n_vl15_dropped++; goto bail_unlock; } } /* Silently drop packets which are too big. */ if (unlikely(wc.byte_len > qp->r_len)) { qp->r_flags |= HFI1_R_REUSE_SGE; - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; goto bail_unlock; } @@ -249,7 +249,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) /* Signal completion event if the solicited bit is set. */ hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, swqe->wr.send_flags & IB_SEND_SOLICITED); - ibp->n_loop_pkts++; + ibp->rvp.n_loop_pkts++; bail_unlock: spin_unlock_irqrestore(&qp->r_lock, flags); drop: @@ -608,7 +608,7 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5, case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_GET_RESP: case IB_MGMT_METHOD_REPORT_RESP: - if (ibp->port_cap_flags & IB_PORT_SM) + if (ibp->rvp.port_cap_flags & IB_PORT_SM) return 0; if (pkey == FULL_MGMT_P_KEY) { smp->status |= IB_SMP_UNSUP_METHOD; @@ -824,7 +824,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) } if (!ret) { if (qp->ibqp.qp_num == 0) - ibp->n_vl15_dropped++; + ibp->rvp.n_vl15_dropped++; return; } } @@ -884,5 +884,5 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) return; drop: - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; } diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 10d6547037d0..1c5e477d5493 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -593,7 +593,7 @@ static inline int qp_ok(int opcode, struct hfi1_packet *packet) return 1; dropit: ibp = &packet->rcd->ppd->ibport_data; - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; return 0; } @@ -683,7 +683,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) return; drop: - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; } /* @@ -1465,17 +1465,17 @@ static int query_port(struct ib_device *ibdev, u8 port, memset(props, 0, sizeof(*props)); props->lid = lid ? 
lid : 0; props->lmc = ppd->lmc; - props->sm_lid = ibp->sm_lid; - props->sm_sl = ibp->sm_sl; + props->sm_lid = ibp->rvp.sm_lid; + props->sm_sl = ibp->rvp.sm_sl; /* OPA logical states match IB logical states */ props->state = driver_lstate(ppd); props->phys_state = hfi1_ibphys_portstate(ppd); - props->port_cap_flags = ibp->port_cap_flags; + props->port_cap_flags = ibp->rvp.port_cap_flags; props->gid_tbl_len = HFI1_GUIDS_PER_PORT; props->max_msg_sz = 0x80000000; props->pkey_tbl_len = hfi1_get_npkeys(dd); - props->bad_pkey_cntr = ibp->pkey_violations; - props->qkey_viol_cntr = ibp->qkey_violations; + props->bad_pkey_cntr = ibp->rvp.pkey_violations; + props->qkey_viol_cntr = ibp->rvp.qkey_violations; props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); /* see rate_show() in ib core/sysfs.c */ props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active); @@ -1494,7 +1494,7 @@ static int query_port(struct ib_device *ibdev, u8 port, 4096 : hfi1_max_mtu), IB_MTU_4096); props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : mtu_to_enum(ppd->ibmtu, IB_MTU_2048); - props->subnet_timeout = ibp->subnet_timeout; + props->subnet_timeout = ibp->rvp.subnet_timeout; return 0; } @@ -1565,8 +1565,8 @@ static int modify_port(struct ib_device *ibdev, u8 port, struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); int ret = 0; - ibp->port_cap_flags |= props->set_port_cap_mask; - ibp->port_cap_flags &= ~props->clr_port_cap_mask; + ibp->rvp.port_cap_flags |= props->set_port_cap_mask; + ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask; if (props->set_port_cap_mask || props->clr_port_cap_mask) hfi1_cap_mask_chg(ibp); if (port_modify_mask & IB_PORT_SHUTDOWN) { @@ -1575,7 +1575,7 @@ static int modify_port(struct ib_device *ibdev, u8 port, ret = set_link_state(ppd, HLS_DN_DOWNDEF); } if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) - ibp->qkey_violations = 0; + ibp->rvp.qkey_violations = 0; return ret; } @@ -1591,7 +1591,7 @@ static int query_gid(struct ib_device *ibdev, u8 port, struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - gid->global.subnet_prefix = ibp->gid_prefix; + gid->global.subnet_prefix = ibp->rvp.gid_prefix; if (index == 0) gid->global.interface_id = cpu_to_be64(ppd->guid); else if (index < HFI1_GUIDS_PER_PORT) @@ -1663,7 +1663,7 @@ struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid) attr.dlid = dlid; attr.port_num = ppd_from_ibp(ibp)->port; rcu_read_lock(); - qp0 = rcu_dereference(ibp->qp[0]); + qp0 = rcu_dereference(ibp->rvp.qp[0]); if (qp0) ah = ib_create_ah(qp0->ibqp.pd, &attr); rcu_read_unlock(); @@ -1738,21 +1738,21 @@ static void init_ibport(struct hfi1_pportdata *ppd) ibp->sc_to_sl[i] = i; } - spin_lock_init(&ibp->lock); + spin_lock_init(&ibp->rvp.lock); /* Set the prefix to the default value (see ch. 
4.1.1) */ - ibp->gid_prefix = IB_DEFAULT_GID_PREFIX; - ibp->sm_lid = 0; + ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX; + ibp->rvp.sm_lid = 0; /* Below should only set bits defined in OPA PortInfo.CapabilityMask */ - ibp->port_cap_flags = IB_PORT_AUTO_MIGR_SUP | + ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP | IB_PORT_CAP_MASK_NOTICE_SUP; - ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; - ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; - ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; - ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; - ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; - - RCU_INIT_POINTER(ibp->qp[0], NULL); - RCU_INIT_POINTER(ibp->qp[1], NULL); + ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; + ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; + ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; + ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; + ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; + + RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); + RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); } static void verbs_txreq_kmem_cache_ctor(void *obj) @@ -1926,6 +1926,15 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.flags = (RVT_FLAG_QP_INIT_DRIVER | RVT_FLAG_CQ_INIT_DRIVER); dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size; + dd->verbs_dev.rdi.dparms.nports = dd->num_pports; + dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd); + + ppd = dd->pport; + for (i = 0; i < dd->num_pports; i++, ppd++) + rvt_init_port(&dd->verbs_dev.rdi, + &ppd->ibport_data.rvp, + i, + ppd->pkeys); ret = rvt_register_device(&dd->verbs_dev.rdi); if (ret) @@ -2003,7 +2012,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet) svc_type = IB_CC_SVCTYPE_UD; break; default: - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; return; } diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 97df555e62b5..6a7ee460d98a 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -412,62 +412,17 @@ static inline void inc_opstats( struct hfi1_ibport { struct rvt_qp __rcu *qp[2]; - struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ + struct rvt_ibport rvp; struct rvt_ah *sm_ah; struct rvt_ah *smi_ah; - struct rb_root mcast_tree; - spinlock_t lock; /* protect changes in this struct */ - - /* non-zero when timer is set */ - unsigned long mkey_lease_timeout; - unsigned long trap_timeout; - __be64 gid_prefix; /* in network order */ - __be64 mkey; + __be64 guids[HFI1_GUIDS_PER_PORT - 1]; /* writable GUIDs */ - u64 tid; /* TID for traps */ - u64 n_rc_resends; - u64 n_seq_naks; - u64 n_rdma_seq; - u64 n_rnr_naks; - u64 n_other_naks; - u64 n_loop_pkts; - u64 n_pkt_drops; - u64 n_vl15_dropped; - u64 n_rc_timeouts; - u64 n_dmawait; - u64 n_unaligned; - u64 n_rc_dupreq; - u64 n_rc_seqnak; - - /* Hot-path per CPU counters to avoid cacheline trading to update */ - u64 z_rc_acks; - u64 z_rc_qacks; - u64 z_rc_delayed_comp; - u64 __percpu *rc_acks; - u64 __percpu *rc_qacks; - u64 __percpu *rc_delayed_comp; - - u32 port_cap_flags; - u32 pma_sample_start; - u32 pma_sample_interval; - __be16 pma_counter_select[5]; - u16 pma_tag; - u16 pkey_violations; - u16 qkey_violations; - u16 mkey_violations; - u16 mkey_lease_period; - u16 sm_lid; - u16 repress_traps; - u8 sm_sl; - u8 mkeyprot; - u8 subnet_timeout; - u8 vl_high_limit; + /* the first 16 entries are sl_to_vl for !OPA */ u8 sl_to_sc[32]; u8 sc_to_sl[32]; }; - struct hfi1_qp_ibdev; struct hfi1_ibdev { struct rvt_dev_info rdi; /* Must be 
first */ diff --git a/drivers/staging/rdma/hfi1/verbs_mcast.c b/drivers/staging/rdma/hfi1/verbs_mcast.c index 49954b9b9e36..aa3f560d2f43 100644 --- a/drivers/staging/rdma/hfi1/verbs_mcast.c +++ b/drivers/staging/rdma/hfi1/verbs_mcast.c @@ -131,8 +131,8 @@ struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid) unsigned long flags; struct hfi1_mcast *mcast; - spin_lock_irqsave(&ibp->lock, flags); - n = ibp->mcast_tree.rb_node; + spin_lock_irqsave(&ibp->rvp.lock, flags); + n = ibp->rvp.mcast_tree.rb_node; while (n) { int ret; @@ -146,11 +146,11 @@ struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid) n = n->rb_right; else { atomic_inc(&mcast->refcount); - spin_unlock_irqrestore(&ibp->lock, flags); + spin_unlock_irqrestore(&ibp->rvp.lock, flags); goto bail; } } - spin_unlock_irqrestore(&ibp->lock, flags); + spin_unlock_irqrestore(&ibp->rvp.lock, flags); mcast = NULL; @@ -170,11 +170,11 @@ bail: static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp, struct hfi1_mcast *mcast, struct hfi1_mcast_qp *mqp) { - struct rb_node **n = &ibp->mcast_tree.rb_node; + struct rb_node **n = &ibp->rvp.mcast_tree.rb_node; struct rb_node *pn = NULL; int ret; - spin_lock_irq(&ibp->lock); + spin_lock_irq(&ibp->rvp.lock); while (*n) { struct hfi1_mcast *tmcast; @@ -229,12 +229,12 @@ static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp, atomic_inc(&mcast->refcount); rb_link_node(&mcast->rb_node, pn, n); - rb_insert_color(&mcast->rb_node, &ibp->mcast_tree); + rb_insert_color(&mcast->rb_node, &ibp->rvp.mcast_tree); ret = 0; bail: - spin_unlock_irq(&ibp->lock); + spin_unlock_irq(&ibp->rvp.lock); return ret; } @@ -313,13 +313,13 @@ int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) goto bail; } - spin_lock_irq(&ibp->lock); + spin_lock_irq(&ibp->rvp.lock); /* Find the GID in the mcast table. */ - n = ibp->mcast_tree.rb_node; + n = ibp->rvp.mcast_tree.rb_node; while (1) { if (n == NULL) { - spin_unlock_irq(&ibp->lock); + spin_unlock_irq(&ibp->rvp.lock); ret = -EINVAL; goto bail; } @@ -348,13 +348,13 @@ int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) /* If this was the last attached QP, remove the GID too. */ if (list_empty(&mcast->qp_list)) { - rb_erase(&mcast->rb_node, &ibp->mcast_tree); + rb_erase(&mcast->rb_node, &ibp->rvp.mcast_tree); last = 1; } break; } - spin_unlock_irq(&ibp->lock); + spin_unlock_irq(&ibp->rvp.lock); if (p) { /* @@ -381,5 +381,5 @@ bail: int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp) { - return ibp->mcast_tree.rb_node == NULL; + return !ibp->rvp.mcast_tree.rb_node; } -- cgit v1.2.3-59-g8ed1b From 92c24be1e8a1a9110428130271c7dc670fb1e0a2 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:44 -0800 Subject: staging/rdma/hfi1: Remove mmap from hfi1 The mmap data structure has already been moved to rdmavt and hfi1 supports it. Now that the mmap functionality has also been moved to rdmavt, it's time for hfi1 to use that as well.
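For illustration, a minimal sketch of the conversion pattern this patch applies, assembled from the cq.c hunks below (error handling and surrounding code omitted). The driver-local helpers give way to their rdmavt equivalents, reached through the embedded rvt_dev_info:

        /* Before: hfi1-private mmap bookkeeping hung off struct hfi1_ibdev. */
        cq->ip = hfi1_create_mmap_info(dev, sz, context, wc);
        /* ... */
        kref_put(&cq->ip->ref, hfi1_release_mmap_info);

        /* After: rdmavt owns the mmap info and the pending-mmap list,
         * both reached through dev->rdi, the embedded rvt_dev_info.
         */
        cq->ip = rvt_create_mmap_info(&dev->rdi, sz, context, wc);
        spin_lock_irq(&dev->rdi.pending_lock);
        list_add(&cq->ip->pending_mmaps, &dev->rdi.pending_mmaps);
        spin_unlock_irq(&dev->rdi.pending_lock);
        /* ... */
        kref_put(&cq->ip->ref, rvt_release_mmap_info);

The same substitution is repeated for QPs and SRQs in qp.c and srq.c.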
Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/cq.c | 18 ++-- drivers/staging/rdma/hfi1/mmap.c | 192 ------------------------------------- drivers/staging/rdma/hfi1/qp.c | 12 +-- drivers/staging/rdma/hfi1/srq.c | 20 ++-- drivers/staging/rdma/hfi1/verbs.c | 6 +- drivers/staging/rdma/hfi1/verbs.h | 17 ---- 7 files changed, 27 insertions(+), 240 deletions(-) delete mode 100644 drivers/staging/rdma/hfi1/mmap.c (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index d82d9dc97c39..55077f396cf9 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o hfi1-y := chip.o cq.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ - init.o intr.o mad.o mmap.o pcie.o pio.o pio_copy.o \ + init.o intr.o mad.o pcie.o pio.o pio_copy.o \ qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs_mcast.o verbs.o hfi1-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/staging/rdma/hfi1/cq.c b/drivers/staging/rdma/hfi1/cq.c index ffd0e7abca00..25d1a2a25fee 100644 --- a/drivers/staging/rdma/hfi1/cq.c +++ b/drivers/staging/rdma/hfi1/cq.c @@ -277,7 +277,7 @@ struct ib_cq *hfi1_create_cq( if (udata && udata->outlen >= sizeof(__u64)) { int err; - cq->ip = hfi1_create_mmap_info(dev, sz, context, wc); + cq->ip = rvt_create_mmap_info(&dev->rdi, sz, context, wc); if (!cq->ip) { ret = ERR_PTR(-ENOMEM); goto bail_wc; @@ -303,9 +303,9 @@ struct ib_cq *hfi1_create_cq( spin_unlock(&dev->n_cqs_lock); if (cq->ip) { - spin_lock_irq(&dev->pending_lock); - list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); + spin_lock_irq(&dev->rdi.pending_lock); + list_add(&cq->ip->pending_mmaps, &dev->rdi.pending_mmaps); + spin_unlock_irq(&dev->rdi.pending_lock); } /* @@ -355,7 +355,7 @@ int hfi1_destroy_cq(struct ib_cq *ibcq) dev->n_cqs_allocated--; spin_unlock(&dev->n_cqs_lock); if (cq->ip) - kref_put(&cq->ip->ref, hfi1_release_mmap_info); + kref_put(&cq->ip->ref, rvt_release_mmap_info); else vfree(cq->queue); kfree(cq); @@ -481,7 +481,7 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) struct hfi1_ibdev *dev = to_idev(ibcq->device); struct rvt_mmap_info *ip = cq->ip; - hfi1_update_mmap_info(dev, ip, sz, wc); + rvt_update_mmap_info(&dev->rdi, ip, sz, wc); /* * Return the offset to mmap. @@ -494,10 +494,10 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) goto bail; } - spin_lock_irq(&dev->pending_lock); + spin_lock_irq(&dev->rdi.pending_lock); if (list_empty(&ip->pending_mmaps)) - list_add(&ip->pending_mmaps, &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); + list_add(&ip->pending_mmaps, &dev->rdi.pending_mmaps); + spin_unlock_irq(&dev->rdi.pending_lock); } ret = 0; diff --git a/drivers/staging/rdma/hfi1/mmap.c b/drivers/staging/rdma/hfi1/mmap.c deleted file mode 100644 index 4ce6be6af17c..000000000000 --- a/drivers/staging/rdma/hfi1/mmap.c +++ /dev/null @@ -1,192 +0,0 @@ -/* - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include -#include -#include - -#include "verbs.h" - -/** - * hfi1_release_mmap_info - free mmap info structure - * @ref: a pointer to the kref within struct rvt_mmap_info - */ -void hfi1_release_mmap_info(struct kref *ref) -{ - struct rvt_mmap_info *ip = - container_of(ref, struct rvt_mmap_info, ref); - struct hfi1_ibdev *dev = to_idev(ip->context->device); - - spin_lock_irq(&dev->pending_lock); - list_del(&ip->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - - vfree(ip->obj); - kfree(ip); -} - -/* - * open and close keep track of how many times the CQ is mapped, - * to avoid releasing it. - */ -static void hfi1_vma_open(struct vm_area_struct *vma) -{ - struct rvt_mmap_info *ip = vma->vm_private_data; - - kref_get(&ip->ref); -} - -static void hfi1_vma_close(struct vm_area_struct *vma) -{ - struct rvt_mmap_info *ip = vma->vm_private_data; - - kref_put(&ip->ref, hfi1_release_mmap_info); -} - -static struct vm_operations_struct hfi1_vm_ops = { - .open = hfi1_vma_open, - .close = hfi1_vma_close, -}; - -/** - * hfi1_mmap - create a new mmap region - * @context: the IB user context of the process making the mmap() call - * @vma: the VMA to be initialized - * Return zero if the mmap is OK. Otherwise, return an errno. 
- */ -int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) -{ - struct hfi1_ibdev *dev = to_idev(context->device); - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; - unsigned long size = vma->vm_end - vma->vm_start; - struct rvt_mmap_info *ip, *pp; - int ret = -EINVAL; - - /* - * Search the device's list of objects waiting for a mmap call. - * Normally, this list is very short since a call to create a - * CQ, QP, or SRQ is soon followed by a call to mmap(). - */ - spin_lock_irq(&dev->pending_lock); - list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, - pending_mmaps) { - /* Only the creator is allowed to mmap the object */ - if (context != ip->context || (__u64) offset != ip->offset) - continue; - /* Don't allow a mmap larger than the object. */ - if (size > ip->size) - break; - - list_del_init(&ip->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - - ret = remap_vmalloc_range(vma, ip->obj, 0); - if (ret) - goto done; - vma->vm_ops = &hfi1_vm_ops; - vma->vm_private_data = ip; - hfi1_vma_open(vma); - goto done; - } - spin_unlock_irq(&dev->pending_lock); -done: - return ret; -} - -/* - * Allocate information for hfi1_mmap - */ -struct rvt_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, - u32 size, - struct ib_ucontext *context, - void *obj) { - struct rvt_mmap_info *ip; - - ip = kmalloc(sizeof(*ip), GFP_KERNEL); - if (!ip) - goto bail; - - size = PAGE_ALIGN(size); - - spin_lock_irq(&dev->mmap_offset_lock); - if (dev->mmap_offset == 0) - dev->mmap_offset = PAGE_SIZE; - ip->offset = dev->mmap_offset; - dev->mmap_offset += size; - spin_unlock_irq(&dev->mmap_offset_lock); - - INIT_LIST_HEAD(&ip->pending_mmaps); - ip->size = size; - ip->context = context; - ip->obj = obj; - kref_init(&ip->ref); - -bail: - return ip; -} - -void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct rvt_mmap_info *ip, - u32 size, void *obj) -{ - size = PAGE_ALIGN(size); - - spin_lock_irq(&dev->mmap_offset_lock); - if (dev->mmap_offset == 0) - dev->mmap_offset = PAGE_SIZE; - ip->offset = dev->mmap_offset; - dev->mmap_offset += size; - spin_unlock_irq(&dev->mmap_offset_lock); - - ip->size = size; - ip->obj = obj; -} diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index a1dfb718d90b..20b1a840dbdc 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -1195,7 +1195,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, } else { u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; - qp->ip = hfi1_create_mmap_info(dev, s, + qp->ip = rvt_create_mmap_info(&dev->rdi, s, ibpd->uobject->context, qp->r_rq.wq); if (!qp->ip) { @@ -1223,9 +1223,9 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, spin_unlock(&dev->n_qps_lock); if (qp->ip) { - spin_lock_irq(&dev->pending_lock); - list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); + spin_lock_irq(&dev->rdi.pending_lock); + list_add(&qp->ip->pending_mmaps, &dev->rdi.pending_mmaps); + spin_unlock_irq(&dev->rdi.pending_lock); } ret = &qp->ibqp; @@ -1256,7 +1256,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, bail_ip: if (qp->ip) - kref_put(&qp->ip->ref, hfi1_release_mmap_info); + kref_put(&qp->ip->ref, rvt_release_mmap_info); else vfree(qp->r_rq.wq); free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num); @@ -1316,7 +1316,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) spin_unlock(&dev->n_qps_lock); if (qp->ip) - kref_put(&qp->ip->ref, hfi1_release_mmap_info); + kref_put(&qp->ip->ref, rvt_release_mmap_info); else vfree(qp->r_rq.wq); 
vfree(qp->s_wq); diff --git a/drivers/staging/rdma/hfi1/srq.c b/drivers/staging/rdma/hfi1/srq.c index 78f190a266a5..c53b378497e1 100644 --- a/drivers/staging/rdma/hfi1/srq.c +++ b/drivers/staging/rdma/hfi1/srq.c @@ -165,8 +165,8 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd, u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; srq->ip = - hfi1_create_mmap_info(dev, s, ibpd->uobject->context, - srq->rq.wq); + rvt_create_mmap_info(&dev->rdi, s, ibpd->uobject->context, + srq->rq.wq); if (!srq->ip) { ret = ERR_PTR(-ENOMEM); goto bail_wq; @@ -200,9 +200,9 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd, spin_unlock(&dev->n_srqs_lock); if (srq->ip) { - spin_lock_irq(&dev->pending_lock); - list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); + spin_lock_irq(&dev->rdi.pending_lock); + list_add(&srq->ip->pending_mmaps, &dev->rdi.pending_mmaps); + spin_unlock_irq(&dev->rdi.pending_lock); } ret = &srq->ibsrq; @@ -324,7 +324,7 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, struct hfi1_ibdev *dev = to_idev(srq->ibsrq.device); u32 s = sizeof(struct rvt_rwq) + size * sz; - hfi1_update_mmap_info(dev, ip, s, wq); + rvt_update_mmap_info(&dev->rdi, ip, s, wq); /* * Return the offset to mmap. @@ -341,11 +341,11 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, * Put user mapping info onto the pending list * unless it already is on the list. */ - spin_lock_irq(&dev->pending_lock); + spin_lock_irq(&dev->rdi.pending_lock); if (list_empty(&ip->pending_mmaps)) list_add(&ip->pending_mmaps, - &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); + &dev->rdi.pending_mmaps); + spin_unlock_irq(&dev->rdi.pending_lock); } } else if (attr_mask & IB_SRQ_LIMIT) { spin_lock_irq(&srq->rq.lock); @@ -388,7 +388,7 @@ int hfi1_destroy_srq(struct ib_srq *ibsrq) dev->n_srqs_allocated--; spin_unlock(&dev->n_srqs_lock); if (srq->ip) - kref_put(&srq->ip->ref, hfi1_release_mmap_info); + kref_put(&srq->ip->ref, rvt_release_mmap_info); else vfree(srq->rq.wq); kfree(srq); diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 1c5e477d5493..11f08ea77559 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1796,11 +1796,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dev->mem_timer.function = mem_timer; dev->mem_timer.data = (unsigned long) dev; - INIT_LIST_HEAD(&dev->pending_mmaps); - spin_lock_init(&dev->pending_lock); seqlock_init(&dev->iowait_lock); - dev->mmap_offset = PAGE_SIZE; - spin_lock_init(&dev->mmap_offset_lock); INIT_LIST_HEAD(&dev->txwait); INIT_LIST_HEAD(&dev->memwait); @@ -1906,7 +1902,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->attach_mcast = hfi1_multicast_attach; ibdev->detach_mcast = hfi1_multicast_detach; ibdev->process_mad = hfi1_process_mad; - ibdev->mmap = hfi1_mmap; + ibdev->mmap = NULL; ibdev->dma_ops = NULL; ibdev->get_port_immutable = port_immutable; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 6a7ee460d98a..eb1297825225 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -426,9 +426,6 @@ struct hfi1_ibport { struct hfi1_qp_ibdev; struct hfi1_ibdev { struct rvt_dev_info rdi; /* Must be first */ - struct list_head pending_mmaps; - spinlock_t mmap_offset_lock; /* protect mmap_offset */ - u32 mmap_offset; struct hfi1_qp_ibdev *qp_dev; @@ -441,9 +438,6 @@ struct hfi1_ibdev { struct kmem_cache *verbs_txreq_cache; struct timer_list 
mem_timer; - /* other waiters */ - spinlock_t pending_lock; - u64 n_piowait; u64 n_txwait; u64 n_kmem_wait; @@ -680,17 +674,6 @@ static inline void hfi1_put_ss(struct rvt_sge_state *ss) } } -void hfi1_release_mmap_info(struct kref *ref); - -struct rvt_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, u32 size, - struct ib_ucontext *context, - void *obj); - -void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct rvt_mmap_info *ip, - u32 size, void *obj); - -int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); - int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only); void hfi1_migrate_qp(struct rvt_qp *qp); -- cgit v1.2.3-59-g8ed1b From d604e1d2cd0e5225e1b6132f27734137b8e0d63e Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:42:50 -0800 Subject: staging/rdma/hfi1: Use rdmavt pkey verbs function No need to keep providing the query pkey function. This is now being done in rdmavt. Remove support from hfi1. The allocation and maintenance of the list still reside in the driver. Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 11f08ea77559..fa5b9c15215e 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1679,24 +1679,6 @@ unsigned hfi1_get_npkeys(struct hfi1_devdata *dd) return ARRAY_SIZE(dd->pport[0].pkeys); } -static int query_pkey(struct ib_device *ibdev, u8 port, u16 index, - u16 *pkey) -{ - struct hfi1_devdata *dd = dd_from_ibdev(ibdev); - int ret; - - if (index >= hfi1_get_npkeys(dd)) { - ret = -EINVAL; - goto bail; - } - - *pkey = hfi1_get_pkey(to_iport(ibdev, port), index); - ret = 0; - -bail: - return ret; -} - /** * alloc_ucontext - allocate a ucontest * @ibdev: the infiniband device @@ -1864,7 +1846,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->modify_device = modify_device; ibdev->query_port = query_port; ibdev->modify_port = modify_port; - ibdev->query_pkey = query_pkey; + ibdev->query_pkey = NULL; ibdev->query_gid = query_gid; ibdev->alloc_ucontext = alloc_ucontext; ibdev->dealloc_ucontext = dealloc_ucontext; -- cgit v1.2.3-59-g8ed1b From 90963ad735efd191d9e31c0720238406afd89e19 Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Tue, 19 Jan 2016 14:42:55 -0800 Subject: staging/rdma/hfi1: Remove user context allocation and de-allocation functions IB user context alloc and dealloc functions have been added to rdmavt. This patch removes them from hfi1.
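The effect on the driver itself is small; a minimal sketch follows, taken from the verbs.c hunks below, under the assumption that rvt_register_device() supplies generic handlers for ucontext verbs the driver leaves NULL:

        /* Removed: hfi1's trivial wrapper around ib_ucontext ... */
        struct hfi1_ucontext {
                struct ib_ucontext ibucontext;
        };

        /* ... and its alloc/dealloc callbacks. The verbs are now left
         * unset so rdmavt can provide them (assumption noted above).
         */
        ibdev->alloc_ucontext = NULL;
        ibdev->dealloc_ucontext = NULL;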
Reviewed-by: Dennis Dalessandro Signed-off-by: Harish Chegondi Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 44 ++------------------------------------- 1 file changed, 2 insertions(+), 42 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index fa5b9c15215e..301716aba7fa 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -152,16 +152,6 @@ const int ib_hfi1_state_ops[IB_QPS_ERR + 1] = { HFI1_POST_SEND_OK | HFI1_FLUSH_SEND, }; -struct hfi1_ucontext { - struct ib_ucontext ibucontext; -}; - -static inline struct hfi1_ucontext *to_iucontext(struct ib_ucontext - *ibucontext) -{ - return container_of(ibucontext, struct hfi1_ucontext, ibucontext); -} - static inline void _hfi1_schedule_send(struct rvt_qp *qp); /* @@ -1679,36 +1669,6 @@ unsigned hfi1_get_npkeys(struct hfi1_devdata *dd) return ARRAY_SIZE(dd->pport[0].pkeys); } -/** - * alloc_ucontext - allocate a ucontest - * @ibdev: the infiniband device - * @udata: not used by the driver - */ - -static struct ib_ucontext *alloc_ucontext(struct ib_device *ibdev, - struct ib_udata *udata) -{ - struct hfi1_ucontext *context; - struct ib_ucontext *ret; - - context = kmalloc(sizeof(*context), GFP_KERNEL); - if (!context) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - ret = &context->ibucontext; - -bail: - return ret; -} - -static int dealloc_ucontext(struct ib_ucontext *context) -{ - kfree(to_iucontext(context)); - return 0; -} - static void init_ibport(struct hfi1_pportdata *ppd) { struct hfi1_ibport *ibp = &ppd->ibport_data; @@ -1848,8 +1808,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->modify_port = modify_port; ibdev->query_pkey = NULL; ibdev->query_gid = query_gid; - ibdev->alloc_ucontext = alloc_ucontext; - ibdev->dealloc_ucontext = dealloc_ucontext; + ibdev->alloc_ucontext = NULL; + ibdev->dealloc_ucontext = NULL; ibdev->alloc_pd = NULL; ibdev->dealloc_pd = NULL; ibdev->create_ah = NULL; -- cgit v1.2.3-59-g8ed1b From 54d10c1eb1dc381e62361213bbd100a433b733c9 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:01 -0800 Subject: staging/rdma/hfi1: Use rdmavt send flags and recv flags Use the definitions of the s_flags and r_flags which are now in rdmavt. 
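The substitution is largely a mechanical prefix rename; a representative before/after sketch, with flag names taken from the hunks below:

        /* Before: driver-private flag definitions. */
        if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
                qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        /* After: the equivalent rdmavt definitions. Most flags keep their
         * names modulo the prefix; one exception visible in driver.c is
         * HFI1_R_RSP_DEFERED_ACK, which becomes RVT_R_RSP_NAK.
         */
        if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }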
Reviewed-by: Ira Weiny Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/driver.c | 8 +- drivers/staging/rdma/hfi1/pio.c | 2 +- drivers/staging/rdma/hfi1/qp.c | 58 +++++++------- drivers/staging/rdma/hfi1/qp.h | 2 +- drivers/staging/rdma/hfi1/rc.c | 152 ++++++++++++++++++------------------- drivers/staging/rdma/hfi1/ruc.c | 30 ++++---- drivers/staging/rdma/hfi1/uc.c | 14 ++-- drivers/staging/rdma/hfi1/ud.c | 22 +++--- drivers/staging/rdma/hfi1/verbs.c | 28 +++---- drivers/staging/rdma/hfi1/verbs.h | 85 +-------------------- 10 files changed, 162 insertions(+), 239 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index da55e39658fb..ec2286a1e883 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -781,14 +781,14 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet) */ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { list_del_init(&qp->rspwait); - if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) { - qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK; + if (qp->r_flags & RVT_R_RSP_NAK) { + qp->r_flags &= ~RVT_R_RSP_NAK; hfi1_send_rc_ack(rcd, qp, 0); } - if (qp->r_flags & HFI1_R_RSP_SEND) { + if (qp->r_flags & RVT_R_RSP_SEND) { unsigned long flags; - qp->r_flags &= ~HFI1_R_RSP_SEND; + qp->r_flags &= ~RVT_R_RSP_SEND; spin_lock_irqsave(&qp->s_lock, flags); if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND) diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 8ee7ed8e0fb7..be0dcc345f4b 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -1564,7 +1564,7 @@ full: write_sequnlock_irqrestore(&dev->iowait_lock, flags); for (i = 0; i < n; i++) - hfi1_qp_wakeup(qps[i], HFI1_S_WAIT_PIO); + hfi1_qp_wakeup(qps[i], RVT_S_WAIT_PIO); } /* translate a send credit update to a bit code of reasons */ diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 20b1a840dbdc..d5620babd36a 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -360,7 +360,7 @@ static void reset_qp(struct rvt_qp *qp, enum ib_qp_type type) hfi1_do_send, iowait_sleep, iowait_wakeup); - qp->s_flags &= HFI1_S_SIGNAL_REQ_WR; + qp->s_flags &= RVT_S_SIGNAL_REQ_WR; qp->s_hdrwords = 0; qp->s_wqe = NULL; qp->s_draining = 0; @@ -407,7 +407,7 @@ static void clear_mr_refs(struct rvt_qp *qp, int clr_sends) { unsigned n; - if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags)) + if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) hfi1_put_ss(&qp->s_rdma_read_sge); hfi1_put_ss(&qp->r_sge); @@ -471,24 +471,24 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err) qp->state = IB_QPS_ERR; - if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) { - qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR); + if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); } - if (qp->s_flags & HFI1_S_ANY_WAIT_SEND) - qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND; + if (qp->s_flags & RVT_S_ANY_WAIT_SEND) + qp->s_flags &= ~RVT_S_ANY_WAIT_SEND; write_seqlock(&dev->iowait_lock); - if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) { - qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; + if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) { + qp->s_flags &= ~RVT_S_ANY_WAIT_IO; list_del_init(&priv->s_iowait.list); if (atomic_dec_and_test(&qp->refcount)) 
wake_up(&qp->wait); } write_sequnlock(&dev->iowait_lock); - if (!(qp->s_flags & HFI1_S_BUSY)) { + if (!(qp->s_flags & RVT_S_BUSY)) { qp->s_hdrwords = 0; if (qp->s_rdma_mr) { rvt_put_mr(qp->s_rdma_mr); @@ -507,7 +507,7 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err) wc.qp = &qp->ibqp; wc.opcode = IB_WC_RECV; - if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) { + if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) { wc.wr_id = qp->r_wr_id; wc.status = err; hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); @@ -742,7 +742,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (qp->state != IB_QPS_RESET) { qp->state = IB_QPS_RESET; flush_iowait(qp); - qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT); + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT); spin_unlock(&qp->s_lock); spin_unlock_irq(&qp->r_lock); /* Stop the sending work queue and retry timer */ @@ -762,7 +762,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, case IB_QPS_RTR: /* Allow event to re-trigger if QP set to RTR more than once */ - qp->r_flags &= ~HFI1_R_COMM_EST; + qp->r_flags &= ~RVT_R_COMM_EST; qp->state = new_state; break; @@ -828,7 +828,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp->remote_ah_attr = qp->alt_ah_attr; qp->port_num = qp->alt_ah_attr.port_num; qp->s_pkey_index = qp->s_alt_pkey_index; - qp->s_flags |= HFI1_S_AHG_CLEAR; + qp->s_flags |= RVT_S_AHG_CLEAR; priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); } @@ -954,7 +954,7 @@ int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->recv_cq = qp->ibqp.recv_cq; init_attr->srq = qp->ibqp.srq; init_attr->cap = attr->cap; - if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR) + if (qp->s_flags & RVT_S_SIGNAL_REQ_WR) init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; else init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; @@ -1154,7 +1154,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, qp->s_size = init_attr->cap.max_send_wr + 1; qp->s_max_sge = init_attr->cap.max_send_sge; if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) - qp->s_flags = HFI1_S_SIGNAL_REQ_WR; + qp->s_flags = RVT_S_SIGNAL_REQ_WR; dev = to_idev(ibpd->device); dd = dd_from_dev(dev); err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type, @@ -1292,7 +1292,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) if (qp->state != IB_QPS_RESET) { qp->state = IB_QPS_RESET; flush_iowait(qp); - qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT); + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT); spin_unlock(&qp->s_lock); spin_unlock_irq(&qp->r_lock); cancel_work_sync(&priv->s_iowait.iowork); @@ -1398,20 +1398,20 @@ void hfi1_get_credit(struct rvt_qp *qp, u32 aeth) * honor the credit field. 
*/ if (credit == HFI1_AETH_CREDIT_INVAL) { - if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) { - qp->s_flags |= HFI1_S_UNLIMITED_CREDIT; - if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) { - qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT; + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) { + qp->s_flags |= RVT_S_UNLIMITED_CREDIT; + if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) { + qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT; hfi1_schedule_send(qp); } } - } else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) { + } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) { /* Compute new LSN (i.e., MSN + credit) */ credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK; if (cmp_msn(credit, qp->s_lsn) > 0) { qp->s_lsn = credit; - if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) { - qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT; + if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) { + qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT; hfi1_schedule_send(qp); } } @@ -1469,13 +1469,13 @@ static int iowait_sleep( to_iport(qp->ibqp.device, qp->port_num); ibp->rvp.n_dmawait++; - qp->s_flags |= HFI1_S_WAIT_DMA_DESC; + qp->s_flags |= RVT_S_WAIT_DMA_DESC; list_add_tail(&priv->s_iowait.list, &sde->dmawait); - trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC); + trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC); atomic_inc(&qp->refcount); } write_sequnlock(&dev->iowait_lock); - qp->s_flags &= ~HFI1_S_BUSY; + qp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&qp->s_lock, flags); ret = -EBUSY; } else { @@ -1495,7 +1495,7 @@ static void iowait_wakeup(struct iowait *wait, int reason) struct rvt_qp *qp = iowait_to_qp(wait); WARN_ON(reason != SDMA_AVAIL_REASON); - hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC); + hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC); } int hfi1_qp_init(struct hfi1_ibdev *dev) @@ -1712,7 +1712,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) void qp_comm_est(struct rvt_qp *qp) { - qp->r_flags |= HFI1_R_COMM_EST; + qp->r_flags |= RVT_R_COMM_EST; if (qp->ibqp.event_handler) { struct ib_event ev; @@ -1736,7 +1736,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp) qp->remote_ah_attr = qp->alt_ah_attr; qp->port_num = qp->alt_ah_attr.port_num; qp->s_pkey_index = qp->s_alt_pkey_index; - qp->s_flags |= HFI1_S_AHG_CLEAR; + qp->s_flags |= RVT_S_AHG_CLEAR; priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 8e665622a93b..9efa4bc634e7 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -125,7 +125,7 @@ static inline void clear_ahg(struct rvt_qp *qp) struct hfi1_qp_priv *priv = qp->priv; priv->s_hdr->ahgcount = 0; - qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR); + qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR); if (priv->s_sde && qp->s_ahgidx >= 0) sdma_ahg_free(priv->s_sde, qp->s_ahgidx); qp->s_ahgidx = -1; diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index d7334f48f8c5..bd504decc46d 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -76,7 +76,7 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, static void start_timer(struct rvt_qp *qp) { - qp->s_flags |= HFI1_S_TIMER; + qp->s_flags |= RVT_S_TIMER; qp->s_timer.function = rc_timeout; /* 4.096 usec. * (1 << qp->timeout) */ qp->s_timer.expires = jiffies + qp->timeout_jiffies; @@ -133,7 +133,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. 
*/ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { - if (qp->s_flags & HFI1_S_ACK_PENDING) + if (qp->s_flags & RVT_S_ACK_PENDING) goto normal; goto bail; } @@ -218,7 +218,7 @@ normal: * (see above). */ qp->s_ack_state = OP(SEND_ONLY); - qp->s_flags &= ~HFI1_S_ACK_PENDING; + qp->s_flags &= ~RVT_S_ACK_PENDING; qp->s_cur_sge = NULL; if (qp->s_nak_state) ohdr->u.aeth = @@ -242,12 +242,12 @@ bail: qp->s_ack_state = OP(ACKNOWLEDGE); /* * Ensure s_rdma_ack_cnt changes are committed prior to resetting - * HFI1_S_RESP_PENDING + * RVT_S_RESP_PENDING */ smp_wmb(); - qp->s_flags &= ~(HFI1_S_RESP_PENDING - | HFI1_S_ACK_PENDING - | HFI1_S_AHG_VALID); + qp->s_flags &= ~(RVT_S_RESP_PENDING + | RVT_S_ACK_PENDING + | RVT_S_AHG_VALID); return 0; } @@ -287,7 +287,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp) spin_lock_irqsave(&qp->s_lock, flags); /* Sending responses has higher priority over sending requests. */ - if ((qp->s_flags & HFI1_S_RESP_PENDING) && + if ((qp->s_flags & RVT_S_RESP_PENDING) && make_rc_ack(dev, qp, ohdr, pmtu)) goto done; @@ -299,7 +299,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_iowait.sdma_busy)) { - qp->s_flags |= HFI1_S_WAIT_DMA; + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } clear_ahg(qp); @@ -310,12 +310,12 @@ int hfi1_make_rc_req(struct rvt_qp *qp) goto done; } - if (qp->s_flags & (HFI1_S_WAIT_RNR | HFI1_S_WAIT_ACK)) + if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK)) goto bail; if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) { if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { - qp->s_flags |= HFI1_S_WAIT_PSN; + qp->s_flags |= RVT_S_WAIT_PSN; goto bail; } qp->s_sending_psn = qp->s_psn; @@ -348,7 +348,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp) */ if ((wqe->wr.send_flags & IB_SEND_FENCE) && qp->s_num_rd_atomic) { - qp->s_flags |= HFI1_S_WAIT_FENCE; + qp->s_flags |= RVT_S_WAIT_FENCE; goto bail; } wqe->psn = qp->s_next_psn; @@ -366,9 +366,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp) case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: /* If no credit, return. */ - if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) && + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT; + qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; goto bail; } wqe->lpsn = wqe->psn; @@ -394,14 +394,14 @@ int hfi1_make_rc_req(struct rvt_qp *qp) break; case IB_WR_RDMA_WRITE: - if (newreq && !(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) + if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; /* FALLTHROUGH */ case IB_WR_RDMA_WRITE_WITH_IMM: /* If no credit, return. 
*/ - if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) && + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT; + qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; goto bail; } ohdr->u.rc.reth.vaddr = @@ -441,11 +441,11 @@ int hfi1_make_rc_req(struct rvt_qp *qp) if (newreq) { if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { - qp->s_flags |= HFI1_S_WAIT_RDMAR; + qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; - if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; /* * Adjust s_next_psn to count the @@ -478,11 +478,11 @@ int hfi1_make_rc_req(struct rvt_qp *qp) if (newreq) { if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { - qp->s_flags |= HFI1_S_WAIT_RDMAR; + qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; - if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; wqe->lpsn = wqe->psn; } @@ -649,9 +649,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp) delta = delta_psn(bth2, wqe->psn); if (delta && delta % HFI1_PSN_CREDIT == 0) bth2 |= IB_BTH_REQ_ACK; - if (qp->s_flags & HFI1_S_SEND_ONE) { - qp->s_flags &= ~HFI1_S_SEND_ONE; - qp->s_flags |= HFI1_S_WAIT_ACK; + if (qp->s_flags & RVT_S_SEND_ONE) { + qp->s_flags &= ~RVT_S_SEND_ONE; + qp->s_flags |= RVT_S_WAIT_ACK; bth2 |= IB_BTH_REQ_ACK; } qp->s_len -= len; @@ -669,7 +669,7 @@ done: goto unlock; bail: - qp->s_flags &= ~HFI1_S_BUSY; + qp->s_flags &= ~RVT_S_BUSY; unlock: spin_unlock_irqrestore(&qp->s_lock, flags); return ret; @@ -701,7 +701,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, unsigned long flags; /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ - if (qp->s_flags & HFI1_S_RESP_PENDING) + if (qp->s_flags & RVT_S_RESP_PENDING) goto queue_ack; /* Ensure s_rdma_ack_cnt changes are committed */ @@ -774,11 +774,11 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, queue_ack: this_cpu_inc(*ibp->rvp.rc_qacks); spin_lock_irqsave(&qp->s_lock, flags); - qp->s_flags |= HFI1_S_ACK_PENDING | HFI1_S_RESP_PENDING; + qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING; qp->s_nak_state = qp->r_nak_state; qp->s_ack_psn = qp->r_ack_psn; if (is_fecn) - qp->s_flags |= HFI1_S_ECN; + qp->s_flags |= RVT_S_ECN; /* Schedule the send tasklet. */ hfi1_schedule_send(qp); @@ -866,14 +866,14 @@ static void reset_psn(struct rvt_qp *qp, u32 psn) done: qp->s_psn = psn; /* - * Set HFI1_S_WAIT_PSN as rc_complete() may start the timer + * Set RVT_S_WAIT_PSN as rc_complete() may start the timer * asynchronously before the send tasklet can get scheduled. * Doing it in hfi1_make_rc_req() is too late. 
*/ if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) && (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) - qp->s_flags |= HFI1_S_WAIT_PSN; - qp->s_flags &= ~HFI1_S_AHG_VALID; + qp->s_flags |= RVT_S_WAIT_PSN; + qp->s_flags &= ~RVT_S_AHG_VALID; } /* @@ -904,11 +904,11 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait) else ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn); - qp->s_flags &= ~(HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | - HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_PSN | - HFI1_S_WAIT_ACK); + qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | + RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN | + RVT_S_WAIT_ACK); if (wait) - qp->s_flags |= HFI1_S_SEND_ONE; + qp->s_flags |= RVT_S_SEND_ONE; reset_psn(qp, psn); } @@ -923,10 +923,10 @@ static void rc_timeout(unsigned long arg) spin_lock_irqsave(&qp->r_lock, flags); spin_lock(&qp->s_lock); - if (qp->s_flags & HFI1_S_TIMER) { + if (qp->s_flags & RVT_S_TIMER) { ibp = to_iport(qp->ibqp.device, qp->port_num); ibp->rvp.n_rc_timeouts++; - qp->s_flags &= ~HFI1_S_TIMER; + qp->s_flags &= ~RVT_S_TIMER; del_timer(&qp->s_timer); trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1); restart_rc(qp, qp->s_last_psn + 1, 1); @@ -945,8 +945,8 @@ void hfi1_rc_rnr_retry(unsigned long arg) unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); - if (qp->s_flags & HFI1_S_WAIT_RNR) { - qp->s_flags &= ~HFI1_S_WAIT_RNR; + if (qp->s_flags & RVT_S_WAIT_RNR) { + qp->s_flags &= ~RVT_S_WAIT_RNR; del_timer(&qp->s_timer); hfi1_schedule_send(qp); } @@ -1017,7 +1017,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) */ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && !(qp->s_flags & - (HFI1_S_TIMER | HFI1_S_WAIT_RNR | HFI1_S_WAIT_PSN)) && + (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) && (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) start_timer(qp); @@ -1032,7 +1032,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) rvt_put_mr(sge->mr); } /* Post a send completion queue entry if requested. */ - if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; @@ -1050,9 +1050,9 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) * and they are now complete, restart sending. */ trace_hfi1_rc_sendcomplete(qp, psn); - if (qp->s_flags & HFI1_S_WAIT_PSN && + if (qp->s_flags & RVT_S_WAIT_PSN && cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { - qp->s_flags &= ~HFI1_S_WAIT_PSN; + qp->s_flags &= ~RVT_S_WAIT_PSN; qp->s_sending_psn = qp->s_psn; qp->s_sending_hpsn = qp->s_psn - 1; hfi1_schedule_send(qp); @@ -1089,7 +1089,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, rvt_put_mr(sge->mr); } /* Post a send completion queue entry if requested. 
*/ - if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; @@ -1169,8 +1169,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, int diff; /* Remove QP from retry timer */ - if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) { - qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR); + if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); } @@ -1218,11 +1218,11 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) { /* Retry this request. */ - if (!(qp->r_flags & HFI1_R_RDMAR_SEQ)) { - qp->r_flags |= HFI1_R_RDMAR_SEQ; + if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { + qp->r_flags |= RVT_R_RDMAR_SEQ; restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { - qp->r_flags |= HFI1_R_RSP_SEND; + qp->r_flags |= RVT_R_RSP_SEND; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); @@ -1245,14 +1245,14 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { qp->s_num_rd_atomic--; /* Restart sending task if fence is complete */ - if ((qp->s_flags & HFI1_S_WAIT_FENCE) && + if ((qp->s_flags & RVT_S_WAIT_FENCE) && !qp->s_num_rd_atomic) { - qp->s_flags &= ~(HFI1_S_WAIT_FENCE | - HFI1_S_WAIT_ACK); + qp->s_flags &= ~(RVT_S_WAIT_FENCE | + RVT_S_WAIT_ACK); hfi1_schedule_send(qp); - } else if (qp->s_flags & HFI1_S_WAIT_RDMAR) { - qp->s_flags &= ~(HFI1_S_WAIT_RDMAR | - HFI1_S_WAIT_ACK); + } else if (qp->s_flags & RVT_S_WAIT_RDMAR) { + qp->s_flags &= ~(RVT_S_WAIT_RDMAR | + RVT_S_WAIT_ACK); hfi1_schedule_send(qp); } } @@ -1280,8 +1280,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, qp->s_state = OP(SEND_LAST); qp->s_psn = psn + 1; } - if (qp->s_flags & HFI1_S_WAIT_ACK) { - qp->s_flags &= ~HFI1_S_WAIT_ACK; + if (qp->s_flags & RVT_S_WAIT_ACK) { + qp->s_flags &= ~RVT_S_WAIT_ACK; hfi1_schedule_send(qp); } hfi1_get_credit(qp, aeth); @@ -1295,7 +1295,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, ibp->rvp.n_rnr_naks++; if (qp->s_acked == qp->s_tail) goto bail; - if (qp->s_flags & HFI1_S_WAIT_RNR) + if (qp->s_flags & RVT_S_WAIT_RNR) goto bail; if (qp->s_rnr_retry == 0) { status = IB_WC_RNR_RETRY_EXC_ERR; @@ -1311,8 +1311,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, reset_psn(qp, psn); - qp->s_flags &= ~(HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_ACK); - qp->s_flags |= HFI1_S_WAIT_RNR; + qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK); + qp->s_flags |= RVT_S_WAIT_RNR; qp->s_timer.function = hfi1_rc_rnr_retry; qp->s_timer.expires = jiffies + usecs_to_jiffies( ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) & @@ -1387,8 +1387,8 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, struct rvt_swqe *wqe; /* Remove QP from retry timer */ - if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) { - qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR); + if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); } @@ -1403,10 +1403,10 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, } ibp->rvp.n_rdma_seq++; - qp->r_flags |= HFI1_R_RDMAR_SEQ; + qp->r_flags |= RVT_R_RDMAR_SEQ; 
restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { - qp->r_flags |= HFI1_R_RSP_SEND; + qp->r_flags |= RVT_R_RSP_SEND; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -1466,10 +1466,10 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, * Skip everything other than the PSN we expect, if we are waiting * for a reply to a restarted RDMA read or atomic op. */ - if (qp->r_flags & HFI1_R_RDMAR_SEQ) { + if (qp->r_flags & RVT_R_RDMAR_SEQ) { if (cmp_psn(psn, qp->s_last_psn + 1) != 0) goto ack_done; - qp->r_flags &= ~HFI1_R_RDMAR_SEQ; + qp->r_flags &= ~RVT_R_RDMAR_SEQ; } if (unlikely(qp->s_acked == qp->s_tail)) @@ -1520,10 +1520,10 @@ read_middle: * We got a response so update the timeout. * 4.096 usec. * (1 << qp->timeout) */ - qp->s_flags |= HFI1_S_TIMER; + qp->s_flags |= RVT_S_TIMER; mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); - if (qp->s_flags & HFI1_S_WAIT_ACK) { - qp->s_flags &= ~HFI1_S_WAIT_ACK; + if (qp->s_flags & RVT_S_WAIT_ACK) { + qp->s_flags &= ~RVT_S_WAIT_ACK; hfi1_schedule_send(qp); } @@ -1613,7 +1613,7 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp) { if (list_empty(&qp->rspwait)) { - qp->r_flags |= HFI1_R_RSP_DEFERED_ACK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -1627,7 +1627,7 @@ static inline void rc_cancel_ack(struct rvt_qp *qp) if (list_empty(&qp->rspwait)) return; list_del_init(&qp->rspwait); - qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK; + qp->r_flags &= ~RVT_R_RSP_NAK; if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } @@ -1813,7 +1813,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, break; } qp->s_ack_state = OP(ACKNOWLEDGE); - qp->s_flags |= HFI1_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; qp->r_nak_state = 0; hfi1_schedule_send(qp); @@ -2057,7 +2057,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) break; } - if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST)) + if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) qp_comm_est(qp); /* OK, process the packet. */ @@ -2127,7 +2127,7 @@ send_last: hfi1_copy_sge(&qp->r_sge, data, tlen, 1); hfi1_put_ss(&qp->r_sge); qp->r_msn++; - if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) break; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -2264,7 +2264,7 @@ send_last: qp->r_head_ack_queue = next; /* Schedule the send tasklet. */ - qp->s_flags |= HFI1_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); @@ -2331,7 +2331,7 @@ send_last: qp->r_head_ack_queue = next; /* Schedule the send tasklet. */ - qp->s_flags |= HFI1_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 98a4798a0ead..0b324b17bf09 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -208,7 +208,7 @@ int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only) qp->r_wr_id = wqe->wr_id; ret = 1; - set_bit(HFI1_R_WRID_VALID, &qp->r_aflags); + set_bit(RVT_R_WRID_VALID, &qp->r_aflags); if (handler) { u32 n; @@ -382,11 +382,11 @@ static void ruc_loopback(struct rvt_qp *sqp) spin_lock_irqsave(&sqp->s_lock, flags); /* Return if we are already busy processing a work request. 
*/ - if ((sqp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT)) || + if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) || !(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_OR_FLUSH_SEND)) goto unlock; - sqp->s_flags |= HFI1_S_BUSY; + sqp->s_flags |= RVT_S_BUSY; again: if (sqp->s_last == sqp->s_head) @@ -550,7 +550,7 @@ again: if (release) hfi1_put_ss(&qp->r_sge); - if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) goto send_comp; if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) @@ -595,7 +595,7 @@ rnr_nak: spin_lock_irqsave(&sqp->s_lock, flags); if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_RECV_OK)) goto clr_busy; - sqp->s_flags |= HFI1_S_WAIT_RNR; + sqp->s_flags |= RVT_S_WAIT_RNR; sqp->s_timer.function = hfi1_rc_rnr_retry; sqp->s_timer.expires = jiffies + usecs_to_jiffies(ib_hfi1_rnr_table[qp->r_min_rnr_timer]); @@ -625,7 +625,7 @@ serr: if (sqp->ibqp.qp_type == IB_QPT_RC) { int lastwqe = hfi1_error_qp(sqp, IB_WC_WR_FLUSH_ERR); - sqp->s_flags &= ~HFI1_S_BUSY; + sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); if (lastwqe) { struct ib_event ev; @@ -638,7 +638,7 @@ serr: goto done; } clr_busy: - sqp->s_flags &= ~HFI1_S_BUSY; + sqp->s_flags &= ~RVT_S_BUSY; unlock: spin_unlock_irqrestore(&sqp->s_lock, flags); done: @@ -694,9 +694,9 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, static inline void build_ahg(struct rvt_qp *qp, u32 npsn) { struct hfi1_qp_priv *priv = qp->priv; - if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR)) + if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR)) clear_ahg(qp); - if (!(qp->s_flags & HFI1_S_AHG_VALID)) { + if (!(qp->s_flags & RVT_S_AHG_VALID)) { /* first middle that needs copy */ if (qp->s_ahgidx < 0) qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde); @@ -706,7 +706,7 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn) /* save to protect a change in another thread */ priv->s_hdr->sde = priv->s_sde; priv->s_hdr->ahgidx = qp->s_ahgidx; - qp->s_flags |= HFI1_S_AHG_VALID; + qp->s_flags |= RVT_S_AHG_VALID; } } else { /* subsequent middle after valid */ @@ -779,7 +779,7 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, if (middle) build_ahg(qp, bth2); else - qp->s_flags &= ~HFI1_S_AHG_VALID; + qp->s_flags &= ~RVT_S_AHG_VALID; priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); priv->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); priv->s_hdr->ibh.lrh[2] = @@ -790,8 +790,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, bth0 |= extra_bytes << 20; ohdr->bth[0] = cpu_to_be32(bth0); bth1 = qp->remote_qpn; - if (qp->s_flags & HFI1_S_ECN) { - qp->s_flags &= ~HFI1_S_ECN; + if (qp->s_flags & RVT_S_ECN) { + qp->s_flags &= ~RVT_S_ECN; /* we recently received a FECN, so return a BECN */ bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT); } @@ -847,7 +847,7 @@ void hfi1_do_send(struct work_struct *work) return; } - qp->s_flags |= HFI1_S_BUSY; + qp->s_flags |= RVT_S_BUSY; spin_unlock_irqrestore(&qp->s_lock, flags); @@ -897,7 +897,7 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); /* See ch. 
11.2.4.1 and 10.7.3.1 */ - if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED) || status != IB_WC_SUCCESS) { struct ib_wc wc; diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index cac3724e39d5..0935182d4ac9 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -84,7 +84,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_iowait.sdma_busy)) { - qp->s_flags |= HFI1_S_WAIT_DMA; + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } clear_ahg(qp); @@ -241,7 +241,7 @@ done: goto unlock; bail: - qp->s_flags &= ~HFI1_S_BUSY; + qp->s_flags &= ~RVT_S_BUSY; unlock: spin_unlock_irqrestore(&qp->s_lock, flags); return ret; @@ -332,7 +332,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) inv: if (qp->r_state == OP(SEND_FIRST) || qp->r_state == OP(SEND_MIDDLE)) { - set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags); + set_bit(RVT_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; } else hfi1_put_ss(&qp->r_sge); @@ -382,7 +382,7 @@ inv: goto inv; } - if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST)) + if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) qp_comm_est(qp); /* OK, process the packet. */ @@ -391,7 +391,7 @@ inv: case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): send_first: - if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags)) + if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) qp->r_sge = qp->s_rdma_read_sge; else { ret = hfi1_get_rwqe(qp, 0); @@ -536,7 +536,7 @@ rdma_last_imm: tlen -= (hdrsize + pad + 4); if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; - if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags)) + if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) hfi1_put_ss(&qp->s_rdma_read_sge); else { ret = hfi1_get_rwqe(qp, 1); @@ -576,7 +576,7 @@ rdma_last: return; rewind: - set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags); + set_bit(RVT_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; drop: ibp->rvp.n_pkt_drops++; diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index e058fd24c60f..a0e62229d7a1 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -161,8 +161,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) /* * Get the next work request entry to find where to put the data. */ - if (qp->r_flags & HFI1_R_REUSE_SGE) - qp->r_flags &= ~HFI1_R_REUSE_SGE; + if (qp->r_flags & RVT_R_REUSE_SGE) + qp->r_flags &= ~RVT_R_REUSE_SGE; else { int ret; @@ -179,7 +179,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) } /* Silently drop packets which are too big. */ if (unlikely(wc.byte_len > qp->r_len)) { - qp->r_flags |= HFI1_R_REUSE_SGE; + qp->r_flags |= RVT_R_REUSE_SGE; ibp->rvp.n_pkt_drops++; goto bail_unlock; } @@ -223,7 +223,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) length -= len; } hfi1_put_ss(&qp->r_sge); - if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) goto bail_unlock; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -290,7 +290,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) goto bail; /* If DMAs are in progress, we can't flush immediately. 
*/ if (atomic_read(&priv->s_iowait.sdma_busy)) { - qp->s_flags |= HFI1_S_WAIT_DMA; + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } wqe = get_swqe_ptr(qp, qp->s_last); @@ -324,7 +324,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) * zero length descriptor so we get a callback. */ if (atomic_read(&priv->s_iowait.sdma_busy)) { - qp->s_flags |= HFI1_S_WAIT_DMA; + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } qp->s_cur = next_cur; @@ -426,7 +426,7 @@ done: goto unlock; bail: - qp->s_flags &= ~HFI1_S_BUSY; + qp->s_flags &= ~RVT_S_BUSY; unlock: spin_unlock_irqrestore(&qp->s_lock, flags); return ret; @@ -812,8 +812,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) /* * Get the next work request entry to find where to put the data. */ - if (qp->r_flags & HFI1_R_REUSE_SGE) - qp->r_flags &= ~HFI1_R_REUSE_SGE; + if (qp->r_flags & RVT_R_REUSE_SGE) + qp->r_flags &= ~RVT_R_REUSE_SGE; else { int ret; @@ -830,7 +830,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) } /* Silently drop packets which are too big. */ if (unlikely(wc.byte_len > qp->r_len)) { - qp->r_flags |= HFI1_R_REUSE_SGE; + qp->r_flags |= RVT_R_REUSE_SGE; goto drop; } if (has_grh) { @@ -841,7 +841,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); hfi1_put_ss(&qp->r_sge); - if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) return; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 301716aba7fa..a1e9f0b2bf05 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -702,7 +702,7 @@ static void mem_timer(unsigned long data) write_sequnlock_irqrestore(&dev->iowait_lock, flags); if (qp) - hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM); + hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM); } void update_sge(struct rvt_sge_state *ss, u32 length) @@ -740,12 +740,12 @@ static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK && list_empty(&priv->s_iowait.list)) { dev->n_txwait++; - qp->s_flags |= HFI1_S_WAIT_TX; + qp->s_flags |= RVT_S_WAIT_TX; list_add_tail(&priv->s_iowait.list, &dev->txwait); - trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX); + trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX); atomic_inc(&qp->refcount); } - qp->s_flags &= ~HFI1_S_BUSY; + qp->s_flags &= ~RVT_S_BUSY; write_sequnlock(&dev->iowait_lock); spin_unlock_irqrestore(&qp->s_lock, flags); tx = ERR_PTR(-EBUSY); @@ -803,7 +803,7 @@ void hfi1_put_txreq(struct verbs_txreq *tx) list_del_init(&priv->s_iowait.list); /* refcount held until actual wake up */ write_sequnlock_irqrestore(&dev->iowait_lock, flags); - hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX); + hfi1_qp_wakeup(qp, RVT_S_WAIT_TX); break; } } while (read_seqretry(&dev->iowait_lock, seq)); @@ -838,8 +838,8 @@ static void verbs_sdma_complete( * do the flush work until that QP's * sdma work has finished. 
*/ - if (qp->s_flags & HFI1_S_WAIT_DMA) { - qp->s_flags &= ~HFI1_S_WAIT_DMA; + if (qp->s_flags & RVT_S_WAIT_DMA) { + qp->s_flags &= ~RVT_S_WAIT_DMA; hfi1_schedule_send(qp); } } @@ -860,13 +860,13 @@ static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp) if (list_empty(&priv->s_iowait.list)) { if (list_empty(&dev->memwait)) mod_timer(&dev->mem_timer, jiffies + 1); - qp->s_flags |= HFI1_S_WAIT_KMEM; + qp->s_flags |= RVT_S_WAIT_KMEM; list_add_tail(&priv->s_iowait.list, &dev->memwait); - trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM); + trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM); atomic_inc(&qp->refcount); } write_sequnlock(&dev->iowait_lock); - qp->s_flags &= ~HFI1_S_BUSY; + qp->s_flags &= ~RVT_S_BUSY; ret = -EBUSY; } spin_unlock_irqrestore(&qp->s_lock, flags); @@ -1092,17 +1092,17 @@ static int no_bufs_available(struct rvt_qp *qp, struct send_context *sc) int was_empty; dev->n_piowait++; - qp->s_flags |= HFI1_S_WAIT_PIO; + qp->s_flags |= RVT_S_WAIT_PIO; was_empty = list_empty(&sc->piowait); list_add_tail(&priv->s_iowait.list, &sc->piowait); - trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO); + trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO); atomic_inc(&qp->refcount); /* counting: only call wantpiobuf_intr if first user */ if (was_empty) hfi1_sc_wantpiobuf_intr(sc, 1); } write_sequnlock(&dev->iowait_lock); - qp->s_flags &= ~HFI1_S_BUSY; + qp->s_flags &= ~RVT_S_BUSY; ret = -EBUSY; } spin_unlock_irqrestore(&qp->s_lock, flags); @@ -1307,7 +1307,7 @@ bad: * @ps: the state of the packet to send * * Return zero if packet is sent or queued OK. - * Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise. + * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise. */ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index eb1297825225..b9843a5ef0d2 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -63,6 +63,7 @@ #include #include #include +#include struct hfi1_ctxtdata; struct hfi1_pportdata; @@ -286,84 +287,6 @@ struct hfi1_pkt_state { struct hfi1_pportdata *ppd; }; -/* - * Atomic bit definitions for r_aflags. - */ -#define HFI1_R_WRID_VALID 0 -#define HFI1_R_REWIND_SGE 1 - -/* - * Bit definitions for r_flags. - */ -#define HFI1_R_REUSE_SGE 0x01 -#define HFI1_R_RDMAR_SEQ 0x02 -/* defer ack until end of interrupt session */ -#define HFI1_R_RSP_DEFERED_ACK 0x04 -/* relay ack to send engine */ -#define HFI1_R_RSP_SEND 0x08 -#define HFI1_R_COMM_EST 0x10 - -/* - * Bit definitions for s_flags. 
- * - * HFI1_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled - * HFI1_S_BUSY - send tasklet is processing the QP - * HFI1_S_TIMER - the RC retry timer is active - * HFI1_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics - * HFI1_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs - * before processing the next SWQE - * HFI1_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete - * before processing the next SWQE - * HFI1_S_WAIT_RNR - waiting for RNR timeout - * HFI1_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE - * HFI1_S_WAIT_DMA - waiting for send DMA queue to drain before generating - * next send completion entry not via send DMA - * HFI1_S_WAIT_PIO - waiting for a send buffer to be available - * HFI1_S_WAIT_TX - waiting for a struct verbs_txreq to be available - * HFI1_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available - * HFI1_S_WAIT_KMEM - waiting for kernel memory to be available - * HFI1_S_WAIT_PSN - waiting for a packet to exit the send DMA queue - * HFI1_S_WAIT_ACK - waiting for an ACK packet before sending more requests - * HFI1_S_SEND_ONE - send one packet, request ACK, then wait for ACK - * HFI1_S_ECN - a BECN was queued to the send engine - */ -#define HFI1_S_SIGNAL_REQ_WR 0x0001 -#define HFI1_S_BUSY 0x0002 -#define HFI1_S_TIMER 0x0004 -#define HFI1_S_RESP_PENDING 0x0008 -#define HFI1_S_ACK_PENDING 0x0010 -#define HFI1_S_WAIT_FENCE 0x0020 -#define HFI1_S_WAIT_RDMAR 0x0040 -#define HFI1_S_WAIT_RNR 0x0080 -#define HFI1_S_WAIT_SSN_CREDIT 0x0100 -#define HFI1_S_WAIT_DMA 0x0200 -#define HFI1_S_WAIT_PIO 0x0400 -#define HFI1_S_WAIT_TX 0x0800 -#define HFI1_S_WAIT_DMA_DESC 0x1000 -#define HFI1_S_WAIT_KMEM 0x2000 -#define HFI1_S_WAIT_PSN 0x4000 -#define HFI1_S_WAIT_ACK 0x8000 -#define HFI1_S_SEND_ONE 0x10000 -#define HFI1_S_UNLIMITED_CREDIT 0x20000 -#define HFI1_S_AHG_VALID 0x40000 -#define HFI1_S_AHG_CLEAR 0x80000 -#define HFI1_S_ECN 0x100000 - -/* - * Wait flags that would prevent any packet type from being sent. - */ -#define HFI1_S_ANY_WAIT_IO (HFI1_S_WAIT_PIO | HFI1_S_WAIT_TX | \ - HFI1_S_WAIT_DMA_DESC | HFI1_S_WAIT_KMEM) - -/* - * Wait flags that would prevent send work requests from making progress. - */ -#define HFI1_S_ANY_WAIT_SEND (HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | \ - HFI1_S_WAIT_RNR | HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_DMA | \ - HFI1_S_WAIT_PSN | HFI1_S_WAIT_ACK) - -#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | HFI1_S_ANY_WAIT_SEND) - #define HFI1_PSN_CREDIT 16 /* @@ -507,9 +430,9 @@ static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait) */ static inline int hfi1_send_ok(struct rvt_qp *qp) { - return !(qp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO)) && - (qp->s_hdrwords || (qp->s_flags & HFI1_S_RESP_PENDING) || - !(qp->s_flags & HFI1_S_ANY_WAIT_SEND)); + return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) && + (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) || + !(qp->s_flags & RVT_S_ANY_WAIT_SEND)); } /* -- cgit v1.2.3-59-g8ed1b From 1c4b7d971d6679277844cefc0f5c191c800bf955 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:06 -0800 Subject: staging/rdma/hfi1: Remove qpdev and qpn table from hfi1 Another change on the way to removing queue pair functionality from hfi1. This patch removes the private queue pair structure and the table which holds the queue pair numbers in favor of using what is provided by rdmavt. 
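For context, the rdmavt structures taken up here are expected to mirror the hfi1 ones this patch deletes; the sketch below is inferred from those removed definitions and from the rvt_* identifiers used in the diff, not quoted from rdmavt itself (the authoritative versions belong to include/rdma/rdmavt_qp.h):

#define RVT_QPN_MAX		BIT(24)
#define RVT_BITS_PER_PAGE	(PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE - 1)
#define RVT_QPNMAP_ENTRIES	(RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

/* QPN-map pages start out NULL and are allocated on first use. */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock;	/* protect changes in this struct */
	unsigned flags;		/* flags for QP0/1 allocated for each port */
	u32 last;		/* last QP number allocated */
	u32 nmaps;		/* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};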
Reviewed-by: Ira Weiny Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 147 +++++++++++++++++++------------------- drivers/staging/rdma/hfi1/qp.h | 38 ++-------- drivers/staging/rdma/hfi1/verbs.h | 3 - 3 files changed, 78 insertions(+), 110 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index d5620babd36a..1bf8083fcef2 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -60,9 +60,6 @@ #include "trace.h" #include "sdma.h" -#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) -#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) - static unsigned int hfi1_qp_table_size = 256; module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); @@ -75,10 +72,10 @@ static int iowait_sleep( unsigned seq); static void iowait_wakeup(struct iowait *wait, int reason); -static inline unsigned mk_qpn(struct hfi1_qpn_table *qpt, - struct qpn_map *map, unsigned off) +static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, + struct rvt_qpn_map *map, unsigned off) { - return (map - qpt->map) * BITS_PER_PAGE + off; + return (map - qpt->map) * RVT_BITS_PER_PAGE + off; } /* @@ -118,7 +115,7 @@ static const u16 credit_table[31] = { 32768 /* 1E */ }; -static void get_map_page(struct hfi1_qpn_table *qpt, struct qpn_map *map) +static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map) { unsigned long page = get_zeroed_page(GFP_KERNEL); @@ -138,11 +135,11 @@ static void get_map_page(struct hfi1_qpn_table *qpt, struct qpn_map *map) * Allocate the next available QPN or * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. */ -static int alloc_qpn(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt, +static int alloc_qpn(struct hfi1_devdata *dd, struct rvt_qpn_table *qpt, enum ib_qp_type type, u8 port) { u32 i, offset, max_scan, qpn; - struct qpn_map *map; + struct rvt_qpn_map *map; u32 ret; if (type == IB_QPT_SMI || type == IB_QPT_GSI) { @@ -160,11 +157,11 @@ static int alloc_qpn(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt, } qpn = qpt->last + qpt->incr; - if (qpn >= QPN_MAX) + if (qpn >= RVT_QPN_MAX) qpn = qpt->incr | ((qpt->last & 1) ^ 1); /* offset carries bit 0 */ - offset = qpn & BITS_PER_PAGE_MASK; - map = &qpt->map[qpn / BITS_PER_PAGE]; + offset = qpn & RVT_BITS_PER_PAGE_MASK; + map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; max_scan = qpt->nmaps - !offset; for (i = 0;;) { if (unlikely(!map->page)) { @@ -180,18 +177,19 @@ static int alloc_qpn(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt, } offset += qpt->incr; /* - * This qpn might be bogus if offset >= BITS_PER_PAGE. - * That is OK. It gets re-assigned below + * This qpn might be bogus if offset >= + * RVT_BITS_PER_PAGE. That is OK. It gets re-assigned + * below */ qpn = mk_qpn(qpt, map, offset); - } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); + } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX); /* * In order to keep the number of pages allocated to a * minimum, we scan the all existing pages before increasing * the size of the bitmap table. 
*/ if (++i > max_scan) { - if (qpt->nmaps == QPNMAP_ENTRIES) + if (qpt->nmaps == RVT_QPNMAP_ENTRIES) break; map = &qpt->map[qpt->nmaps++]; /* start at incr with current bit 0 */ @@ -216,13 +214,13 @@ bail: return ret; } -static void free_qpn(struct hfi1_qpn_table *qpt, u32 qpn) +static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn) { - struct qpn_map *map; + struct rvt_qpn_map *map; - map = qpt->map + qpn / BITS_PER_PAGE; + map = qpt->map + qpn / RVT_BITS_PER_PAGE; if (map->page) - clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); + clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); } /* @@ -235,19 +233,19 @@ static void insert_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) unsigned long flags; atomic_inc(&qp->refcount); - spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags); + spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags); if (qp->ibqp.qp_num <= 1) { rcu_assign_pointer(ibp->rvp.qp[qp->ibqp.qp_num], qp); } else { - u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num); + u32 n = qpn_hash(dev->rdi.qp_dev, qp->ibqp.qp_num); - qp->next = dev->qp_dev->qp_table[n]; - rcu_assign_pointer(dev->qp_dev->qp_table[n], qp); + qp->next = dev->rdi.qp_dev->qp_table[n]; + rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp); trace_hfi1_qpinsert(qp, n); } - spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags); + spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags); } /* @@ -257,40 +255,40 @@ static void insert_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) static void remove_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num); + u32 n = qpn_hash(dev->rdi.qp_dev, qp->ibqp.qp_num); unsigned long flags; int removed = 1; - spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags); + spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags); if (rcu_dereference_protected(ibp->rvp.qp[0], lockdep_is_held( - &dev->qp_dev->qpt_lock)) == qp) { + &dev->rdi.qp_dev->qpt_lock)) == qp) { RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); } else if (rcu_dereference_protected(ibp->rvp.qp[1], - lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) { + lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) { RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); } else { struct rvt_qp *q; struct rvt_qp __rcu **qpp; removed = 0; - qpp = &dev->qp_dev->qp_table[n]; + qpp = &dev->rdi.qp_dev->qp_table[n]; for (; (q = rcu_dereference_protected(*qpp, - lockdep_is_held(&dev->qp_dev->qpt_lock))) + lockdep_is_held(&dev->rdi.qp_dev->qpt_lock))) != NULL; qpp = &q->next) if (q == qp) { RCU_INIT_POINTER(*qpp, rcu_dereference_protected(qp->next, - lockdep_is_held(&dev->qp_dev->qpt_lock))); + lockdep_is_held(&dev->rdi.qp_dev->qpt_lock))); removed = 1; trace_hfi1_qpremove(qp, n); break; } } - spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags); + spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags); if (removed) { synchronize_rcu(); if (atomic_dec_and_test(&qp->refcount)) @@ -311,6 +309,7 @@ static unsigned free_all_qps(struct hfi1_devdata *dd) unsigned long flags; struct rvt_qp *qp; unsigned n, qp_inuse = 0; + spinlock_t *l; /* useless pointer to shutup checkpatch */ for (n = 0; n < dd->num_pports; n++) { struct hfi1_ibport *ibp = &dd->pport[n].ibport_data; @@ -325,19 +324,20 @@ static unsigned free_all_qps(struct hfi1_devdata *dd) rcu_read_unlock(); } - if (!dev->qp_dev) + if (!dev->rdi.qp_dev) goto bail; - spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags); - for (n = 0; n < dev->qp_dev->qp_table_size; n++) { - qp = rcu_dereference_protected(dev->qp_dev->qp_table[n], - 
lockdep_is_held(&dev->qp_dev->qpt_lock)); - RCU_INIT_POINTER(dev->qp_dev->qp_table[n], NULL); + spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags); + for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) { + l = &dev->rdi.qp_dev->qpt_lock; + qp = rcu_dereference_protected(dev->rdi.qp_dev->qp_table[n], + lockdep_is_held(l)); + RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[n], NULL); for (; qp; qp = rcu_dereference_protected(qp->next, - lockdep_is_held(&dev->qp_dev->qpt_lock))) + lockdep_is_held(l))) qp_inuse++; } - spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags); + spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags); synchronize_rcu(); bail: return qp_inuse; @@ -1157,7 +1157,8 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, qp->s_flags = RVT_S_SIGNAL_REQ_WR; dev = to_idev(ibpd->device); dd = dd_from_dev(dev); - err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type, + err = alloc_qpn(dd, &dev->rdi.qp_dev->qpn_table, + init_attr->qp_type, init_attr->port_num); if (err < 0) { ret = ERR_PTR(err); @@ -1259,7 +1260,7 @@ bail_ip: kref_put(&qp->ip->ref, rvt_release_mmap_info); else vfree(qp->r_rq.wq); - free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num); + free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num); bail_qp: kfree(priv->s_hdr); kfree(priv); @@ -1310,7 +1311,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) spin_unlock_irq(&qp->r_lock); /* all user's cleaned up, mark it available */ - free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num); + free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num); spin_lock(&dev->n_qps_lock); dev->n_qps_allocated--; spin_unlock(&dev->n_qps_lock); @@ -1330,10 +1331,10 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) * init_qpn_table - initialize the QP number table for a device * @qpt: the QPN table */ -static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt) +static int init_qpn_table(struct hfi1_devdata *dd, struct rvt_qpn_table *qpt) { u32 offset, qpn, i; - struct qpn_map *map; + struct rvt_qpn_map *map; int ret = 0; spin_lock_init(&qpt->lock); @@ -1343,9 +1344,9 @@ static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt) /* insure we don't assign QPs from KDETH 64K window */ qpn = kdeth_qp << 16; - qpt->nmaps = qpn / BITS_PER_PAGE; + qpt->nmaps = qpn / RVT_BITS_PER_PAGE; /* This should always be zero */ - offset = qpn & BITS_PER_PAGE_MASK; + offset = qpn & RVT_BITS_PER_PAGE_MASK; map = &qpt->map[qpt->nmaps]; dd_dev_info(dd, "Reserving QPNs for KDETH window from 0x%x to 0x%x\n", qpn, qpn + 65535); @@ -1359,7 +1360,7 @@ static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt) } set_bit(offset, map->page); offset++; - if (offset == BITS_PER_PAGE) { + if (offset == RVT_BITS_PER_PAGE) { /* next page */ qpt->nmaps++; map++; @@ -1373,7 +1374,7 @@ static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt) * free_qpn_table - free the QP number table for a device * @qpt: the QPN table */ -static void free_qpn_table(struct hfi1_qpn_table *qpt) +static void free_qpn_table(struct rvt_qpn_table *qpt) { int i; @@ -1505,31 +1506,31 @@ int hfi1_qp_init(struct hfi1_ibdev *dev) int ret = -ENOMEM; /* allocate parent object */ - dev->qp_dev = kzalloc(sizeof(*dev->qp_dev), GFP_KERNEL); - if (!dev->qp_dev) + dev->rdi.qp_dev = kzalloc(sizeof(*dev->rdi.qp_dev), GFP_KERNEL); + if (!dev->rdi.qp_dev) goto nomem; /* allocate hash table */ - dev->qp_dev->qp_table_size = hfi1_qp_table_size; - dev->qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size); - dev->qp_dev->qp_table = - 
kmalloc(dev->qp_dev->qp_table_size * - sizeof(*dev->qp_dev->qp_table), + dev->rdi.qp_dev->qp_table_size = hfi1_qp_table_size; + dev->rdi.qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size); + dev->rdi.qp_dev->qp_table = + kmalloc(dev->rdi.qp_dev->qp_table_size * + sizeof(*dev->rdi.qp_dev->qp_table), GFP_KERNEL); - if (!dev->qp_dev->qp_table) + if (!dev->rdi.qp_dev->qp_table) goto nomem; - for (i = 0; i < dev->qp_dev->qp_table_size; i++) - RCU_INIT_POINTER(dev->qp_dev->qp_table[i], NULL); - spin_lock_init(&dev->qp_dev->qpt_lock); + for (i = 0; i < dev->rdi.qp_dev->qp_table_size; i++) + RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[i], NULL); + spin_lock_init(&dev->rdi.qp_dev->qpt_lock); /* initialize qpn map */ - ret = init_qpn_table(dd, &dev->qp_dev->qpn_table); + ret = init_qpn_table(dd, &dev->rdi.qp_dev->qpn_table); if (ret) goto nomem; return ret; nomem: - if (dev->qp_dev) { - kfree(dev->qp_dev->qp_table); - free_qpn_table(&dev->qp_dev->qpn_table); - kfree(dev->qp_dev); + if (dev->rdi.qp_dev) { + kfree(dev->rdi.qp_dev->qp_table); + free_qpn_table(&dev->rdi.qp_dev->qpn_table); + kfree(dev->rdi.qp_dev); } return ret; } @@ -1543,10 +1544,10 @@ void hfi1_qp_exit(struct hfi1_ibdev *dev) if (qps_inuse) dd_dev_err(dd, "QP memory leak! %u still in use\n", qps_inuse); - if (dev->qp_dev) { - kfree(dev->qp_dev->qp_table); - free_qpn_table(&dev->qp_dev->qpn_table); - kfree(dev->qp_dev); + if (dev->rdi.qp_dev) { + kfree(dev->rdi.qp_dev->qp_table); + free_qpn_table(&dev->rdi.qp_dev->qpn_table); + kfree(dev->rdi.qp_dev); } } @@ -1619,11 +1620,11 @@ int qp_iter_next(struct qp_iter *iter) * * n = 0..iter->specials is the special qp indices * - * n = iter->specials..dev->qp_dev->qp_table_size+iter->specials are + * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are * the potential hash bucket entries * */ - for (; n < dev->qp_dev->qp_table_size + iter->specials; n++) { + for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) { if (pqp) { qp = rcu_dereference(pqp->next); } else { @@ -1642,7 +1643,7 @@ int qp_iter_next(struct qp_iter *iter) qp = rcu_dereference(ibp->rvp.qp[1]); } else { qp = rcu_dereference( - dev->qp_dev->qp_table[ + dev->rdi.qp_dev->qp_table[ (n - iter->specials)]); } } diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 9efa4bc634e7..18b0f0ed6ee3 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -51,41 +51,11 @@ */ #include +#include #include "verbs.h" #include "sdma.h" -#define QPN_MAX BIT(24) -#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) - -/* - * QPN-map pages start out as NULL, they get allocated upon - * first use and are never deallocated. This way, - * large bitmaps are not allocated unless large numbers of QPs are used. 
- */ -struct qpn_map { - void *page; -}; - -struct hfi1_qpn_table { - spinlock_t lock; /* protect changes in this struct */ - unsigned flags; /* flags for QP0/1 allocated for each port */ - u32 last; /* last QP number allocated */ - u32 nmaps; /* size of the map table */ - u16 limit; - u8 incr; - /* bit map of free QP numbers other than 0/1 */ - struct qpn_map map[QPNMAP_ENTRIES]; -}; - -struct hfi1_qp_ibdev { - u32 qp_table_size; - u32 qp_table_bits; - struct rvt_qp __rcu **qp_table; - spinlock_t qpt_lock; - struct hfi1_qpn_table qpn_table; -}; - -static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn) +static inline u32 qpn_hash(struct rvt_qp_ibdev *dev, u32 qpn) { return hash_32(qpn, dev->qp_table_bits); } @@ -107,9 +77,9 @@ static inline struct rvt_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp, qp = rcu_dereference(ibp->rvp.qp[qpn]); } else { struct hfi1_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; - u32 n = qpn_hash(dev->qp_dev, qpn); + u32 n = qpn_hash(dev->rdi.qp_dev, qpn); - for (qp = rcu_dereference(dev->qp_dev->qp_table[n]); qp; + for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp; qp = rcu_dereference(qp->next)) if (qp->ibqp.qp_num == qpn) break; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index b9843a5ef0d2..c22f0d13ad7f 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -346,12 +346,9 @@ struct hfi1_ibport { u8 sc_to_sl[32]; }; -struct hfi1_qp_ibdev; struct hfi1_ibdev { struct rvt_dev_info rdi; /* Must be first */ - struct hfi1_qp_ibdev *qp_dev; - /* QP numbers are shared by all IB ports */ /* protect wait lists */ seqlock_t iowait_lock; -- cgit v1.2.3-59-g8ed1b From a2c2d608957c1b6f444e092fa7f49c1f1ac7fa0a Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:12 -0800 Subject: staging/rdma/hfi1: Remove create_qp functionality Rely on rdmavt to provide queue pair creation. Reviewed-by: Ira Weiny Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 634 +++++--------------------------------- drivers/staging/rdma/hfi1/qp.h | 23 +- drivers/staging/rdma/hfi1/verbs.c | 31 +- 3 files changed, 102 insertions(+), 586 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 1bf8083fcef2..a336d2a40d58 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -60,7 +60,7 @@ #include "trace.h" #include "sdma.h" -static unsigned int hfi1_qp_table_size = 256; +unsigned int hfi1_qp_table_size = 256; module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); @@ -115,105 +115,6 @@ static const u16 credit_table[31] = { 32768 /* 1E */ }; -static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map) -{ - unsigned long page = get_zeroed_page(GFP_KERNEL); - - /* - * Free the page if someone raced with us installing it. - */ - - spin_lock(&qpt->lock); - if (map->page) - free_page(page); - else - map->page = (void *)page; - spin_unlock(&qpt->lock); -} - -/* - * Allocate the next available QPN or - * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. 
- */ -static int alloc_qpn(struct hfi1_devdata *dd, struct rvt_qpn_table *qpt, - enum ib_qp_type type, u8 port) -{ - u32 i, offset, max_scan, qpn; - struct rvt_qpn_map *map; - u32 ret; - - if (type == IB_QPT_SMI || type == IB_QPT_GSI) { - unsigned n; - - ret = type == IB_QPT_GSI; - n = 1 << (ret + 2 * (port - 1)); - spin_lock(&qpt->lock); - if (qpt->flags & n) - ret = -EINVAL; - else - qpt->flags |= n; - spin_unlock(&qpt->lock); - goto bail; - } - - qpn = qpt->last + qpt->incr; - if (qpn >= RVT_QPN_MAX) - qpn = qpt->incr | ((qpt->last & 1) ^ 1); - /* offset carries bit 0 */ - offset = qpn & RVT_BITS_PER_PAGE_MASK; - map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; - max_scan = qpt->nmaps - !offset; - for (i = 0;;) { - if (unlikely(!map->page)) { - get_map_page(qpt, map); - if (unlikely(!map->page)) - break; - } - do { - if (!test_and_set_bit(offset, map->page)) { - qpt->last = qpn; - ret = qpn; - goto bail; - } - offset += qpt->incr; - /* - * This qpn might be bogus if offset >= - * RVT_BITS_PER_PAGE. That is OK. It gets re-assigned - * below - */ - qpn = mk_qpn(qpt, map, offset); - } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX); - /* - * In order to keep the number of pages allocated to a - * minimum, we scan the all existing pages before increasing - * the size of the bitmap table. - */ - if (++i > max_scan) { - if (qpt->nmaps == RVT_QPNMAP_ENTRIES) - break; - map = &qpt->map[qpt->nmaps++]; - /* start at incr with current bit 0 */ - offset = qpt->incr | (offset & 1); - } else if (map < &qpt->map[qpt->nmaps]) { - ++map; - /* start at incr with current bit 0 */ - offset = qpt->incr | (offset & 1); - } else { - map = &qpt->map[0]; - /* wrap to first map page, invert bit 0 */ - offset = qpt->incr | ((offset & 1) ^ 1); - } - /* there can be no bits at shift and below */ - WARN_ON(offset & (dd->qos_shift - 1)); - qpn = mk_qpn(qpt, map, offset); - } - - ret = -ENOMEM; - -bail: - return ret; -} - static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn) { struct rvt_qpn_map *map; @@ -296,113 +197,6 @@ static void remove_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) } } -/** - * free_all_qps - check for QPs still in use - * @qpt: the QP table to empty - * - * There should not be any QPs still in use. - * Free memory for table. 
- */ -static unsigned free_all_qps(struct hfi1_devdata *dd) -{ - struct hfi1_ibdev *dev = &dd->verbs_dev; - unsigned long flags; - struct rvt_qp *qp; - unsigned n, qp_inuse = 0; - spinlock_t *l; /* useless pointer to shutup checkpatch */ - - for (n = 0; n < dd->num_pports; n++) { - struct hfi1_ibport *ibp = &dd->pport[n].ibport_data; - - if (!hfi1_mcast_tree_empty(ibp)) - qp_inuse++; - rcu_read_lock(); - if (rcu_dereference(ibp->rvp.qp[0])) - qp_inuse++; - if (rcu_dereference(ibp->rvp.qp[1])) - qp_inuse++; - rcu_read_unlock(); - } - - if (!dev->rdi.qp_dev) - goto bail; - spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags); - for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) { - l = &dev->rdi.qp_dev->qpt_lock; - qp = rcu_dereference_protected(dev->rdi.qp_dev->qp_table[n], - lockdep_is_held(l)); - RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[n], NULL); - - for (; qp; qp = rcu_dereference_protected(qp->next, - lockdep_is_held(l))) - qp_inuse++; - } - spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags); - synchronize_rcu(); -bail: - return qp_inuse; -} - -/** - * reset_qp - initialize the QP state to the reset state - * @qp: the QP to reset - * @type: the QP type - */ -static void reset_qp(struct rvt_qp *qp, enum ib_qp_type type) -{ - struct hfi1_qp_priv *priv = qp->priv; - qp->remote_qpn = 0; - qp->qkey = 0; - qp->qp_access_flags = 0; - iowait_init( - &priv->s_iowait, - 1, - hfi1_do_send, - iowait_sleep, - iowait_wakeup); - qp->s_flags &= RVT_S_SIGNAL_REQ_WR; - qp->s_hdrwords = 0; - qp->s_wqe = NULL; - qp->s_draining = 0; - qp->s_next_psn = 0; - qp->s_last_psn = 0; - qp->s_sending_psn = 0; - qp->s_sending_hpsn = 0; - qp->s_psn = 0; - qp->r_psn = 0; - qp->r_msn = 0; - if (type == IB_QPT_RC) { - qp->s_state = IB_OPCODE_RC_SEND_LAST; - qp->r_state = IB_OPCODE_RC_SEND_LAST; - } else { - qp->s_state = IB_OPCODE_UC_SEND_LAST; - qp->r_state = IB_OPCODE_UC_SEND_LAST; - } - qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; - qp->r_nak_state = 0; - priv->r_adefered = 0; - qp->r_aflags = 0; - qp->r_flags = 0; - qp->s_head = 0; - qp->s_tail = 0; - qp->s_cur = 0; - qp->s_acked = 0; - qp->s_last = 0; - qp->s_ssn = 1; - qp->s_lsn = 0; - clear_ahg(qp); - qp->s_mig_state = IB_MIG_MIGRATED; - memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); - qp->r_head_ack_queue = 0; - qp->s_tail_ack_queue = 0; - qp->s_num_rd_atomic = 0; - if (qp->r_rq.wq) { - qp->r_rq.wq->head = 0; - qp->r_rq.wq->tail = 0; - } - qp->r_sge.num_sge = 0; -} - static void clear_mr_refs(struct rvt_qp *qp, int clr_sends) { unsigned n; @@ -756,7 +550,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, spin_lock(&qp->s_lock); clear_mr_refs(qp, 1); clear_ahg(qp); - reset_qp(qp, ibqp->qp_type); + rvt_reset_qp(&dev->rdi, qp, ibqp->qp_type); } break; @@ -1024,254 +818,6 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp) return cpu_to_be32(aeth); } -/** - * hfi1_create_qp - create a queue pair for a device - * @ibpd: the protection domain who's device we create the queue pair for - * @init_attr: the attributes of the queue pair - * @udata: user data for libibverbs.so - * - * Returns the queue pair on success, otherwise returns an errno. - * - * Called by the ib_create_qp() core verbs function. 
- */ -struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, - struct ib_qp_init_attr *init_attr, - struct ib_udata *udata) -{ - struct rvt_qp *qp; - struct hfi1_qp_priv *priv; - int err; - struct rvt_swqe *swq = NULL; - struct hfi1_ibdev *dev; - struct hfi1_devdata *dd; - size_t sz; - size_t sg_list_sz; - struct ib_qp *ret; - - if (init_attr->cap.max_send_sge > hfi1_max_sges || - init_attr->cap.max_send_wr > hfi1_max_qp_wrs || - init_attr->create_flags) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - - /* Check receive queue parameters if no SRQ is specified. */ - if (!init_attr->srq) { - if (init_attr->cap.max_recv_sge > hfi1_max_sges || - init_attr->cap.max_recv_wr > hfi1_max_qp_wrs) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - if (init_attr->cap.max_send_sge + - init_attr->cap.max_send_wr + - init_attr->cap.max_recv_sge + - init_attr->cap.max_recv_wr == 0) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - } - - switch (init_attr->qp_type) { - case IB_QPT_SMI: - case IB_QPT_GSI: - if (init_attr->port_num == 0 || - init_attr->port_num > ibpd->device->phys_port_cnt) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - case IB_QPT_UC: - case IB_QPT_RC: - case IB_QPT_UD: - sz = sizeof(struct rvt_sge) * - init_attr->cap.max_send_sge + - sizeof(struct rvt_swqe); - swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz); - if (swq == NULL) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - sz = sizeof(*qp); - sg_list_sz = 0; - if (init_attr->srq) { - struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq); - - if (srq->rq.max_sge > 1) - sg_list_sz = sizeof(*qp->r_sg_list) * - (srq->rq.max_sge - 1); - } else if (init_attr->cap.max_recv_sge > 1) - sg_list_sz = sizeof(*qp->r_sg_list) * - (init_attr->cap.max_recv_sge - 1); - qp = kzalloc(sz + sg_list_sz, GFP_KERNEL); - if (!qp) { - ret = ERR_PTR(-ENOMEM); - goto bail_swq; - } - RCU_INIT_POINTER(qp->next, NULL); - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = ERR_PTR(-ENOMEM); - goto bail_qp_priv; - } - priv->owner = qp; - priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL); - if (!priv->s_hdr) { - ret = ERR_PTR(-ENOMEM); - goto bail_qp; - } - qp->priv = priv; - qp->timeout_jiffies = - usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / - 1000UL); - if (init_attr->srq) - sz = 0; - else { - qp->r_rq.size = init_attr->cap.max_recv_wr + 1; - qp->r_rq.max_sge = init_attr->cap.max_recv_sge; - sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + - sizeof(struct rvt_rwqe); - qp->r_rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + - qp->r_rq.size * sz); - if (!qp->r_rq.wq) { - ret = ERR_PTR(-ENOMEM); - goto bail_qp; - } - } - - /* - * ib_create_qp() will initialize qp->ibqp - * except for qp->ibqp.qp_num. 
- */ - spin_lock_init(&qp->r_lock); - spin_lock_init(&qp->s_lock); - spin_lock_init(&qp->r_rq.lock); - atomic_set(&qp->refcount, 0); - init_waitqueue_head(&qp->wait); - init_timer(&qp->s_timer); - qp->s_timer.data = (unsigned long)qp; - INIT_LIST_HEAD(&qp->rspwait); - qp->state = IB_QPS_RESET; - qp->s_wq = swq; - qp->s_size = init_attr->cap.max_send_wr + 1; - qp->s_max_sge = init_attr->cap.max_send_sge; - if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) - qp->s_flags = RVT_S_SIGNAL_REQ_WR; - dev = to_idev(ibpd->device); - dd = dd_from_dev(dev); - err = alloc_qpn(dd, &dev->rdi.qp_dev->qpn_table, - init_attr->qp_type, - init_attr->port_num); - if (err < 0) { - ret = ERR_PTR(err); - vfree(qp->r_rq.wq); - goto bail_qp; - } - qp->ibqp.qp_num = err; - qp->port_num = init_attr->port_num; - reset_qp(qp, init_attr->qp_type); - - break; - - default: - /* Don't support raw QPs */ - ret = ERR_PTR(-ENOSYS); - goto bail; - } - - init_attr->cap.max_inline_data = 0; - - /* - * Return the address of the RWQ as the offset to mmap. - * See hfi1_mmap() for details. - */ - if (udata && udata->outlen >= sizeof(__u64)) { - if (!qp->r_rq.wq) { - __u64 offset = 0; - - err = ib_copy_to_udata(udata, &offset, - sizeof(offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_ip; - } - } else { - u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz; - - qp->ip = rvt_create_mmap_info(&dev->rdi, s, - ibpd->uobject->context, - qp->r_rq.wq); - if (!qp->ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_ip; - } - - err = ib_copy_to_udata(udata, &(qp->ip->offset), - sizeof(qp->ip->offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_ip; - } - } - } - - spin_lock(&dev->n_qps_lock); - if (dev->n_qps_allocated == hfi1_max_qps) { - spin_unlock(&dev->n_qps_lock); - ret = ERR_PTR(-ENOMEM); - goto bail_ip; - } - - dev->n_qps_allocated++; - spin_unlock(&dev->n_qps_lock); - - if (qp->ip) { - spin_lock_irq(&dev->rdi.pending_lock); - list_add(&qp->ip->pending_mmaps, &dev->rdi.pending_mmaps); - spin_unlock_irq(&dev->rdi.pending_lock); - } - - ret = &qp->ibqp; - - /* - * We have our QP and its good, now keep track of what types of opcodes - * can be processed on this QP. We do this by keeping track of what the - * 3 high order bits of the opcode are. 
- */ - switch (init_attr->qp_type) { - case IB_QPT_SMI: - case IB_QPT_GSI: - case IB_QPT_UD: - qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & OPCODE_QP_MASK; - break; - case IB_QPT_RC: - qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & OPCODE_QP_MASK; - break; - case IB_QPT_UC: - qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & OPCODE_QP_MASK; - break; - default: - ret = ERR_PTR(-EINVAL); - goto bail_ip; - } - - goto bail; - -bail_ip: - if (qp->ip) - kref_put(&qp->ip->ref, rvt_release_mmap_info); - else - vfree(qp->r_rq.wq); - free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num); -bail_qp: - kfree(priv->s_hdr); - kfree(priv); -bail_qp_priv: - kfree(qp); -bail_swq: - vfree(swq); -bail: - return ret; -} - /** * hfi1_destroy_qp - destroy a queue pair * @ibqp: the queue pair to destroy @@ -1327,61 +873,6 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) return 0; } -/** - * init_qpn_table - initialize the QP number table for a device - * @qpt: the QPN table - */ -static int init_qpn_table(struct hfi1_devdata *dd, struct rvt_qpn_table *qpt) -{ - u32 offset, qpn, i; - struct rvt_qpn_map *map; - int ret = 0; - - spin_lock_init(&qpt->lock); - - qpt->last = 0; - qpt->incr = 1 << dd->qos_shift; - - /* insure we don't assign QPs from KDETH 64K window */ - qpn = kdeth_qp << 16; - qpt->nmaps = qpn / RVT_BITS_PER_PAGE; - /* This should always be zero */ - offset = qpn & RVT_BITS_PER_PAGE_MASK; - map = &qpt->map[qpt->nmaps]; - dd_dev_info(dd, "Reserving QPNs for KDETH window from 0x%x to 0x%x\n", - qpn, qpn + 65535); - for (i = 0; i < 65536; i++) { - if (!map->page) { - get_map_page(qpt, map); - if (!map->page) { - ret = -ENOMEM; - break; - } - } - set_bit(offset, map->page); - offset++; - if (offset == RVT_BITS_PER_PAGE) { - /* next page */ - qpt->nmaps++; - map++; - offset = 0; - } - } - return ret; -} - -/** - * free_qpn_table - free the QP number table for a device - * @qpt: the QPN table - */ -static void free_qpn_table(struct rvt_qpn_table *qpt) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(qpt->map); i++) - free_page((unsigned long) qpt->map[i].page); -} - /** * hfi1_get_credit - flush the send work queue of a QP * @qp: the qp who's send work queue to flush @@ -1499,58 +990,6 @@ static void iowait_wakeup(struct iowait *wait, int reason) hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC); } -int hfi1_qp_init(struct hfi1_ibdev *dev) -{ - struct hfi1_devdata *dd = dd_from_dev(dev); - int i; - int ret = -ENOMEM; - - /* allocate parent object */ - dev->rdi.qp_dev = kzalloc(sizeof(*dev->rdi.qp_dev), GFP_KERNEL); - if (!dev->rdi.qp_dev) - goto nomem; - /* allocate hash table */ - dev->rdi.qp_dev->qp_table_size = hfi1_qp_table_size; - dev->rdi.qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size); - dev->rdi.qp_dev->qp_table = - kmalloc(dev->rdi.qp_dev->qp_table_size * - sizeof(*dev->rdi.qp_dev->qp_table), - GFP_KERNEL); - if (!dev->rdi.qp_dev->qp_table) - goto nomem; - for (i = 0; i < dev->rdi.qp_dev->qp_table_size; i++) - RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[i], NULL); - spin_lock_init(&dev->rdi.qp_dev->qpt_lock); - /* initialize qpn map */ - ret = init_qpn_table(dd, &dev->rdi.qp_dev->qpn_table); - if (ret) - goto nomem; - return ret; -nomem: - if (dev->rdi.qp_dev) { - kfree(dev->rdi.qp_dev->qp_table); - free_qpn_table(&dev->rdi.qp_dev->qpn_table); - kfree(dev->rdi.qp_dev); - } - return ret; -} - -void hfi1_qp_exit(struct hfi1_ibdev *dev) -{ - struct hfi1_devdata *dd = dd_from_dev(dev); - u32 qps_inuse; - - qps_inuse = free_all_qps(dd); - if (qps_inuse) - dd_dev_err(dd, "QP memory leak! 
%u still in use\n", - qps_inuse); - if (dev->rdi.qp_dev) { - kfree(dev->rdi.qp_dev->qp_table); - free_qpn_table(&dev->rdi.qp_dev->qpn_table); - kfree(dev->rdi.qp_dev); - } -} - /** * * qp_to_sdma_engine - map a qp to a send engine @@ -1724,6 +1163,75 @@ void qp_comm_est(struct rvt_qp *qp) } } +void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, + gfp_t gfp) +{ + struct hfi1_qp_priv *priv; + + priv = kzalloc(sizeof(*priv), gfp); + if (!priv) + return ERR_PTR(-ENOMEM); + + priv->owner = qp; + + priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp); + if (!priv->s_hdr) { + kfree(priv); + return ERR_PTR(-ENOMEM); + } + + return priv; +} + +void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) +{ + struct hfi1_qp_priv *priv = qp->priv; + + kfree(priv->s_hdr); + kfree(priv); +} + +unsigned free_all_qps(struct rvt_dev_info *rdi) +{ + struct hfi1_ibdev *verbs_dev = container_of(rdi, + struct hfi1_ibdev, + rdi); + struct hfi1_devdata *dd = container_of(verbs_dev, + struct hfi1_devdata, + verbs_dev); + int n; + unsigned qp_inuse = 0; + + for (n = 0; n < dd->num_pports; n++) { + struct hfi1_ibport *ibp = &dd->pport[n].ibport_data; + + if (!hfi1_mcast_tree_empty(ibp)) + qp_inuse++; + rcu_read_lock(); + if (rcu_dereference(ibp->rvp.qp[0])) + qp_inuse++; + if (rcu_dereference(ibp->rvp.qp[1])) + qp_inuse++; + rcu_read_unlock(); + } + + return qp_inuse; +} + +void notify_qp_reset(struct rvt_qp *qp) +{ + struct hfi1_qp_priv *priv = qp->priv; + + iowait_init( + &priv->s_iowait, + 1, + hfi1_do_send, + iowait_sleep, + iowait_wakeup); + priv->r_adefered = 0; + clear_ahg(qp); +} + /* * Switch to alternate path. * The QP s_lock should be held and interrupts disabled. diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 18b0f0ed6ee3..b825cb347ee1 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -55,6 +55,8 @@ #include "verbs.h" #include "sdma.h" +extern unsigned int hfi1_qp_table_size; + static inline u32 qpn_hash(struct rvt_qp_ibdev *dev, u32 qpn) { return hash_32(qpn, dev->qp_table_bits); @@ -169,18 +171,6 @@ int hfi1_destroy_qp(struct ib_qp *ibqp); */ void hfi1_get_credit(struct rvt_qp *qp, u32 aeth); -/** - * hfi1_qp_init - allocate QP tables - * @dev: a pointer to the hfi1_ibdev - */ -int hfi1_qp_init(struct hfi1_ibdev *dev); - -/** - * hfi1_qp_exit - free the QP related structures - * @dev: a pointer to the hfi1_ibdev - */ -void hfi1_qp_exit(struct hfi1_ibdev *dev); - /** * hfi1_qp_wakeup - wake up on the indicated event * @qp: the QP @@ -255,4 +245,13 @@ static inline void hfi1_schedule_send(struct rvt_qp *qp) void hfi1_migrate_qp(struct rvt_qp *qp); +/* + * Functions provided by hfi1 driver for rdmavt to use + */ +void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, + gfp_t gfp); +void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); +unsigned free_all_qps(struct rvt_dev_info *rdi); +void notify_qp_reset(struct rvt_qp *qp); + #endif /* _QP_H */ diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index a1e9f0b2bf05..3f02d0a013c4 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1720,11 +1720,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) u16 descq_cnt; char buf[TXREQ_NAME_LEN]; - ret = hfi1_qp_init(dev); - if (ret) - goto err_qp_init; - - for (i = 0; i < dd->num_pports; i++) init_ibport(ppd + i); @@ -1820,7 +1815,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->modify_srq = hfi1_modify_srq; ibdev->query_srq 
= hfi1_query_srq; ibdev->destroy_srq = hfi1_destroy_srq; - ibdev->create_qp = hfi1_create_qp; + ibdev->create_qp = NULL; ibdev->modify_qp = hfi1_modify_qp; ibdev->query_qp = hfi1_query_qp; ibdev->destroy_qp = hfi1_destroy_qp; @@ -1861,8 +1856,25 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; dd->verbs_dev.rdi.dparms.props.max_ah = hfi1_max_ahs; dd->verbs_dev.rdi.dparms.props.max_pd = hfi1_max_pds; - dd->verbs_dev.rdi.flags = (RVT_FLAG_QP_INIT_DRIVER | - RVT_FLAG_CQ_INIT_DRIVER); + dd->verbs_dev.rdi.dparms.props.max_sge = hfi1_max_sges; + + /* queue pair */ + dd->verbs_dev.rdi.dparms.props.max_qp = hfi1_max_qps; + dd->verbs_dev.rdi.dparms.props.max_qp_wr = hfi1_max_qp_wrs; + dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size; + dd->verbs_dev.rdi.dparms.qpn_start = 0; + dd->verbs_dev.rdi.dparms.qpn_inc = 1; + dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift; + dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16; + dd->verbs_dev.rdi.dparms.qpn_res_end = + dd->verbs_dev.rdi.dparms.qpn_res_start + 65535; + dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc; + dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free; + dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps; + dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset; + + /* misc settings */ + dd->verbs_dev.rdi.flags = RVT_FLAG_CQ_INIT_DRIVER; dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size; dd->verbs_dev.rdi.dparms.nports = dd->num_pports; dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd); @@ -1895,8 +1907,6 @@ err_agents: err_reg: err_verbs_txreq: kmem_cache_destroy(dev->verbs_txreq_cache); - hfi1_qp_exit(dev); -err_qp_init: dd_dev_err(dd, "cannot register verbs: %d!\n", -ret); bail: return ret; @@ -1917,7 +1927,6 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd) if (!list_empty(&dev->memwait)) dd_dev_err(dd, "memwait list not empty!\n"); - hfi1_qp_exit(dev); del_timer_sync(&dev->mem_timer); kmem_cache_destroy(dev->verbs_txreq_cache); } -- cgit v1.2.3-59-g8ed1b From 94d5171cf2d10174e0ee9c3df463607cb0f4dd53 Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Tue, 19 Jan 2016 14:43:17 -0800 Subject: staging/rdma/hfi1: Remove query_device function Removed the hfi1 query_device function in favor of the rdmavt rvt_query_device function. The rvt dev info device attributes still need to be filled in by the driver. Reviewed-by: Dennis Dalessandro Signed-off-by: Harish Chegondi Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 101 ++++++++++++++++++-------------------- 1 file changed, 47 insertions(+), 54 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 3f02d0a013c4..70af487c990f 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1362,55 +1362,49 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) return ret; } -static int query_device(struct ib_device *ibdev, - struct ib_device_attr *props, - struct ib_udata *uhw) +/** + * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
+ * @dd: the device data structure + */ +static void hfi1_fill_device_attr(struct hfi1_devdata *dd) { - struct hfi1_devdata *dd = dd_from_ibdev(ibdev); - struct hfi1_ibdev *dev = to_idev(ibdev); - - if (uhw->inlen || uhw->outlen) - return -EINVAL; - memset(props, 0, sizeof(*props)); - - props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | - IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | - IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | - IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; - - props->page_size_cap = PAGE_SIZE; - props->vendor_id = - dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3; - props->vendor_part_id = dd->pcidev->device; - props->hw_ver = dd->minrev; - props->sys_image_guid = ib_hfi1_sys_image_guid; - props->max_mr_size = ~0ULL; - props->max_qp = hfi1_max_qps; - props->max_qp_wr = hfi1_max_qp_wrs; - props->max_sge = hfi1_max_sges; - props->max_sge_rd = hfi1_max_sges; - props->max_cq = hfi1_max_cqs; - props->max_ah = hfi1_max_ahs; - props->max_cqe = hfi1_max_cqes; - props->max_mr = dev->rdi.lkey_table.max; - props->max_fmr = dev->rdi.lkey_table.max; - props->max_map_per_fmr = 32767; - props->max_pd = dev->rdi.dparms.props.max_pd; - props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; - props->max_qp_init_rd_atom = 255; - /* props->max_res_rd_atom */ - props->max_srq = hfi1_max_srqs; - props->max_srq_wr = hfi1_max_srq_wrs; - props->max_srq_sge = hfi1_max_srq_sges; - /* props->local_ca_ack_delay */ - props->atomic_cap = IB_ATOMIC_GLOB; - props->max_pkeys = hfi1_get_npkeys(dd); - props->max_mcast_grp = hfi1_max_mcast_grps; - props->max_mcast_qp_attach = hfi1_max_mcast_qp_attached; - props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * - props->max_mcast_grp; - - return 0; + struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; + + memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props)); + + rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | + IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | + IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; + rdi->dparms.props.page_size_cap = PAGE_SIZE; + rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3; + rdi->dparms.props.vendor_part_id = dd->pcidev->device; + rdi->dparms.props.hw_ver = dd->minrev; + rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid; + rdi->dparms.props.max_mr_size = ~0ULL; + rdi->dparms.props.max_qp = hfi1_max_qps; + rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs; + rdi->dparms.props.max_sge = hfi1_max_sges; + rdi->dparms.props.max_sge_rd = hfi1_max_sges; + rdi->dparms.props.max_cq = hfi1_max_cqs; + rdi->dparms.props.max_ah = hfi1_max_ahs; + rdi->dparms.props.max_cqe = hfi1_max_cqes; + rdi->dparms.props.max_mr = rdi->lkey_table.max; + rdi->dparms.props.max_fmr = rdi->lkey_table.max; + rdi->dparms.props.max_map_per_fmr = 32767; + rdi->dparms.props.max_pd = hfi1_max_pds; + rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; + rdi->dparms.props.max_qp_init_rd_atom = 255; + rdi->dparms.props.max_srq = hfi1_max_srqs; + rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs; + rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges; + rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB; + rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd); + rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps; + rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached; + rdi->dparms.props.max_total_mcast_qp_attach = + rdi->dparms.props.max_mcast_qp_attach * + rdi->dparms.props.max_mcast_grp; } static inline u16 opa_speed_to_ib(u16 in) @@ 
-1797,7 +1791,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->phys_port_cnt = dd->num_pports; ibdev->num_comp_vectors = 1; ibdev->dma_device = &dd->pcidev->dev; - ibdev->query_device = query_device; + ibdev->query_device = NULL; ibdev->modify_device = modify_device; ibdev->query_port = query_port; ibdev->modify_port = modify_port; @@ -1854,13 +1848,12 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; - dd->verbs_dev.rdi.dparms.props.max_ah = hfi1_max_ahs; - dd->verbs_dev.rdi.dparms.props.max_pd = hfi1_max_pds; - dd->verbs_dev.rdi.dparms.props.max_sge = hfi1_max_sges; + /* + * Fill in rvt info device attributes. + */ + hfi1_fill_device_attr(dd); /* queue pair */ - dd->verbs_dev.rdi.dparms.props.max_qp = hfi1_max_qps; - dd->verbs_dev.rdi.dparms.props.max_qp_wr = hfi1_max_qp_wrs; dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size; dd->verbs_dev.rdi.dparms.qpn_start = 0; dd->verbs_dev.rdi.dparms.qpn_inc = 1; -- cgit v1.2.3-59-g8ed1b From abd712daeeb4461aee5ca5a2bfe2717dc22577ea Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:22 -0800 Subject: staging/rdma/hfi1: Remove CQ data structures and functions from hfi1 The completion queue is not a complex data structure, so it can be removed at the same time as its functions. Unlike the more complicated queue pair, which was converted in multiple patches, this single patch removes all traces of hfi1-specific completion queues from the hfi1 driver. Reviewed-by: Ira Weiny Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/cq.c | 558 ------------------------------------- drivers/staging/rdma/hfi1/hfi.h | 2 - drivers/staging/rdma/hfi1/init.c | 2 - drivers/staging/rdma/hfi1/qp.c | 4 +- drivers/staging/rdma/hfi1/rc.c | 8 +- drivers/staging/rdma/hfi1/ruc.c | 10 +- drivers/staging/rdma/hfi1/uc.c | 6 +- drivers/staging/rdma/hfi1/ud.c | 10 +- drivers/staging/rdma/hfi1/verbs.c | 21 +- drivers/staging/rdma/hfi1/verbs.h | 65 +---- 11 files changed, 34 insertions(+), 654 deletions(-) delete mode 100644 drivers/staging/rdma/hfi1/cq.c (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index 55077f396cf9..7797f2c0f01f 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -7,7 +7,7 @@ # obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o -hfi1-y := chip.o cq.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ +hfi1-y := chip.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ init.o intr.o mad.o pcie.o pio.o pio_copy.o \ qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs_mcast.o verbs.o diff --git a/drivers/staging/rdma/hfi1/cq.c b/drivers/staging/rdma/hfi1/cq.c deleted file mode 100644 index 25d1a2a25fee..000000000000 --- a/drivers/staging/rdma/hfi1/cq.c +++ /dev/null @@ -1,558 +0,0 @@ -/* - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Corporation.
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include -#include -#include -#include - -#include "verbs.h" -#include "hfi.h" - -/** - * hfi1_cq_enter - add a new entry to the completion queue - * @cq: completion queue - * @entry: work completion entry to add - * @sig: true if @entry is a solicited entry - * - * This may be called with qp->s_lock held. - */ -void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited) -{ - struct hfi1_cq_wc *wc; - unsigned long flags; - u32 head; - u32 next; - - spin_lock_irqsave(&cq->lock, flags); - - /* - * Note that the head pointer might be writable by user processes. - * Take care to verify it is a sane value. 
- */ - wc = cq->queue; - head = wc->head; - if (head >= (unsigned) cq->ibcq.cqe) { - head = cq->ibcq.cqe; - next = 0; - } else - next = head + 1; - if (unlikely(next == wc->tail)) { - spin_unlock_irqrestore(&cq->lock, flags); - if (cq->ibcq.event_handler) { - struct ib_event ev; - - ev.device = cq->ibcq.device; - ev.element.cq = &cq->ibcq; - ev.event = IB_EVENT_CQ_ERR; - cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); - } - return; - } - if (cq->ip) { - wc->uqueue[head].wr_id = entry->wr_id; - wc->uqueue[head].status = entry->status; - wc->uqueue[head].opcode = entry->opcode; - wc->uqueue[head].vendor_err = entry->vendor_err; - wc->uqueue[head].byte_len = entry->byte_len; - wc->uqueue[head].ex.imm_data = - (__u32 __force)entry->ex.imm_data; - wc->uqueue[head].qp_num = entry->qp->qp_num; - wc->uqueue[head].src_qp = entry->src_qp; - wc->uqueue[head].wc_flags = entry->wc_flags; - wc->uqueue[head].pkey_index = entry->pkey_index; - wc->uqueue[head].slid = entry->slid; - wc->uqueue[head].sl = entry->sl; - wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; - wc->uqueue[head].port_num = entry->port_num; - /* Make sure entry is written before the head index. */ - smp_wmb(); - } else - wc->kqueue[head] = *entry; - wc->head = next; - - if (cq->notify == IB_CQ_NEXT_COMP || - (cq->notify == IB_CQ_SOLICITED && - (solicited || entry->status != IB_WC_SUCCESS))) { - struct kthread_worker *worker; - /* - * This will cause send_complete() to be called in - * another thread. - */ - smp_read_barrier_depends(); /* see hfi1_cq_exit */ - worker = cq->dd->worker; - if (likely(worker)) { - cq->notify = IB_CQ_NONE; - cq->triggered++; - queue_kthread_work(worker, &cq->comptask); - } - } - - spin_unlock_irqrestore(&cq->lock, flags); -} - -/** - * hfi1_poll_cq - poll for work completion entries - * @ibcq: the completion queue to poll - * @num_entries: the maximum number of entries to return - * @entry: pointer to array where work completions are placed - * - * Returns the number of completion entries polled. - * - * This may be called from interrupt context. Also called by ib_poll_cq() - * in the generic verbs code. - */ -int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) -{ - struct hfi1_cq *cq = to_icq(ibcq); - struct hfi1_cq_wc *wc; - unsigned long flags; - int npolled; - u32 tail; - - /* The kernel can only poll a kernel completion queue */ - if (cq->ip) { - npolled = -EINVAL; - goto bail; - } - - spin_lock_irqsave(&cq->lock, flags); - - wc = cq->queue; - tail = wc->tail; - if (tail > (u32) cq->ibcq.cqe) - tail = (u32) cq->ibcq.cqe; - for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { - if (tail == wc->head) - break; - /* The kernel doesn't need a RMB since it has the lock. */ - *entry = wc->kqueue[tail]; - if (tail >= cq->ibcq.cqe) - tail = 0; - else - tail++; - } - wc->tail = tail; - - spin_unlock_irqrestore(&cq->lock, flags); - -bail: - return npolled; -} - -static void send_complete(struct kthread_work *work) -{ - struct hfi1_cq *cq = container_of(work, struct hfi1_cq, comptask); - - /* - * The completion handler will most likely rearm the notification - * and poll for all pending entries. If a new completion entry - * is added while we are in this routine, queue_work() - * won't call us again until we return so we check triggered to - * see if we need to call the handler again. - */ - for (;;) { - u8 triggered = cq->triggered; - - /* - * IPoIB connected mode assumes the callback is from a - * soft IRQ. We simulate this by blocking "bottom halves". 
- * See the implementation for ipoib_cm_handle_tx_wc(), - * netif_tx_lock_bh() and netif_tx_lock(). - */ - local_bh_disable(); - cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); - local_bh_enable(); - - if (cq->triggered == triggered) - return; - } -} - -/** - * hfi1_create_cq - create a completion queue - * @ibdev: the device this completion queue is attached to - * @attr: creation attributes - * @context: unused by the driver - * @udata: user data for libibverbs.so - * - * Returns a pointer to the completion queue or negative errno values - * for failure. - * - * Called by ib_create_cq() in the generic verbs code. - */ -struct ib_cq *hfi1_create_cq( - struct ib_device *ibdev, - const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, - struct ib_udata *udata) -{ - struct hfi1_ibdev *dev = to_idev(ibdev); - struct hfi1_cq *cq; - struct hfi1_cq_wc *wc; - struct ib_cq *ret; - u32 sz; - unsigned int entries = attr->cqe; - - if (attr->flags) - return ERR_PTR(-EINVAL); - - if (entries < 1 || entries > hfi1_max_cqes) - return ERR_PTR(-EINVAL); - - /* Allocate the completion queue structure. */ - cq = kmalloc(sizeof(*cq), GFP_KERNEL); - if (!cq) - return ERR_PTR(-ENOMEM); - - /* - * Allocate the completion queue entries and head/tail pointers. - * This is allocated separately so that it can be resized and - * also mapped into user space. - * We need to use vmalloc() in order to support mmap and large - * numbers of entries. - */ - sz = sizeof(*wc); - if (udata && udata->outlen >= sizeof(__u64)) - sz += sizeof(struct ib_uverbs_wc) * (entries + 1); - else - sz += sizeof(struct ib_wc) * (entries + 1); - wc = vmalloc_user(sz); - if (!wc) { - ret = ERR_PTR(-ENOMEM); - goto bail_cq; - } - - /* - * Return the address of the WC as the offset to mmap. - * See hfi1_mmap() for details. - */ - if (udata && udata->outlen >= sizeof(__u64)) { - int err; - - cq->ip = rvt_create_mmap_info(&dev->rdi, sz, context, wc); - if (!cq->ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_wc; - } - - err = ib_copy_to_udata(udata, &cq->ip->offset, - sizeof(cq->ip->offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_ip; - } - } else - cq->ip = NULL; - - spin_lock(&dev->n_cqs_lock); - if (dev->n_cqs_allocated == hfi1_max_cqs) { - spin_unlock(&dev->n_cqs_lock); - ret = ERR_PTR(-ENOMEM); - goto bail_ip; - } - - dev->n_cqs_allocated++; - spin_unlock(&dev->n_cqs_lock); - - if (cq->ip) { - spin_lock_irq(&dev->rdi.pending_lock); - list_add(&cq->ip->pending_mmaps, &dev->rdi.pending_mmaps); - spin_unlock_irq(&dev->rdi.pending_lock); - } - - /* - * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. - * The number of entries should be >= the number requested or return - * an error. - */ - cq->dd = dd_from_dev(dev); - cq->ibcq.cqe = entries; - cq->notify = IB_CQ_NONE; - cq->triggered = 0; - spin_lock_init(&cq->lock); - init_kthread_work(&cq->comptask, send_complete); - wc->head = 0; - wc->tail = 0; - cq->queue = wc; - - ret = &cq->ibcq; - - goto done; - -bail_ip: - kfree(cq->ip); -bail_wc: - vfree(wc); -bail_cq: - kfree(cq); -done: - return ret; -} - -/** - * hfi1_destroy_cq - destroy a completion queue - * @ibcq: the completion queue to destroy. - * - * Returns 0 for success. - * - * Called by ib_destroy_cq() in the generic verbs code. 
- */ -int hfi1_destroy_cq(struct ib_cq *ibcq) -{ - struct hfi1_ibdev *dev = to_idev(ibcq->device); - struct hfi1_cq *cq = to_icq(ibcq); - - flush_kthread_work(&cq->comptask); - spin_lock(&dev->n_cqs_lock); - dev->n_cqs_allocated--; - spin_unlock(&dev->n_cqs_lock); - if (cq->ip) - kref_put(&cq->ip->ref, rvt_release_mmap_info); - else - vfree(cq->queue); - kfree(cq); - - return 0; -} - -/** - * hfi1_req_notify_cq - change the notification type for a completion queue - * @ibcq: the completion queue - * @notify_flags: the type of notification to request - * - * Returns 0 for success. - * - * This may be called from interrupt context. Also called by - * ib_req_notify_cq() in the generic verbs code. - */ -int hfi1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) -{ - struct hfi1_cq *cq = to_icq(ibcq); - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&cq->lock, flags); - /* - * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow - * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). - */ - if (cq->notify != IB_CQ_NEXT_COMP) - cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; - - if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && - cq->queue->head != cq->queue->tail) - ret = 1; - - spin_unlock_irqrestore(&cq->lock, flags); - - return ret; -} - -/** - * hfi1_resize_cq - change the size of the CQ - * @ibcq: the completion queue - * - * Returns 0 for success. - */ -int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) -{ - struct hfi1_cq *cq = to_icq(ibcq); - struct hfi1_cq_wc *old_wc; - struct hfi1_cq_wc *wc; - u32 head, tail, n; - int ret; - u32 sz; - - if (cqe < 1 || cqe > hfi1_max_cqes) { - ret = -EINVAL; - goto bail; - } - - /* - * Need to use vmalloc() if we want to support large #s of entries. - */ - sz = sizeof(*wc); - if (udata && udata->outlen >= sizeof(__u64)) - sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); - else - sz += sizeof(struct ib_wc) * (cqe + 1); - wc = vmalloc_user(sz); - if (!wc) { - ret = -ENOMEM; - goto bail; - } - - /* Check that we can write the offset to mmap. */ - if (udata && udata->outlen >= sizeof(__u64)) { - __u64 offset = 0; - - ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); - if (ret) - goto bail_free; - } - - spin_lock_irq(&cq->lock); - /* - * Make sure head and tail are sane since they - * might be user writable. - */ - old_wc = cq->queue; - head = old_wc->head; - if (head > (u32) cq->ibcq.cqe) - head = (u32) cq->ibcq.cqe; - tail = old_wc->tail; - if (tail > (u32) cq->ibcq.cqe) - tail = (u32) cq->ibcq.cqe; - if (head < tail) - n = cq->ibcq.cqe + 1 + head - tail; - else - n = head - tail; - if (unlikely((u32)cqe < n)) { - ret = -EINVAL; - goto bail_unlock; - } - for (n = 0; tail != head; n++) { - if (cq->ip) - wc->uqueue[n] = old_wc->uqueue[tail]; - else - wc->kqueue[n] = old_wc->kqueue[tail]; - if (tail == (u32) cq->ibcq.cqe) - tail = 0; - else - tail++; - } - cq->ibcq.cqe = cqe; - wc->head = n; - wc->tail = 0; - cq->queue = wc; - spin_unlock_irq(&cq->lock); - - vfree(old_wc); - - if (cq->ip) { - struct hfi1_ibdev *dev = to_idev(ibcq->device); - struct rvt_mmap_info *ip = cq->ip; - - rvt_update_mmap_info(&dev->rdi, ip, sz, wc); - - /* - * Return the offset to mmap. - * See hfi1_mmap() for details. 
- */ - if (udata && udata->outlen >= sizeof(__u64)) { - ret = ib_copy_to_udata(udata, &ip->offset, - sizeof(ip->offset)); - if (ret) - goto bail; - } - - spin_lock_irq(&dev->rdi.pending_lock); - if (list_empty(&ip->pending_mmaps)) - list_add(&ip->pending_mmaps, &dev->rdi.pending_mmaps); - spin_unlock_irq(&dev->rdi.pending_lock); - } - - ret = 0; - goto bail; - -bail_unlock: - spin_unlock_irq(&cq->lock); -bail_free: - vfree(wc); -bail: - return ret; -} - -int hfi1_cq_init(struct hfi1_devdata *dd) -{ - int ret = 0; - int cpu; - struct task_struct *task; - - if (dd->worker) - return 0; - dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL); - if (!dd->worker) - return -ENOMEM; - init_kthread_worker(dd->worker); - task = kthread_create_on_node( - kthread_worker_fn, - dd->worker, - dd->assigned_node_id, - "hfi1_cq%d", dd->unit); - if (IS_ERR(task)) - goto task_fail; - cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id)); - kthread_bind(task, cpu); - wake_up_process(task); -out: - return ret; -task_fail: - ret = PTR_ERR(task); - kfree(dd->worker); - dd->worker = NULL; - goto out; -} - -void hfi1_cq_exit(struct hfi1_devdata *dd) -{ - struct kthread_worker *worker; - - worker = dd->worker; - if (!worker) - return; - /* blocks future queuing from send_complete() */ - dd->worker = NULL; - smp_wmb(); /* See hfi1_cq_enter */ - flush_kthread_worker(worker); - kthread_stop(worker->task); - kfree(worker); -} diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index e5f3451fecc2..e6a5fede0c02 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1009,8 +1009,6 @@ struct hfi1_devdata { u16 psxmitwait_check_rate; /* high volume overflow errors deferred to tasklet */ struct tasklet_struct error_tasklet; - /* per device cq worker */ - struct kthread_worker *worker; /* MSI-X information */ struct hfi1_msix_entry *msix_entries; diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index b4076b22af14..d1cb2c854f9e 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -765,7 +765,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) /* enable chip even if we have an error, so we can debug cause */ enable_chip(dd); - ret = hfi1_cq_init(dd); done: /* * Set status even if port serdes is not initialized @@ -1312,7 +1311,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd) kfree(dd->boardname); vfree(dd->events); vfree(dd->status); - hfi1_cq_exit(dd); } /* diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index a336d2a40d58..5e50dea12de9 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -304,7 +304,7 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err) if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) { wc.wr_id = qp->r_wr_id; wc.status = err; - hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); } wc.status = IB_WC_WR_FLUSH_ERR; @@ -327,7 +327,7 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err) wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; if (++tail >= qp->r_rq.size) tail = 0; - hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); } wq->tail = tail; diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index bd504decc46d..e80a09261dcc 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -1040,7 +1040,7 @@ void 
hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode]; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; - hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0); } if (++qp->s_last >= qp->s_size) qp->s_last = 0; @@ -1097,7 +1097,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode]; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; - hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0); } if (++qp->s_last >= qp->s_size) qp->s_last = 0; @@ -2157,8 +2157,8 @@ send_last: wc.dlid_path_bits = 0; wc.port_num = 0; /* Signal completion event if the solicited bit is set. */ - hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, - (bth0 & IB_BTH_SOLICITED) != 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, + (bth0 & IB_BTH_SOLICITED) != 0); break; case OP(RDMA_WRITE_FIRST): diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 0b324b17bf09..c659cf806992 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -138,7 +138,7 @@ bad_lkey: wc.opcode = IB_WC_RECV; wc.qp = &qp->ibqp; /* Signal solicited completion event. */ - hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); ret = 0; bail: return ret; @@ -566,8 +566,8 @@ again: wc.sl = qp->remote_ah_attr.sl; wc.port_num = 1; /* Signal completion event if the solicited bit is set. */ - hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, - wqe->wr.send_flags & IB_SEND_SOLICITED); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, + wqe->wr.send_flags & IB_SEND_SOLICITED); send_comp: spin_lock_irqsave(&sqp->s_lock, flags); @@ -909,8 +909,8 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, wc.qp = &qp->ibqp; if (status == IB_WC_SUCCESS) wc.byte_len = wqe->length; - hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, - status != IB_WC_SUCCESS); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, + status != IB_WC_SUCCESS); } last = qp->s_last; diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 0935182d4ac9..75cded36ec17 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -469,9 +469,9 @@ last_imm: wc.dlid_path_bits = 0; wc.port_num = 0; /* Signal completion event if the solicited bit is set. */ - hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, - (ohdr->bth[0] & - cpu_to_be32(IB_BTH_SOLICITED)) != 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, + (ohdr->bth[0] & + cpu_to_be32(IB_BTH_SOLICITED)) != 0); break; case OP(RDMA_WRITE_FIRST): diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index a0e62229d7a1..a4746e83999d 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -247,8 +247,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); wc.port_num = qp->port_num; /* Signal completion event if the solicited bit is set. 
*/ - hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, - swqe->wr.send_flags & IB_SEND_SOLICITED); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, + swqe->wr.send_flags & IB_SEND_SOLICITED); ibp->rvp.n_loop_pkts++; bail_unlock: spin_unlock_irqrestore(&qp->r_lock, flags); @@ -878,9 +878,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1); wc.port_num = qp->port_num; /* Signal completion event if the solicited bit is set. */ - hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, - (ohdr->bth[0] & - cpu_to_be32(IB_BTH_SOLICITED)) != 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, + (ohdr->bth[0] & + cpu_to_be32(IB_BTH_SOLICITED)) != 0); return; drop: diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 70af487c990f..ee969d0dcd77 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1719,7 +1719,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) /* Only need to initialize non-zero fields. */ - spin_lock_init(&dev->n_cqs_lock); spin_lock_init(&dev->n_qps_lock); spin_lock_init(&dev->n_srqs_lock); spin_lock_init(&dev->n_mcast_grps_lock); @@ -1816,11 +1815,11 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->post_send = post_send; ibdev->post_recv = post_receive; ibdev->post_srq_recv = hfi1_post_srq_receive; - ibdev->create_cq = hfi1_create_cq; - ibdev->destroy_cq = hfi1_destroy_cq; - ibdev->resize_cq = hfi1_resize_cq; - ibdev->poll_cq = hfi1_poll_cq; - ibdev->req_notify_cq = hfi1_req_notify_cq; + ibdev->create_cq = NULL; + ibdev->destroy_cq = NULL; + ibdev->resize_cq = NULL; + ibdev->poll_cq = NULL; + ibdev->req_notify_cq = NULL; ibdev->get_dma_mr = NULL; ibdev->reg_user_mr = NULL; ibdev->dereg_mr = NULL; @@ -1860,14 +1859,20 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift; dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16; dd->verbs_dev.rdi.dparms.qpn_res_end = - dd->verbs_dev.rdi.dparms.qpn_res_start + 65535; + dd->verbs_dev.rdi.dparms.qpn_res_start + 65535; dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc; dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free; dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps; dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset; + /* completion queue */ + snprintf(dd->verbs_dev.rdi.dparms.cq_name, + sizeof(dd->verbs_dev.rdi.dparms.cq_name), + "hfi1_cq%d", dd->unit); + dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id; + /* misc settings */ - dd->verbs_dev.rdi.flags = RVT_FLAG_CQ_INIT_DRIVER; + dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */ dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size; dd->verbs_dev.rdi.dparms.nports = dd->num_pports; dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd); diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index c22f0d13ad7f..ef8fb13d2af2 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -64,6 +64,7 @@ #include #include #include +#include struct hfi1_ctxtdata; struct hfi1_pportdata; @@ -81,12 +82,6 @@ struct hfi1_packet; */ #define HFI1_UVERBS_ABI_VERSION 2 -/* - * Define an ib_cq_notify value that is not valid so we know when CQ - * notifications are armed.
- */ -#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1) - #define IB_SEQ_NAK (3 << 29) /* AETH NAK opcode values */ @@ -235,35 +230,6 @@ struct hfi1_mcast { int n_attached; }; -/* - * This structure is used to contain the head pointer, tail pointer, - * and completion queue entries as a single memory allocation so - * it can be mmap'ed into user space. - */ -struct hfi1_cq_wc { - u32 head; /* index of next entry to fill */ - u32 tail; /* index of next ib_poll_cq() entry */ - union { - /* these are actually size ibcq.cqe + 1 */ - struct ib_uverbs_wc uqueue[0]; - struct ib_wc kqueue[0]; - }; -}; - -/* - * The completion queue structure. - */ -struct hfi1_cq { - struct ib_cq ibcq; - struct kthread_work comptask; - struct hfi1_devdata *dd; - spinlock_t lock; /* protect changes in this struct */ - u8 notify; - u8 triggered; - struct hfi1_cq_wc *queue; - struct rvt_mmap_info *ip; -}; - /* * hfi1 specific data structures that will be hidden from rvt after the queue * pair is made common @@ -363,8 +329,6 @@ struct hfi1_ibdev { u64 n_kmem_wait; u64 n_send_schedule; - u32 n_cqs_allocated; /* number of CQs allocated for device */ - spinlock_t n_cqs_lock; u32 n_qps_allocated; /* number of QPs allocated for device */ spinlock_t n_qps_lock; u32 n_srqs_allocated; /* number of SRQs allocated for device */ @@ -395,11 +359,6 @@ struct hfi1_verbs_counters { u32 vl15_dropped; }; -static inline struct hfi1_cq *to_icq(struct ib_cq *ibcq) -{ - return container_of(ibcq, struct hfi1_cq, ibcq); -} - static inline struct rvt_qp *to_iqp(struct ib_qp *ibqp) { return container_of(ibqp, struct rvt_qp, ibqp); } @@ -563,28 +522,6 @@ int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); int hfi1_destroy_srq(struct ib_srq *ibsrq); -int hfi1_cq_init(struct hfi1_devdata *dd); - -void hfi1_cq_exit(struct hfi1_devdata *dd); - -void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int sig); - -int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); - -struct ib_cq *hfi1_create_cq( - struct ib_device *ibdev, - const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, - struct ib_udata *udata); - -int hfi1_destroy_cq(struct ib_cq *ibcq); - -int hfi1_req_notify_cq( - struct ib_cq *ibcq, - enum ib_cq_notify_flags notify_flags); - -int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); - static inline void hfi1_put_ss(struct rvt_sge_state *ss) { while (ss->num_sge) { -- cgit v1.2.3-59-g8ed1b From 83693bd146063e6843efafbedf302014511fee25 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:33 -0800 Subject: staging/rdma/hfi1: Use rdmavt version of post_send This patch removes the post_send and post_one_send functions from the hfi1 driver. The "posting" of sends will be done by rdmavt, which will walk a WQE and queue work. This patch will still provide the capability to schedule that work as well as kick the progress; these entry points are provided to the rdmavt layer, as sketched below.
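A rough sketch of the resulting split, assembled from the hunks below (the driver_f assignments and the _hfi1_do_send wrapper appear in this patch; exactly where rdmavt invokes these hooks is not shown here and is assumed): rdmavt owns WQE posting and calls back into hfi1 to make progress.

	/* hfi1 hands its send engine to rdmavt via driver_f ... */
	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;

	/* ... and the deferred path unwraps the queued work item back to a QP */
	void _hfi1_do_send(struct work_struct *work)
	{
		struct iowait *wait = container_of(work, struct iowait, iowork);
		struct rvt_qp *qp = iowait_to_qp(wait);

		hfi1_do_send(qp);
	}
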
Reviewed-by: Jubin John Signed-off-by: Dean Luick Signed-off-by: Harish Chegondi Signed-off-by: Edward Mascarenhas Signed-off-by: Mike Marciniszyn Signed-off-by: Ira Weiny Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/driver.c | 8 +- drivers/staging/rdma/hfi1/qp.c | 14 +-- drivers/staging/rdma/hfi1/rc.c | 40 +++---- drivers/staging/rdma/hfi1/ruc.c | 28 +++-- drivers/staging/rdma/hfi1/uc.c | 12 +- drivers/staging/rdma/hfi1/ud.c | 10 +- drivers/staging/rdma/hfi1/verbs.c | 201 ++------------------------------ drivers/staging/rdma/hfi1/verbs.h | 35 +----- drivers/staging/rdma/hfi1/verbs_mcast.c | 4 +- 9 files changed, 74 insertions(+), 278 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index ec2286a1e883..d57c08f3b69c 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -335,8 +335,8 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, spin_lock_irqsave(&qp->r_lock, flags); /* Check for valid receive state. */ - if (!(ib_hfi1_state_ops[qp->state] & - HFI1_PROCESS_RECV_OK)) { + if (!(ib_rvt_state_ops[qp->state] & + RVT_PROCESS_RECV_OK)) { ibp->rvp.n_pkt_drops++; } @@ -790,8 +790,8 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet) qp->r_flags &= ~RVT_R_RSP_SEND; spin_lock_irqsave(&qp->s_lock, flags); - if (ib_hfi1_state_ops[qp->state] & - HFI1_PROCESS_OR_FLUSH_SEND) + if (ib_rvt_state_ops[qp->state] & + RVT_PROCESS_OR_FLUSH_SEND) hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); } diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 5e50dea12de9..ff27f1a24af2 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -208,7 +208,7 @@ static void clear_mr_refs(struct rvt_qp *qp, int clr_sends) if (clr_sends) { while (qp->s_last != qp->s_head) { - struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_last); + struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last); unsigned i; for (i = 0; i < wqe->wr.num_sge; i++) { @@ -411,7 +411,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct hfi1_ibdev *dev = to_idev(ibqp->device); - struct rvt_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct hfi1_qp_priv *priv = qp->priv; enum ib_qp_state cur_state, new_state; struct ib_event ev; @@ -710,7 +710,7 @@ bail: int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { - struct rvt_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); attr->qp_state = qp->state; attr->cur_qp_state = attr->qp_state; @@ -829,7 +829,7 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp) */ int hfi1_destroy_qp(struct ib_qp *ibqp) { - struct rvt_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct hfi1_ibdev *dev = to_idev(ibqp->device); struct hfi1_qp_priv *priv = qp->priv; @@ -943,7 +943,7 @@ static int iowait_sleep( priv = qp->priv; spin_lock_irqsave(&qp->s_lock, flags); - if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { /* * If we couldn't queue the DMA request, save the info @@ -1117,7 +1117,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) struct sdma_engine *sde; sde = qp_to_sdma_engine(qp, priv->s_sc); - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); seq_printf(s, "N %d 
%s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n", iter->n, @@ -1225,7 +1225,7 @@ void notify_qp_reset(struct rvt_qp *qp) iowait_init( &priv->s_iowait, 1, - hfi1_do_send, + _hfi1_do_send, iowait_sleep, iowait_wakeup); priv->r_adefered = 0; diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index e80a09261dcc..a30bf300f5cb 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -105,7 +105,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, int middle = 0; /* Don't send an ACK if we aren't supposed to. */ - if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto bail; /* header size in 32-bit words LRH+BTH = (8+12)/4. */ @@ -291,8 +291,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp) make_rc_ack(dev, qp, ohdr, pmtu)) goto done; - if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) { - if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ if (qp->s_last == qp->s_head) @@ -303,7 +303,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp) goto bail; } clear_ahg(qp); - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ? IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); /* will get called again */ @@ -323,10 +323,10 @@ int hfi1_make_rc_req(struct rvt_qp *qp) } /* Send a request. */ - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); switch (qp->s_state) { default: - if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* * Resend an old request or start a new one. @@ -797,7 +797,7 @@ queue_ack: static void reset_psn(struct rvt_qp *qp, u32 psn) { u32 n = qp->s_acked; - struct rvt_swqe *wqe = get_swqe_ptr(qp, n); + struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n); u32 opcode; qp->s_cur = n; @@ -820,7 +820,7 @@ static void reset_psn(struct rvt_qp *qp, u32 psn) n = 0; if (n == qp->s_tail) break; - wqe = get_swqe_ptr(qp, n); + wqe = rvt_get_swqe_ptr(qp, n); diff = cmp_psn(psn, wqe->psn); if (diff < 0) break; @@ -882,7 +882,7 @@ done: */ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait) { - struct rvt_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); + struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); struct hfi1_ibport *ibp; if (qp->s_retry == 0) { @@ -964,7 +964,7 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn) /* Find the work request corresponding to the given PSN. 
*/ for (;;) { - wqe = get_swqe_ptr(qp, n); + wqe = rvt_get_swqe_ptr(qp, n); if (cmp_psn(psn, wqe->lpsn) <= 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_sending_psn = wqe->lpsn + 1; @@ -991,7 +991,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) u32 opcode; u32 psn; - if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) return; /* Find out where the BTH is */ @@ -1018,11 +1018,11 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) && - (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) + (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) start_timer(qp); while (qp->s_last != qp->s_acked) { - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 && cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; @@ -1132,7 +1132,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; qp->s_acked = qp->s_cur; - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); if (qp->s_acked != qp->s_tail) { qp->s_state = OP(SEND_LAST); qp->s_psn = wqe->psn; @@ -1142,7 +1142,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, qp->s_acked = 0; if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) qp->s_draining = 0; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); } return wqe; } @@ -1183,7 +1183,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, ack_psn = psn; if (aeth >> 29) ack_psn--; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); ibp = to_iport(qp->ibqp.device, qp->port_num); /* @@ -1392,7 +1392,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, del_timer(&qp->s_timer); } - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); while (cmp_psn(psn, wqe->lpsn) > 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ || @@ -1474,7 +1474,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, if (unlikely(qp->s_acked == qp->s_tail)) goto ack_done; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); status = IB_WC_SUCCESS; switch (opcode) { @@ -1492,7 +1492,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || opcode != OP(RDMA_READ_RESPONSE_FIRST)) goto ack_done; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; /* @@ -1557,7 +1557,7 @@ read_middle: * have to be careful to copy the data to the right * location. 
*/ - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu); goto read_last; diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index c659cf806992..b47e462c26b3 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -176,7 +176,7 @@ int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only) } spin_lock_irqsave(&rq->lock, flags); - if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { ret = 0; goto unlock; } @@ -383,7 +383,7 @@ static void ruc_loopback(struct rvt_qp *sqp) /* Return if we are already busy processing a work request. */ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) || - !(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_OR_FLUSH_SEND)) + !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND)) goto unlock; sqp->s_flags |= RVT_S_BUSY; @@ -391,11 +391,11 @@ static void ruc_loopback(struct rvt_qp *sqp) again: if (sqp->s_last == sqp->s_head) goto clr_busy; - wqe = get_swqe_ptr(sqp, sqp->s_last); + wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); /* Return if it is not OK to start a new work request. */ - if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_NEXT_SEND_OK)) { - if (!(ib_hfi1_state_ops[sqp->state] & HFI1_FLUSH_SEND)) + if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) { + if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND)) goto clr_busy; /* We are in the error state, flush the work request. */ send_status = IB_WC_WR_FLUSH_ERR; @@ -413,7 +413,7 @@ again: } spin_unlock_irqrestore(&sqp->s_lock, flags); - if (!qp || !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) || + if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) || qp->ibqp.qp_type != sqp->ibqp.qp_type) { ibp->rvp.n_pkt_drops++; /* @@ -593,7 +593,7 @@ rnr_nak: if (sqp->s_rnr_retry_cnt < 7) sqp->s_rnr_retry--; spin_lock_irqsave(&sqp->s_lock, flags); - if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK)) goto clr_busy; sqp->s_flags |= RVT_S_WAIT_RNR; sqp->s_timer.function = hfi1_rc_rnr_retry; @@ -802,6 +802,14 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, /* when sending, force a reschedule every one of these periods */ #define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */ +void _hfi1_do_send(struct work_struct *work) +{ + struct iowait *wait = container_of(work, struct iowait, iowork); + struct rvt_qp *qp = iowait_to_qp(wait); + + hfi1_do_send(qp); +} + /** * hfi1_do_send - perform a send on a QP * @work: contains a pointer to the QP @@ -810,10 +818,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, * exhausted. Only allow one CPU to send a packet per QP (tasklet). * Otherwise, two threads could send packets out of order. 
*/ -void hfi1_do_send(struct work_struct *work) +void hfi1_do_send(struct rvt_qp *qp) { - struct iowait *wait = container_of(work, struct iowait, iowork); - struct rvt_qp *qp = iowait_to_qp(wait); struct hfi1_pkt_state ps; int (*make_req)(struct rvt_qp *qp); unsigned long flags; @@ -883,7 +889,7 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 old_last, last; unsigned i; - if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) return; for (i = 0; i < wqe->wr.num_sge; i++) { diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 75cded36ec17..ec404ff9e9a6 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -76,8 +76,8 @@ int hfi1_make_uc_req(struct rvt_qp *qp) spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) { - if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ if (qp->s_last == qp->s_head) @@ -88,7 +88,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp) goto bail; } clear_ahg(qp); - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); goto done; } @@ -98,12 +98,12 @@ int hfi1_make_uc_req(struct rvt_qp *qp) ohdr = &priv->s_hdr->ibh.u.l.oth; /* Get the next send request. */ - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); qp->s_wqe = NULL; switch (qp->s_state) { default: - if (!(ib_hfi1_state_ops[qp->state] & - HFI1_PROCESS_NEXT_SEND_OK)) + if (!(ib_rvt_state_ops[qp->state] & + RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* Check if send work queue is empty. */ if (qp->s_cur == qp->s_head) { diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index a4746e83999d..e2cbdc86d1a3 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -93,7 +93,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) IB_QPT_UD : qp->ibqp.qp_type; if (dqptype != sqptype || - !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) { + !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { ibp->rvp.n_pkt_drops++; goto drop; } @@ -282,8 +282,8 @@ int hfi1_make_ud_req(struct rvt_qp *qp) spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK)) { - if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. 
*/ if (qp->s_last == qp->s_head) @@ -293,7 +293,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); goto done; } @@ -301,7 +301,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) if (qp->s_cur == qp->s_head) goto bail; - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); next_cur = qp->s_cur + 1; if (next_cur >= qp->s_size) next_cur = 0; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index ee969d0dcd77..b4cfda482254 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -132,28 +132,6 @@ static void verbs_sdma_complete( /* Length of buffer to create verbs txreq cache name */ #define TXREQ_NAME_LEN 24 -/* - * Note that it is OK to post send work requests in the SQE and ERR - * states; hfi1_do_send() will process them and generate error - * completions as per IB 1.2 C10-96. - */ -const int ib_hfi1_state_ops[IB_QPS_ERR + 1] = { - [IB_QPS_RESET] = 0, - [IB_QPS_INIT] = HFI1_POST_RECV_OK, - [IB_QPS_RTR] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK, - [IB_QPS_RTS] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK | - HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK | - HFI1_PROCESS_NEXT_SEND_OK, - [IB_QPS_SQD] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK | - HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK, - [IB_QPS_SQE] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK | - HFI1_POST_SEND_OK | HFI1_FLUSH_SEND, - [IB_QPS_ERR] = HFI1_POST_RECV_OK | HFI1_FLUSH_RECV | - HFI1_POST_SEND_OK | HFI1_FLUSH_SEND, -}; - -static inline void _hfi1_schedule_send(struct rvt_qp *qp); - /* * Translate ib_wr_opcode into ib_wc_opcode. */ @@ -345,169 +323,6 @@ void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release) } } -/** - * post_one_send - post one RC, UC, or UD send work request - * @qp: the QP to post on - * @wr: the work request to send - */ -static int post_one_send(struct rvt_qp *qp, struct ib_send_wr *wr) -{ - struct rvt_swqe *wqe; - u32 next; - int i; - int j; - int acc; - struct rvt_lkey_table *rkt; - struct rvt_pd *pd; - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); - struct hfi1_pportdata *ppd; - struct hfi1_ibport *ibp; - - /* IB spec says that num_sge == 0 is OK. */ - if (unlikely(wr->num_sge > qp->s_max_sge)) - return -EINVAL; - - ppd = &dd->pport[qp->port_num - 1]; - ibp = &ppd->ibport_data; - - /* - * Don't allow RDMA reads or atomic operations on UC or - * undefined operations. - * Make sure buffer is large enough to hold the result for atomics. 
- */ - if (qp->ibqp.qp_type == IB_QPT_UC) { - if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) - return -EINVAL; - } else if (qp->ibqp.qp_type != IB_QPT_RC) { - /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */ - if (wr->opcode != IB_WR_SEND && - wr->opcode != IB_WR_SEND_WITH_IMM) - return -EINVAL; - /* Check UD destination address PD */ - if (qp->ibqp.pd != ud_wr(wr)->ah->pd) - return -EINVAL; - } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) - return -EINVAL; - else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && - (wr->num_sge == 0 || - wr->sg_list[0].length < sizeof(u64) || - wr->sg_list[0].addr & (sizeof(u64) - 1))) - return -EINVAL; - else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) - return -EINVAL; - - next = qp->s_head + 1; - if (next >= qp->s_size) - next = 0; - if (next == qp->s_last) - return -ENOMEM; - - rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table; - pd = ibpd_to_rvtpd(qp->ibqp.pd); - wqe = get_swqe_ptr(qp, qp->s_head); - - - if (qp->ibqp.qp_type != IB_QPT_UC && - qp->ibqp.qp_type != IB_QPT_RC) - memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr)); - else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || - wr->opcode == IB_WR_RDMA_WRITE || - wr->opcode == IB_WR_RDMA_READ) - memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr)); - else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || - wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) - memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr)); - else - memcpy(&wqe->wr, wr, sizeof(wqe->wr)); - - wqe->length = 0; - j = 0; - if (wr->num_sge) { - acc = wr->opcode >= IB_WR_RDMA_READ ? - IB_ACCESS_LOCAL_WRITE : 0; - for (i = 0; i < wr->num_sge; i++) { - u32 length = wr->sg_list[i].length; - int ok; - - if (length == 0) - continue; - ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], - &wr->sg_list[i], acc); - if (!ok) - goto bail_inval_free; - wqe->length += length; - j++; - } - wqe->wr.num_sge = j; - } - if (qp->ibqp.qp_type == IB_QPT_UC || - qp->ibqp.qp_type == IB_QPT_RC) { - if (wqe->length > 0x80000000U) - goto bail_inval_free; - } else { - atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount); - } - wqe->ssn = qp->s_ssn++; - qp->s_head = next; - - return 0; - -bail_inval_free: - /* release mr holds */ - while (j) { - struct rvt_sge *sge = &wqe->sg_list[--j]; - - rvt_put_mr(sge->mr); - } - return -EINVAL; -} - -/** - * post_send - post a send on a QP - * @ibqp: the QP to post the send on - * @wr: the list of work requests to post - * @bad_wr: the first bad WR is put here - * - * This may be called from interrupt context. - */ -static int post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, - struct ib_send_wr **bad_wr) -{ - struct rvt_qp *qp = to_iqp(ibqp); - struct hfi1_qp_priv *priv = qp->priv; - int err = 0; - int call_send; - unsigned long flags; - unsigned nreq = 0; - - spin_lock_irqsave(&qp->s_lock, flags); - - /* Check that state is OK to post send. 
*/ - if (unlikely(!(ib_hfi1_state_ops[qp->state] & HFI1_POST_SEND_OK))) { - spin_unlock_irqrestore(&qp->s_lock, flags); - return -EINVAL; - } - - /* sq empty and not list -> call send */ - call_send = qp->s_head == qp->s_last && !wr->next; - - for (; wr; wr = wr->next) { - err = post_one_send(qp, wr); - if (unlikely(err)) { - *bad_wr = wr; - goto bail; - } - nreq++; - } -bail: - spin_unlock_irqrestore(&qp->s_lock, flags); - if (nreq && !call_send) - _hfi1_schedule_send(qp); - if (nreq && call_send) - hfi1_do_send(&priv->s_iowait.iowork); - return err; -} - /** * post_receive - post a receive on a QP * @ibqp: the QP to post the receive on @@ -519,13 +334,13 @@ bail: static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { - struct rvt_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct rvt_rwq *wq = qp->r_rq.wq; unsigned long flags; int ret; /* Check that state is OK to post receive. */ - if (!(ib_hfi1_state_ops[qp->state] & HFI1_POST_RECV_OK) || !wq) { + if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) { *bad_wr = wr; ret = -EINVAL; goto bail; @@ -576,7 +391,7 @@ static inline int qp_ok(int opcode, struct hfi1_packet *packet) { struct hfi1_ibport *ibp; - if (!(ib_hfi1_state_ops[packet->qp->state] & HFI1_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK)) goto dropit; if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) || (opcode == IB_OPCODE_CNP)) @@ -737,7 +552,7 @@ static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, if (!tx) { spin_lock_irqsave(&qp->s_lock, flags); write_seqlock(&dev->iowait_lock); - if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK && + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK && list_empty(&priv->s_iowait.list)) { dev->n_txwait++; qp->s_flags |= RVT_S_WAIT_TX; @@ -855,7 +670,7 @@ static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp) int ret = 0; spin_lock_irqsave(&qp->s_lock, flags); - if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { write_seqlock(&dev->iowait_lock); if (list_empty(&priv->s_iowait.list)) { if (list_empty(&dev->memwait)) @@ -1085,7 +900,7 @@ static int no_bufs_available(struct rvt_qp *qp, struct send_context *sc) * enabling the PIO avail interrupt. 
*/ spin_lock_irqsave(&qp->s_lock, flags); - if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) { + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { write_seqlock(&dev->iowait_lock); if (list_empty(&priv->s_iowait.list)) { struct hfi1_ibdev *dev = &dd->verbs_dev; @@ -1812,7 +1627,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->modify_qp = hfi1_modify_qp; ibdev->query_qp = hfi1_query_qp; ibdev->destroy_qp = hfi1_destroy_qp; - ibdev->post_send = post_send; + ibdev->post_send = NULL; ibdev->post_recv = post_receive; ibdev->post_srq_recv = hfi1_post_srq_receive; ibdev->create_cq = NULL; @@ -1864,6 +1679,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free; dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps; dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset; + dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send; + dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send; /* completeion queue */ snprintf(dd->verbs_dev.rdi.dparms.cq_name, diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index ef8fb13d2af2..8e032a79b818 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -92,17 +92,6 @@ struct hfi1_packet; #define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63 #define IB_NAK_INVALID_RD_REQUEST 0x64 -/* Flags for checking QP state (see ib_hfi1_state_ops[]) */ -#define HFI1_POST_SEND_OK 0x01 -#define HFI1_POST_RECV_OK 0x02 -#define HFI1_PROCESS_RECV_OK 0x04 -#define HFI1_PROCESS_SEND_OK 0x08 -#define HFI1_PROCESS_NEXT_SEND_OK 0x10 -#define HFI1_FLUSH_SEND 0x20 -#define HFI1_FLUSH_RECV 0x40 -#define HFI1_PROCESS_OR_FLUSH_SEND \ - (HFI1_PROCESS_SEND_OK | HFI1_FLUSH_SEND) - /* IB Performance Manager status values */ #define IB_PMA_SAMPLE_STATUS_DONE 0x00 #define IB_PMA_SAMPLE_STATUS_STARTED 0x01 @@ -255,19 +244,6 @@ struct hfi1_pkt_state { #define HFI1_PSN_CREDIT 16 -/* - * Since struct rvt_swqe is not a fixed size, we can't simply index into - * struct hfi1_qp.s_wq. This function does the array index computation. - */ -static inline struct rvt_swqe *get_swqe_ptr(struct rvt_qp *qp, - unsigned n) -{ - return (struct rvt_swqe *)((char *)qp->s_wq + - (sizeof(struct rvt_swqe) + - qp->s_max_sge * - sizeof(struct rvt_sge)) * n); -} - /* * Since struct rvt_rwqe is not a fixed size, we can't simply index into * struct rvt_rwq.wq. This function does the array index computation. 
@@ -359,11 +335,6 @@ struct hfi1_verbs_counters { u32 vl15_dropped; }; -static inline struct rvt_qp *to_iqp(struct ib_qp *ibqp) -{ - return container_of(ibqp, struct rvt_qp, ibqp); -} - static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev) { struct rvt_dev_info *rdi; @@ -544,7 +515,9 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, u32 bth0, u32 bth2, int middle); -void hfi1_do_send(struct work_struct *work); +void _hfi1_do_send(struct work_struct *work); + +void hfi1_do_send(struct rvt_qp *qp); void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status); @@ -577,7 +550,7 @@ extern const enum ib_wc_opcode ib_hfi1_wc_opcode[]; extern const u8 hdr_len_by_opcode[]; -extern const int ib_hfi1_state_ops[]; +extern const int ib_rvt_state_ops[]; extern __be64 ib_hfi1_sys_image_guid; /* in network order */ diff --git a/drivers/staging/rdma/hfi1/verbs_mcast.c b/drivers/staging/rdma/hfi1/verbs_mcast.c index aa3f560d2f43..175396b8b347 100644 --- a/drivers/staging/rdma/hfi1/verbs_mcast.c +++ b/drivers/staging/rdma/hfi1/verbs_mcast.c @@ -241,7 +241,7 @@ bail: int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { - struct rvt_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct hfi1_ibdev *dev = to_idev(ibqp->device); struct hfi1_ibport *ibp; struct hfi1_mcast *mcast; @@ -299,7 +299,7 @@ bail: int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { - struct rvt_qp *qp = to_iqp(ibqp); + struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); struct hfi1_ibdev *dev = to_idev(ibqp->device); struct hfi1_ibport *ibp = to_iport(ibqp->device, qp->port_num); struct hfi1_mcast *mcast = NULL; -- cgit v1.2.3-59-g8ed1b From 0facc5a1635252a45ab2fdb119309a3c24e9be82 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:39 -0800 Subject: staging/rdma/hfi1: Remove multicast verbs functions Multicast is now supported by rdmavt. Remove the verbs multicast functions and use that. 
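For illustration, the receive path's group lookup now reduces to the
rdmavt calls introduced in the verbs.c hunk below. A minimal sketch of
that flow (types and reference counting as in the hunk; this is not a
complete receive handler):

	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *p;

	/* rvt_mcast_find() returns the group with a reference held */
	mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
	if (!mcast)
		goto drop;	/* unknown multicast GID */

	list_for_each_entry_rcu(p, &mcast->qp_list, list) {
		/* deliver the packet to each attached QP via p->qp */
	}

	/* drop the reference; rvt_multicast_detach() may be waiting */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);

The attach and detach entry points are likewise taken over by rdmavt,
which is why the driver clears ibdev->attach_mcast and
ibdev->detach_mcast in the hunk below.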
Reviewed-by: Mike Marciniszyn Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/qp.c | 2 - drivers/staging/rdma/hfi1/verbs.c | 13 +- drivers/staging/rdma/hfi1/verbs.h | 29 --- drivers/staging/rdma/hfi1/verbs_mcast.c | 385 -------------------------------- 5 files changed, 7 insertions(+), 424 deletions(-) delete mode 100644 drivers/staging/rdma/hfi1/verbs_mcast.c (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index 7797f2c0f01f..0069796add25 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o hfi1-y := chip.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ init.o intr.o mad.o pcie.o pio.o pio_copy.o \ qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \ - uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs_mcast.o verbs.o + uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o hfi1-$(CONFIG_DEBUG_FS) += debugfs.o CFLAGS_trace.o = -I$(src) diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index ff27f1a24af2..748a3a739859 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -1205,8 +1205,6 @@ unsigned free_all_qps(struct rvt_dev_info *rdi) for (n = 0; n < dd->num_pports; n++) { struct hfi1_ibport *ibp = &dd->pport[n].ibport_data; - if (!hfi1_mcast_tree_empty(ibp)) - qp_inuse++; rcu_read_lock(); if (rcu_dereference(ibp->rvp.qp[0])) qp_inuse++; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index b4cfda482254..2fed28487c89 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -451,12 +451,12 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) lid = be16_to_cpu(hdr->lrh[1]); if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) { - struct hfi1_mcast *mcast; - struct hfi1_mcast_qp *p; + struct rvt_mcast *mcast; + struct rvt_mcast_qp *p; if (lnh != HFI1_LRH_GRH) goto drop; - mcast = hfi1_mcast_find(ibp, &hdr->u.l.grh.dgid); + mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid); if (mcast == NULL) goto drop; list_for_each_entry_rcu(p, &mcast->qp_list, list) { @@ -467,7 +467,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) spin_unlock_irqrestore(&packet->qp->r_lock, flags); } /* - * Notify hfi1_multicast_detach() if it is waiting for us + * Notify rvt_multicast_detach() if it is waiting for us * to finish. 
*/ if (atomic_dec_return(&mcast->refcount) <= 1) @@ -1536,7 +1536,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) spin_lock_init(&dev->n_qps_lock); spin_lock_init(&dev->n_srqs_lock); - spin_lock_init(&dev->n_mcast_grps_lock); init_timer(&dev->mem_timer); dev->mem_timer.function = mem_timer; dev->mem_timer.data = (unsigned long) dev; @@ -1644,8 +1643,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->map_phys_fmr = NULL; ibdev->unmap_fmr = NULL; ibdev->dealloc_fmr = NULL; - ibdev->attach_mcast = hfi1_multicast_attach; - ibdev->detach_mcast = hfi1_multicast_detach; + ibdev->attach_mcast = NULL; + ibdev->detach_mcast = NULL; ibdev->process_mad = hfi1_process_mad; ibdev->mmap = NULL; ibdev->dma_ops = NULL; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 8e032a79b818..8e82cf0fe3fd 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -200,25 +200,6 @@ struct tx_pio_header { struct hfi1_pio_header phdr; } ____cacheline_aligned; -/* - * There is one struct hfi1_mcast for each multicast GID. - * All attached QPs are then stored as a list of - * struct hfi1_mcast_qp. - */ -struct hfi1_mcast_qp { - struct list_head list; - struct rvt_qp *qp; -}; - -struct hfi1_mcast { - struct rb_node rb_node; - union ib_gid mgid; - struct list_head qp_list; - wait_queue_head_t wait; - atomic_t refcount; - int n_attached; -}; - /* * hfi1 specific data structures that will be hidden from rvt after the queue * pair is made common @@ -309,8 +290,6 @@ struct hfi1_ibdev { spinlock_t n_qps_lock; u32 n_srqs_allocated; /* number of SRQs allocated for device */ spinlock_t n_srqs_lock; - u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ - spinlock_t n_mcast_grps_lock; #ifdef CONFIG_DEBUG_FS /* per HFI debugfs */ struct dentry *hfi1_ibdev_dbg; @@ -434,14 +413,6 @@ static inline u32 delta_psn(u32 a, u32 b) return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT; } -struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid); - -int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); - -int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); - -int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp); - struct verbs_txreq; void hfi1_put_txreq(struct verbs_txreq *tx); diff --git a/drivers/staging/rdma/hfi1/verbs_mcast.c b/drivers/staging/rdma/hfi1/verbs_mcast.c deleted file mode 100644 index 175396b8b347..000000000000 --- a/drivers/staging/rdma/hfi1/verbs_mcast.c +++ /dev/null @@ -1,385 +0,0 @@ -/* - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Corporation. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include - -#include "hfi.h" - -/** - * mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct - * @qp: the QP to link - */ -static struct hfi1_mcast_qp *mcast_qp_alloc(struct rvt_qp *qp) -{ - struct hfi1_mcast_qp *mqp; - - mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); - if (!mqp) - goto bail; - - mqp->qp = qp; - atomic_inc(&qp->refcount); - -bail: - return mqp; -} - -static void mcast_qp_free(struct hfi1_mcast_qp *mqp) -{ - struct rvt_qp *qp = mqp->qp; - - /* Notify hfi1_destroy_qp() if it is waiting. */ - if (atomic_dec_and_test(&qp->refcount)) - wake_up(&qp->wait); - - kfree(mqp); -} - -/** - * mcast_alloc - allocate the multicast GID structure - * @mgid: the multicast GID - * - * A list of QPs will be attached to this structure. - */ -static struct hfi1_mcast *mcast_alloc(union ib_gid *mgid) -{ - struct hfi1_mcast *mcast; - - mcast = kmalloc(sizeof(*mcast), GFP_KERNEL); - if (!mcast) - goto bail; - - mcast->mgid = *mgid; - INIT_LIST_HEAD(&mcast->qp_list); - init_waitqueue_head(&mcast->wait); - atomic_set(&mcast->refcount, 0); - mcast->n_attached = 0; - -bail: - return mcast; -} - -static void mcast_free(struct hfi1_mcast *mcast) -{ - struct hfi1_mcast_qp *p, *tmp; - - list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) - mcast_qp_free(p); - - kfree(mcast); -} - -/** - * hfi1_mcast_find - search the global table for the given multicast GID - * @ibp: the IB port structure - * @mgid: the multicast GID to search for - * - * Returns NULL if not found. - * - * The caller is responsible for decrementing the reference count if found. 
- */ -struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid) -{ - struct rb_node *n; - unsigned long flags; - struct hfi1_mcast *mcast; - - spin_lock_irqsave(&ibp->rvp.lock, flags); - n = ibp->rvp.mcast_tree.rb_node; - while (n) { - int ret; - - mcast = rb_entry(n, struct hfi1_mcast, rb_node); - - ret = memcmp(mgid->raw, mcast->mgid.raw, - sizeof(union ib_gid)); - if (ret < 0) - n = n->rb_left; - else if (ret > 0) - n = n->rb_right; - else { - atomic_inc(&mcast->refcount); - spin_unlock_irqrestore(&ibp->rvp.lock, flags); - goto bail; - } - } - spin_unlock_irqrestore(&ibp->rvp.lock, flags); - - mcast = NULL; - -bail: - return mcast; -} - -/** - * mcast_add - insert mcast GID into table and attach QP struct - * @mcast: the mcast GID table - * @mqp: the QP to attach - * - * Return zero if both were added. Return EEXIST if the GID was already in - * the table but the QP was added. Return ESRCH if the QP was already - * attached and neither structure was added. - */ -static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp, - struct hfi1_mcast *mcast, struct hfi1_mcast_qp *mqp) -{ - struct rb_node **n = &ibp->rvp.mcast_tree.rb_node; - struct rb_node *pn = NULL; - int ret; - - spin_lock_irq(&ibp->rvp.lock); - - while (*n) { - struct hfi1_mcast *tmcast; - struct hfi1_mcast_qp *p; - - pn = *n; - tmcast = rb_entry(pn, struct hfi1_mcast, rb_node); - - ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw, - sizeof(union ib_gid)); - if (ret < 0) { - n = &pn->rb_left; - continue; - } - if (ret > 0) { - n = &pn->rb_right; - continue; - } - - /* Search the QP list to see if this is already there. */ - list_for_each_entry_rcu(p, &tmcast->qp_list, list) { - if (p->qp == mqp->qp) { - ret = ESRCH; - goto bail; - } - } - if (tmcast->n_attached == hfi1_max_mcast_qp_attached) { - ret = ENOMEM; - goto bail; - } - - tmcast->n_attached++; - - list_add_tail_rcu(&mqp->list, &tmcast->qp_list); - ret = EEXIST; - goto bail; - } - - spin_lock(&dev->n_mcast_grps_lock); - if (dev->n_mcast_grps_allocated == hfi1_max_mcast_grps) { - spin_unlock(&dev->n_mcast_grps_lock); - ret = ENOMEM; - goto bail; - } - - dev->n_mcast_grps_allocated++; - spin_unlock(&dev->n_mcast_grps_lock); - - mcast->n_attached++; - - list_add_tail_rcu(&mqp->list, &mcast->qp_list); - - atomic_inc(&mcast->refcount); - rb_link_node(&mcast->rb_node, pn, n); - rb_insert_color(&mcast->rb_node, &ibp->rvp.mcast_tree); - - ret = 0; - -bail: - spin_unlock_irq(&ibp->rvp.lock); - - return ret; -} - -int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) -{ - struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); - struct hfi1_ibdev *dev = to_idev(ibqp->device); - struct hfi1_ibport *ibp; - struct hfi1_mcast *mcast; - struct hfi1_mcast_qp *mqp; - int ret; - - if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { - ret = -EINVAL; - goto bail; - } - - /* - * Allocate data structures since its better to do this outside of - * spin locks and it will most likely be needed. - */ - mcast = mcast_alloc(gid); - if (mcast == NULL) { - ret = -ENOMEM; - goto bail; - } - mqp = mcast_qp_alloc(qp); - if (mqp == NULL) { - mcast_free(mcast); - ret = -ENOMEM; - goto bail; - } - ibp = to_iport(ibqp->device, qp->port_num); - switch (mcast_add(dev, ibp, mcast, mqp)) { - case ESRCH: - /* Neither was used: OK to attach the same QP twice. */ - mcast_qp_free(mqp); - mcast_free(mcast); - break; - - case EEXIST: /* The mcast wasn't used */ - mcast_free(mcast); - break; - - case ENOMEM: - /* Exceeded the maximum number of mcast groups. 
*/ - mcast_qp_free(mqp); - mcast_free(mcast); - ret = -ENOMEM; - goto bail; - - default: - break; - } - - ret = 0; - -bail: - return ret; -} - -int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) -{ - struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); - struct hfi1_ibdev *dev = to_idev(ibqp->device); - struct hfi1_ibport *ibp = to_iport(ibqp->device, qp->port_num); - struct hfi1_mcast *mcast = NULL; - struct hfi1_mcast_qp *p, *tmp; - struct rb_node *n; - int last = 0; - int ret; - - if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { - ret = -EINVAL; - goto bail; - } - - spin_lock_irq(&ibp->rvp.lock); - - /* Find the GID in the mcast table. */ - n = ibp->rvp.mcast_tree.rb_node; - while (1) { - if (n == NULL) { - spin_unlock_irq(&ibp->rvp.lock); - ret = -EINVAL; - goto bail; - } - - mcast = rb_entry(n, struct hfi1_mcast, rb_node); - ret = memcmp(gid->raw, mcast->mgid.raw, - sizeof(union ib_gid)); - if (ret < 0) - n = n->rb_left; - else if (ret > 0) - n = n->rb_right; - else - break; - } - - /* Search the QP list. */ - list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) { - if (p->qp != qp) - continue; - /* - * We found it, so remove it, but don't poison the forward - * link until we are sure there are no list walkers. - */ - list_del_rcu(&p->list); - mcast->n_attached--; - - /* If this was the last attached QP, remove the GID too. */ - if (list_empty(&mcast->qp_list)) { - rb_erase(&mcast->rb_node, &ibp->rvp.mcast_tree); - last = 1; - } - break; - } - - spin_unlock_irq(&ibp->rvp.lock); - - if (p) { - /* - * Wait for any list walkers to finish before freeing the - * list element. - */ - wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); - mcast_qp_free(p); - } - if (last) { - atomic_dec(&mcast->refcount); - wait_event(mcast->wait, !atomic_read(&mcast->refcount)); - mcast_free(mcast); - spin_lock_irq(&dev->n_mcast_grps_lock); - dev->n_mcast_grps_allocated--; - spin_unlock_irq(&dev->n_mcast_grps_lock); - } - - ret = 0; - -bail: - return ret; -} - -int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp) -{ - return !ibp->rvp.mcast_tree.rb_node; -} -- cgit v1.2.3-59-g8ed1b From ec4274f1aeb5e5012c1e46ba11ceef7767af8b3d Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:44 -0800 Subject: staging/rdma/hfi1: Remove modify queue pair from hfi1 In addition to removing the modify queue pair verb from hfi1, we also remove ancillary functions which existed only for modify queue pair and are also already present in rdmavt.
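The net effect is that rdmavt now owns the generic modify-QP state
machine and calls back into hfi1 only for device-specific work, through
the driver_f hooks registered in the verbs.c hunk below. A rough sketch
of the resulting split (the registration lines are quoted from this
patch; the rvt_modify_qp() flow is paraphrased, not quoted):

	/* hfi1 registers its device-specific hooks with rdmavt */
	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;

	/* rdmavt's rvt_modify_qp(), roughly: */
	if (rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;	/* driver rejected the attributes */
	/* ... generic attribute validation and state transition ... */
	rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

With this split, hfi1_check_modify_qp() keeps only the SDMA engine
checks for the primary and alternate paths, and hfi1_modify_qp() only
refreshes the cached service class and SDMA engine; everything else
moves to rdmavt.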
Reviewed-by: Mike Marciniszyn Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rdmavt/qp.c | 7 - drivers/staging/rdma/hfi1/common.h | 1 - drivers/staging/rdma/hfi1/driver.c | 19 +- drivers/staging/rdma/hfi1/qp.c | 631 +++++++------------------------------ drivers/staging/rdma/hfi1/qp.h | 65 +--- drivers/staging/rdma/hfi1/rc.c | 24 +- drivers/staging/rdma/hfi1/ruc.c | 17 +- drivers/staging/rdma/hfi1/srq.c | 4 +- drivers/staging/rdma/hfi1/trace.c | 2 +- drivers/staging/rdma/hfi1/trace.h | 35 +- drivers/staging/rdma/hfi1/uc.c | 16 +- drivers/staging/rdma/hfi1/ud.c | 15 +- drivers/staging/rdma/hfi1/verbs.c | 24 +- drivers/staging/rdma/hfi1/verbs.h | 31 +- 14 files changed, 205 insertions(+), 686 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index e8d0da89ea8e..322de64164f7 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1133,13 +1133,6 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, qp->remote_ah_attr = qp->alt_ah_attr; qp->port_num = qp->alt_ah_attr.port_num; qp->s_pkey_index = qp->s_alt_pkey_index; - - /* - * Ignored by drivers which do not support it. Not - * really worth creating a call back into the driver - * just to set a flag. - */ - qp->s_flags |= RVT_S_AHG_CLEAR; } } diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h index cb5ca794ac08..dcf8edf910b5 100644 --- a/drivers/staging/rdma/hfi1/common.h +++ b/drivers/staging/rdma/hfi1/common.h @@ -345,7 +345,6 @@ struct hfi1_message_header { #define HFI1_AETH_CREDIT_MASK 0x1F #define HFI1_AETH_CREDIT_INVAL 0x1F #define HFI1_MSN_MASK 0xFFFFFF -#define HFI1_QPN_MASK 0xFFFFFF #define HFI1_FECN_SHIFT 31 #define HFI1_FECN_MASK 1 #define HFI1_FECN_SMASK BIT(HFI1_FECN_SHIFT) diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index d57c08f3b69c..d848cc01f07a 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -282,6 +282,8 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, u32 rte = rhf_rcv_type_err(packet->rhf); int lnh = be16_to_cpu(rhdr->lrh[0]) & 3; struct hfi1_ibport *ibp = &ppd->ibport_data; + struct hfi1_devdata *dd = ppd->dd; + struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR)) return; @@ -316,13 +318,13 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, goto drop; /* Get the destination QP number. 
*/ - qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; + qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) { struct rvt_qp *qp; unsigned long flags; rcu_read_lock(); - qp = hfi1_lookup_qpn(ibp, qp_num); + qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); if (!qp) { rcu_read_unlock(); goto drop; @@ -397,9 +399,9 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, sc5 |= 0x10; sl = ibp->sc_to_sl[sc5]; - lqpn = be32_to_cpu(bth[1]) & HFI1_QPN_MASK; + lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK; rcu_read_lock(); - qp = hfi1_lookup_qpn(ibp, lqpn); + qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn); if (qp == NULL) { rcu_read_unlock(); goto drop; @@ -470,7 +472,7 @@ static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr, case IB_QPT_GSI: case IB_QPT_UD: rlid = be16_to_cpu(hdr->lrh[3]); - rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK; + rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK; svc_type = IB_CC_SVCTYPE_UD; break; case IB_QPT_UC: @@ -500,7 +502,7 @@ static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr, if (bth1 & HFI1_BECN_SMASK) { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - u32 lqpn = bth1 & HFI1_QPN_MASK; + u32 lqpn = bth1 & RVT_QPN_MASK; u8 sl = ibp->sc_to_sl[sc5]; process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type); @@ -599,6 +601,7 @@ static void prescan_rxq(struct hfi1_packet *packet) struct hfi1_ib_header *hdr; struct hfi1_other_headers *ohdr; struct ib_grh *grh = NULL; + struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; u64 rhf = rhf_to_cpu(rhf_addr); u32 etype = rhf_rcv_type(rhf), qpn, bth1; int is_ecn = 0; @@ -631,9 +634,9 @@ static void prescan_rxq(struct hfi1_packet *packet) if (!is_ecn) goto next; - qpn = bth1 & HFI1_QPN_MASK; + qpn = bth1 & RVT_QPN_MASK; rcu_read_lock(); - qp = hfi1_lookup_qpn(ibp, qpn); + qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn); if (qp == NULL) { rcu_read_unlock(); diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 748a3a739859..1e6ca4fb7925 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -54,6 +54,8 @@ #include #include #include +#include +#include #include "hfi.h" #include "qp.h" @@ -115,230 +117,6 @@ static const u16 credit_table[31] = { 32768 /* 1E */ }; -static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn) -{ - struct rvt_qpn_map *map; - - map = qpt->map + qpn / RVT_BITS_PER_PAGE; - if (map->page) - clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); -} - -/* - * Put the QP into the hash table. - * The hash table holds a reference to the QP. - */ -static void insert_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) -{ - struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - unsigned long flags; - - atomic_inc(&qp->refcount); - spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags); - - if (qp->ibqp.qp_num <= 1) { - rcu_assign_pointer(ibp->rvp.qp[qp->ibqp.qp_num], qp); - } else { - u32 n = qpn_hash(dev->rdi.qp_dev, qp->ibqp.qp_num); - - qp->next = dev->rdi.qp_dev->qp_table[n]; - rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp); - trace_hfi1_qpinsert(qp, n); - } - - spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags); -} - -/* - * Remove the QP from the table so it can't be found asynchronously by - * the receive interrupt routine. 
- */ -static void remove_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp) -{ - struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - u32 n = qpn_hash(dev->rdi.qp_dev, qp->ibqp.qp_num); - unsigned long flags; - int removed = 1; - - spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags); - - if (rcu_dereference_protected(ibp->rvp.qp[0], - lockdep_is_held( - &dev->rdi.qp_dev->qpt_lock)) == qp) { - RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); - } else if (rcu_dereference_protected(ibp->rvp.qp[1], - lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) { - RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); - } else { - struct rvt_qp *q; - struct rvt_qp __rcu **qpp; - - removed = 0; - qpp = &dev->rdi.qp_dev->qp_table[n]; - for (; (q = rcu_dereference_protected(*qpp, - lockdep_is_held(&dev->rdi.qp_dev->qpt_lock))) - != NULL; - qpp = &q->next) - if (q == qp) { - RCU_INIT_POINTER(*qpp, - rcu_dereference_protected(qp->next, - lockdep_is_held(&dev->rdi.qp_dev->qpt_lock))); - removed = 1; - trace_hfi1_qpremove(qp, n); - break; - } - } - - spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags); - if (removed) { - synchronize_rcu(); - if (atomic_dec_and_test(&qp->refcount)) - wake_up(&qp->wait); - } -} - -static void clear_mr_refs(struct rvt_qp *qp, int clr_sends) -{ - unsigned n; - - if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) - hfi1_put_ss(&qp->s_rdma_read_sge); - - hfi1_put_ss(&qp->r_sge); - - if (clr_sends) { - while (qp->s_last != qp->s_head) { - struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last); - unsigned i; - - for (i = 0; i < wqe->wr.num_sge; i++) { - struct rvt_sge *sge = &wqe->sg_list[i]; - - rvt_put_mr(sge->mr); - } - if (qp->ibqp.qp_type == IB_QPT_UD || - qp->ibqp.qp_type == IB_QPT_SMI || - qp->ibqp.qp_type == IB_QPT_GSI) - atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); - if (++qp->s_last >= qp->s_size) - qp->s_last = 0; - } - if (qp->s_rdma_mr) { - rvt_put_mr(qp->s_rdma_mr); - qp->s_rdma_mr = NULL; - } - } - - if (qp->ibqp.qp_type != IB_QPT_RC) - return; - - for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { - struct rvt_ack_entry *e = &qp->s_ack_queue[n]; - - if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && - e->rdma_sge.mr) { - rvt_put_mr(e->rdma_sge.mr); - e->rdma_sge.mr = NULL; - } - } -} - -/** - * hfi1_error_qp - put a QP into the error state - * @qp: the QP to put into the error state - * @err: the receive completion error to signal if a RWQE is active - * - * Flushes both send and receive work queues. - * Returns true if last WQE event should be generated. - * The QP r_lock and s_lock should be held and interrupts disabled. - * If we are already in error state, just return. 
- */ -int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err) -{ - struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); - struct hfi1_qp_priv *priv = qp->priv; - struct ib_wc wc; - int ret = 0; - - if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) - goto bail; - - qp->state = IB_QPS_ERR; - - if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { - qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); - del_timer(&qp->s_timer); - } - - if (qp->s_flags & RVT_S_ANY_WAIT_SEND) - qp->s_flags &= ~RVT_S_ANY_WAIT_SEND; - - write_seqlock(&dev->iowait_lock); - if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) { - qp->s_flags &= ~RVT_S_ANY_WAIT_IO; - list_del_init(&priv->s_iowait.list); - if (atomic_dec_and_test(&qp->refcount)) - wake_up(&qp->wait); - } - write_sequnlock(&dev->iowait_lock); - - if (!(qp->s_flags & RVT_S_BUSY)) { - qp->s_hdrwords = 0; - if (qp->s_rdma_mr) { - rvt_put_mr(qp->s_rdma_mr); - qp->s_rdma_mr = NULL; - } - flush_tx_list(qp); - } - - /* Schedule the sending tasklet to drain the send work queue. */ - if (qp->s_last != qp->s_head) - hfi1_schedule_send(qp); - - clear_mr_refs(qp, 0); - - memset(&wc, 0, sizeof(wc)); - wc.qp = &qp->ibqp; - wc.opcode = IB_WC_RECV; - - if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) { - wc.wr_id = qp->r_wr_id; - wc.status = err; - rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); - } - wc.status = IB_WC_WR_FLUSH_ERR; - - if (qp->r_rq.wq) { - struct rvt_rwq *wq; - u32 head; - u32 tail; - - spin_lock(&qp->r_rq.lock); - - /* sanity check pointers before trusting them */ - wq = qp->r_rq.wq; - head = wq->head; - if (head >= qp->r_rq.size) - head = 0; - tail = wq->tail; - if (tail >= qp->r_rq.size) - tail = 0; - while (tail != head) { - wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; - if (++tail >= qp->r_rq.size) - tail = 0; - rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); - } - wq->tail = tail; - - spin_unlock(&qp->r_rq.lock); - } else if (qp->ibqp.event_handler) - ret = 1; - -bail: - return ret; -} - static void flush_tx_list(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; @@ -397,314 +175,49 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) return ib_mtu_enum_to_int(mtu); } - -/** - * hfi1_modify_qp - modify the attributes of a queue pair - * @ibqp: the queue pair who's attributes we're modifying - * @attr: the new attributes - * @attr_mask: the mask of attributes to modify - * @udata: user data for libibverbs.so - * - * Returns 0 on success, otherwise returns an errno. - */ -int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_udata *udata) +int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) { + struct ib_qp *ibqp = &qp->ibqp; struct hfi1_ibdev *dev = to_idev(ibqp->device); - struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); - struct hfi1_qp_priv *priv = qp->priv; - enum ib_qp_state cur_state, new_state; - struct ib_event ev; - int lastwqe = 0; - int mig = 0; - int ret; - u32 pmtu = 0; /* for gcc warning only */ struct hfi1_devdata *dd = dd_from_dev(dev); - - spin_lock_irq(&qp->r_lock); - spin_lock(&qp->s_lock); - - cur_state = attr_mask & IB_QP_CUR_STATE ? - attr->cur_qp_state : qp->state; - new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; - - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, - attr_mask, IB_LINK_LAYER_UNSPECIFIED)) - goto inval; + u8 sc; if (attr_mask & IB_QP_AV) { - u8 sc; - - if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) - goto inval; - if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr)) - goto inval; sc = ah_to_sc(ibqp->device, &attr->ah_attr); if (!qp_to_sdma_engine(qp, sc) && dd->flags & HFI1_HAS_SEND_DMA) - goto inval; + return -EINVAL; } if (attr_mask & IB_QP_ALT_PATH) { - u8 sc; - - if (attr->alt_ah_attr.dlid >= - be16_to_cpu(IB_MULTICAST_LID_BASE)) - goto inval; - if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) - goto inval; - if (attr->alt_pkey_index >= hfi1_get_npkeys(dd)) - goto inval; sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr); if (!qp_to_sdma_engine(qp, sc) && dd->flags & HFI1_HAS_SEND_DMA) - goto inval; - } - - if (attr_mask & IB_QP_PKEY_INDEX) - if (attr->pkey_index >= hfi1_get_npkeys(dd)) - goto inval; - - if (attr_mask & IB_QP_MIN_RNR_TIMER) - if (attr->min_rnr_timer > 31) - goto inval; - - if (attr_mask & IB_QP_PORT) - if (qp->ibqp.qp_type == IB_QPT_SMI || - qp->ibqp.qp_type == IB_QPT_GSI || - attr->port_num == 0 || - attr->port_num > ibqp->device->phys_port_cnt) - goto inval; - - if (attr_mask & IB_QP_DEST_QPN) - if (attr->dest_qp_num > HFI1_QPN_MASK) - goto inval; - - if (attr_mask & IB_QP_RETRY_CNT) - if (attr->retry_cnt > 7) - goto inval; - - if (attr_mask & IB_QP_RNR_RETRY) - if (attr->rnr_retry > 7) - goto inval; - - /* - * Don't allow invalid path_mtu values. OK to set greater - * than the active mtu (or even the max_cap, if we have tuned - * that to a small mtu. We'll set qp->path_mtu - * to the lesser of requested attribute mtu and active, - * for packetizing messages. - * Note that the QP port has to be set in INIT and MTU in RTR. 
- */ - if (attr_mask & IB_QP_PATH_MTU) { - int mtu, pidx = qp->port_num - 1; - - dd = dd_from_dev(dev); - mtu = verbs_mtu_enum_to_int(ibqp->device, attr->path_mtu); - if (mtu == -1) - goto inval; - - if (mtu > dd->pport[pidx].ibmtu) - pmtu = mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048); - else - pmtu = attr->path_mtu; + return -EINVAL; } - if (attr_mask & IB_QP_PATH_MIG_STATE) { - if (attr->path_mig_state == IB_MIG_REARM) { - if (qp->s_mig_state == IB_MIG_ARMED) - goto inval; - if (new_state != IB_QPS_RTS) - goto inval; - } else if (attr->path_mig_state == IB_MIG_MIGRATED) { - if (qp->s_mig_state == IB_MIG_REARM) - goto inval; - if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD) - goto inval; - if (qp->s_mig_state == IB_MIG_ARMED) - mig = 1; - } else - goto inval; - } - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) - if (attr->max_dest_rd_atomic > HFI1_MAX_RDMA_ATOMIC) - goto inval; - - switch (new_state) { - case IB_QPS_RESET: - if (qp->state != IB_QPS_RESET) { - qp->state = IB_QPS_RESET; - flush_iowait(qp); - qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT); - spin_unlock(&qp->s_lock); - spin_unlock_irq(&qp->r_lock); - /* Stop the sending work queue and retry timer */ - cancel_work_sync(&priv->s_iowait.iowork); - del_timer_sync(&qp->s_timer); - iowait_sdma_drain(&priv->s_iowait); - flush_tx_list(qp); - remove_qp(dev, qp); - wait_event(qp->wait, !atomic_read(&qp->refcount)); - spin_lock_irq(&qp->r_lock); - spin_lock(&qp->s_lock); - clear_mr_refs(qp, 1); - clear_ahg(qp); - rvt_reset_qp(&dev->rdi, qp, ibqp->qp_type); - } - break; - - case IB_QPS_RTR: - /* Allow event to re-trigger if QP set to RTR more than once */ - qp->r_flags &= ~RVT_R_COMM_EST; - qp->state = new_state; - break; - - case IB_QPS_SQD: - qp->s_draining = qp->s_last != qp->s_cur; - qp->state = new_state; - break; - - case IB_QPS_SQE: - if (qp->ibqp.qp_type == IB_QPT_RC) - goto inval; - qp->state = new_state; - break; - - case IB_QPS_ERR: - lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR); - break; - - default: - qp->state = new_state; - break; - } - - if (attr_mask & IB_QP_PKEY_INDEX) - qp->s_pkey_index = attr->pkey_index; - - if (attr_mask & IB_QP_PORT) - qp->port_num = attr->port_num; - - if (attr_mask & IB_QP_DEST_QPN) - qp->remote_qpn = attr->dest_qp_num; - - if (attr_mask & IB_QP_SQ_PSN) { - qp->s_next_psn = attr->sq_psn & PSN_MODIFY_MASK; - qp->s_psn = qp->s_next_psn; - qp->s_sending_psn = qp->s_next_psn; - qp->s_last_psn = qp->s_next_psn - 1; - qp->s_sending_hpsn = qp->s_last_psn; - } - - if (attr_mask & IB_QP_RQ_PSN) - qp->r_psn = attr->rq_psn & PSN_MODIFY_MASK; + return 0; +} - if (attr_mask & IB_QP_ACCESS_FLAGS) - qp->qp_access_flags = attr->qp_access_flags; +void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct ib_qp *ibqp = &qp->ibqp; + struct hfi1_qp_priv *priv = qp->priv; if (attr_mask & IB_QP_AV) { - qp->remote_ah_attr = attr->ah_attr; - qp->s_srate = attr->ah_attr.static_rate; - qp->srate_mbps = ib_rate_to_mbps(qp->s_srate); priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); } - if (attr_mask & IB_QP_ALT_PATH) { - qp->alt_ah_attr = attr->alt_ah_attr; - qp->s_alt_pkey_index = attr->alt_pkey_index; - } - - if (attr_mask & IB_QP_PATH_MIG_STATE) { - qp->s_mig_state = attr->path_mig_state; - if (mig) { - qp->remote_ah_attr = qp->alt_ah_attr; - qp->port_num = qp->alt_ah_attr.port_num; - qp->s_pkey_index = qp->s_alt_pkey_index; - qp->s_flags |= RVT_S_AHG_CLEAR; - priv->s_sc = 
ah_to_sc(ibqp->device, &qp->remote_ah_attr); - priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); - } - } - - if (attr_mask & IB_QP_PATH_MTU) { - struct hfi1_ibport *ibp; - u8 sc, vl; - u32 mtu; - - dd = dd_from_dev(dev); - ibp = &dd->pport[qp->port_num - 1].ibport_data; - - sc = ibp->sl_to_sc[qp->remote_ah_attr.sl]; - vl = sc_to_vlt(dd, sc); - - mtu = verbs_mtu_enum_to_int(ibqp->device, pmtu); - if (vl < PER_VL_SEND_CONTEXTS) - mtu = min_t(u32, mtu, dd->vld[vl].mtu); - pmtu = mtu_to_enum(mtu, OPA_MTU_8192); - - qp->path_mtu = pmtu; - qp->pmtu = mtu; - } - - if (attr_mask & IB_QP_RETRY_CNT) { - qp->s_retry_cnt = attr->retry_cnt; - qp->s_retry = attr->retry_cnt; - } - - if (attr_mask & IB_QP_RNR_RETRY) { - qp->s_rnr_retry_cnt = attr->rnr_retry; - qp->s_rnr_retry = attr->rnr_retry; - } - - if (attr_mask & IB_QP_MIN_RNR_TIMER) - qp->r_min_rnr_timer = attr->min_rnr_timer; - - if (attr_mask & IB_QP_TIMEOUT) { - qp->timeout = attr->timeout; - qp->timeout_jiffies = - usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / - 1000UL); - } - - if (attr_mask & IB_QP_QKEY) - qp->qkey = attr->qkey; - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) - qp->r_max_rd_atomic = attr->max_dest_rd_atomic; - - if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) - qp->s_max_rd_atomic = attr->max_rd_atomic; - - spin_unlock(&qp->s_lock); - spin_unlock_irq(&qp->r_lock); - - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) - insert_qp(dev, qp); - - if (lastwqe) { - ev.device = qp->ibqp.device; - ev.element.qp = &qp->ibqp; - ev.event = IB_EVENT_QP_LAST_WQE_REACHED; - qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); - } - if (mig) { - ev.device = qp->ibqp.device; - ev.element.qp = &qp->ibqp; - ev.event = IB_EVENT_PATH_MIG; - qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); + if (attr_mask & IB_QP_PATH_MIG_STATE && + attr->path_mig_state == IB_MIG_MIGRATED && + qp->s_mig_state == IB_MIG_ARMED) { + qp->s_flags |= RVT_S_AHG_CLEAR; + priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); + priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); } - ret = 0; - goto bail; - -inval: - spin_unlock(&qp->s_lock); - spin_unlock_irq(&qp->r_lock); - ret = -EINVAL; - -bail: - return ret; } int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, @@ -846,21 +359,19 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) del_timer_sync(&qp->s_timer); iowait_sdma_drain(&priv->s_iowait); flush_tx_list(qp); - remove_qp(dev, qp); + rvt_remove_qp(ib_to_rvt(ibqp->device), qp); wait_event(qp->wait, !atomic_read(&qp->refcount)); spin_lock_irq(&qp->r_lock); spin_lock(&qp->s_lock); - clear_mr_refs(qp, 1); + rvt_clear_mr_refs(qp, 1); clear_ahg(qp); } spin_unlock(&qp->s_lock); spin_unlock_irq(&qp->r_lock); /* all user's cleaned up, mark it available */ - free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num); - spin_lock(&dev->n_qps_lock); - dev->n_qps_allocated--; - spin_unlock(&dev->n_qps_lock); + rvt_free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num); + rvt_dec_qp_cnt(&dev->rdi); if (qp->ip) kref_put(&qp->ip->ref, rvt_release_mmap_info); @@ -1216,6 +727,26 @@ unsigned free_all_qps(struct rvt_dev_info *rdi) return qp_inuse; } +void flush_qp_waiters(struct rvt_qp *qp) +{ + flush_iowait(qp); +} + +void stop_send_queue(struct rvt_qp *qp) +{ + struct hfi1_qp_priv *priv = qp->priv; + + cancel_work_sync(&priv->s_iowait.iowork); +} + +void quiesce_qp(struct rvt_qp *qp) +{ + struct hfi1_qp_priv *priv = qp->priv; + + iowait_sdma_drain(&priv->s_iowait); + flush_tx_list(qp); +} + void notify_qp_reset(struct rvt_qp *qp) { struct hfi1_qp_priv *priv = qp->priv; @@ -1252,3 
+783,75 @@ void hfi1_migrate_qp(struct rvt_qp *qp) ev.event = IB_EVENT_PATH_MIG; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } + +int mtu_to_path_mtu(u32 mtu) +{ + return mtu_to_enum(mtu, OPA_MTU_8192); +} + +u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu) +{ + u32 mtu; + struct hfi1_ibdev *verbs_dev = container_of(rdi, + struct hfi1_ibdev, + rdi); + struct hfi1_devdata *dd = container_of(verbs_dev, + struct hfi1_devdata, + verbs_dev); + struct hfi1_ibport *ibp; + u8 sc, vl; + + ibp = &dd->pport[qp->port_num - 1].ibport_data; + sc = ibp->sl_to_sc[qp->remote_ah_attr.sl]; + vl = sc_to_vlt(dd, sc); + + mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu); + if (vl < PER_VL_SEND_CONTEXTS) + mtu = min_t(u32, mtu, dd->vld[vl].mtu); + return mtu; +} + +int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, + struct ib_qp_attr *attr) +{ + int mtu, pidx = qp->port_num - 1; + struct hfi1_ibdev *verbs_dev = container_of(rdi, + struct hfi1_ibdev, + rdi); + struct hfi1_devdata *dd = container_of(verbs_dev, + struct hfi1_devdata, + verbs_dev); + mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu); + if (mtu == -1) + return -1; /* values less than 0 are error */ + + if (mtu > dd->pport[pidx].ibmtu) + return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048); + else + return attr->path_mtu; +} + +void notify_error_qp(struct rvt_qp *qp) +{ + struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); + struct hfi1_qp_priv *priv = qp->priv; + + write_seqlock(&dev->iowait_lock); + if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) { + qp->s_flags &= ~RVT_S_ANY_WAIT_IO; + list_del_init(&priv->s_iowait.list); + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); + } + write_sequnlock(&dev->iowait_lock); + + if (!(qp->s_flags & RVT_S_BUSY)) { + qp->s_hdrwords = 0; + if (qp->s_rdma_mr) { + rvt_put_mr(qp->s_rdma_mr); + qp->s_rdma_mr = NULL; + } + flush_tx_list(qp); + } +} + diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index b825cb347ee1..d6bfb987b830 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -57,38 +57,6 @@ extern unsigned int hfi1_qp_table_size; -static inline u32 qpn_hash(struct rvt_qp_ibdev *dev, u32 qpn) -{ - return hash_32(qpn, dev->qp_table_bits); -} - -/** - * hfi1_lookup_qpn - return the QP with the given QPN - * @ibp: the ibport - * @qpn: the QP number to look up - * - * The caller must hold the rcu_read_lock(), and keep the lock until - * the returned qp is no longer in use. - */ -static inline struct rvt_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp, - u32 qpn) __must_hold(RCU) -{ - struct rvt_qp *qp = NULL; - - if (unlikely(qpn <= 1)) { - qp = rcu_dereference(ibp->rvp.qp[qpn]); - } else { - struct hfi1_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; - u32 n = qpn_hash(dev->rdi.qp_dev, qpn); - - for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp; - qp = rcu_dereference(qp->next)) - if (qp->ibqp.qp_num == qpn) - break; - } - return qp; -} - /* * free_ahg - clear ahg from QP */ @@ -103,30 +71,6 @@ static inline void clear_ahg(struct rvt_qp *qp) qp->s_ahgidx = -1; } -/** - * hfi1_error_qp - put a QP into the error state - * @qp: the QP to put into the error state - * @err: the receive completion error to signal if a RWQE is active - * - * Flushes both send and receive work queues. - * Returns true if last WQE event should be generated. - * The QP r_lock and s_lock should be held and interrupts disabled. - * If we are already in error state, just return. 
- */ -int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err); - -/** - * hfi1_modify_qp - modify the attributes of a queue pair - * @ibqp: the queue pair who's attributes we're modifying - * @attr: the new attributes - * @attr_mask: the mask of attributes to modify - * @udata: user data for libibverbs.so - * - * Returns 0 on success, otherwise returns an errno. - */ -int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_udata *udata); - int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr); @@ -253,5 +197,12 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); unsigned free_all_qps(struct rvt_dev_info *rdi); void notify_qp_reset(struct rvt_qp *qp); - +int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, + struct ib_qp_attr *attr); +void flush_qp_waiters(struct rvt_qp *qp); +void notify_error_qp(struct rvt_qp *qp); +void stop_send_queue(struct rvt_qp *qp); +void quiesce_qp(struct rvt_qp *qp); +u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu); +int mtu_to_path_mtu(u32 mtu); #endif /* _QP_H */ diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index a30bf300f5cb..50559fd14a70 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -49,6 +49,8 @@ */ #include +#include +#include #include "hfi.h" #include "qp.h" @@ -891,7 +893,7 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait) qp->s_retry = qp->s_retry_cnt; } else if (qp->s_last == qp->s_acked) { hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); - hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); return; } else /* need to handle delayed completion */ return; @@ -1355,7 +1357,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, class_b: if (qp->s_last == qp->s_acked) { hfi1_send_complete(qp, wqe, status); - hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } break; @@ -1601,7 +1603,7 @@ ack_len_err: ack_err: if (qp->s_last == qp->s_acked) { hfi1_send_complete(qp, wqe, status); - hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } ack_done: spin_unlock_irqrestore(&qp->s_lock, flags); @@ -1832,7 +1834,7 @@ void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err) int lastwqe; spin_lock_irqsave(&qp->s_lock, flags); - lastwqe = hfi1_error_qp(qp, err); + lastwqe = rvt_error_qp(qp, err); spin_unlock_irqrestore(&qp->s_lock, flags); if (lastwqe) { @@ -1873,8 +1875,8 @@ static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, cc_event = &ppd->cc_events[ppd->cc_log_idx++]; if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS) ppd->cc_log_idx = 0; - cc_event->lqpn = lqpn & HFI1_QPN_MASK; - cc_event->rqpn = rqpn & HFI1_QPN_MASK; + cc_event->lqpn = lqpn & RVT_QPN_MASK; + cc_event->rqpn = rqpn & RVT_QPN_MASK; cc_event->sl = sl; cc_event->svc_type = svc_type; cc_event->rlid = rlid; @@ -2063,7 +2065,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) /* OK, process the packet. 
*/ switch (opcode) { case OP(SEND_FIRST): - ret = hfi1_get_rwqe(qp, 0); + ret = hfi1_rvt_get_rwqe(qp, 0); if (ret < 0) goto nack_op_err; if (!ret) @@ -2084,7 +2086,7 @@ send_middle: case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): /* consume RWQE */ - ret = hfi1_get_rwqe(qp, 1); + ret = hfi1_rvt_get_rwqe(qp, 1); if (ret < 0) goto nack_op_err; if (!ret) @@ -2093,7 +2095,7 @@ send_middle: case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): - ret = hfi1_get_rwqe(qp, 0); + ret = hfi1_rvt_get_rwqe(qp, 0); if (ret < 0) goto nack_op_err; if (!ret) @@ -2125,7 +2127,7 @@ send_last: if (unlikely(wc.byte_len > qp->r_len)) goto nack_inv; hfi1_copy_sge(&qp->r_sge, data, tlen, 1); - hfi1_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); qp->r_msn++; if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) break; @@ -2193,7 +2195,7 @@ send_last: goto send_middle; else if (opcode == OP(RDMA_WRITE_ONLY)) goto no_immediate_data; - ret = hfi1_get_rwqe(qp, 1); + ret = hfi1_rvt_get_rwqe(qp, 1); if (ret < 0) goto nack_op_err; if (!ret) diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index b47e462c26b3..6379df53fa72 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -145,7 +145,7 @@ bail: } /** - * hfi1_get_rwqe - copy the next RWQE into the QP's RWQE + * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE * @qp: the QP * @wr_id_only: update qp->r_wr_id only, not qp->r_sge * @@ -154,7 +154,7 @@ bail: * * Can be called from interrupt level. */ -int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only) +int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only) { unsigned long flags; struct rvt_rq *rq; @@ -192,7 +192,7 @@ int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only) } /* Make sure entry is read after head index is read. */ smp_rmb(); - wqe = get_rwqe_ptr(rq, tail); + wqe = rvt_get_rwqe_ptr(rq, tail); /* * Even though we update the tail index in memory, the verbs * consumer is not supposed to post more entries until a @@ -377,7 +377,8 @@ static void ruc_loopback(struct rvt_qp *sqp) * Note that we check the responder QP state after * checking the requester's state. 
*/ - qp = hfi1_lookup_qpn(ibp, sqp->remote_qpn); + qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp, + sqp->remote_qpn); spin_lock_irqsave(&sqp->s_lock, flags); @@ -441,7 +442,7 @@ again: wc.ex.imm_data = wqe->wr.ex.imm_data; /* FALLTHROUGH */ case IB_WR_SEND: - ret = hfi1_get_rwqe(qp, 0); + ret = hfi1_rvt_get_rwqe(qp, 0); if (ret < 0) goto op_err; if (!ret) @@ -453,7 +454,7 @@ again: goto inv_err; wc.wc_flags = IB_WC_WITH_IMM; wc.ex.imm_data = wqe->wr.ex.imm_data; - ret = hfi1_get_rwqe(qp, 1); + ret = hfi1_rvt_get_rwqe(qp, 1); if (ret < 0) goto op_err; if (!ret) @@ -548,7 +549,7 @@ again: sqp->s_len -= len; } if (release) - hfi1_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) goto send_comp; @@ -623,7 +624,7 @@ serr: spin_lock_irqsave(&sqp->s_lock, flags); hfi1_send_complete(sqp, wqe, send_status); if (sqp->ibqp.qp_type == IB_QPT_RC) { - int lastwqe = hfi1_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); diff --git a/drivers/staging/rdma/hfi1/srq.c b/drivers/staging/rdma/hfi1/srq.c index c53b378497e1..f71dff05dec4 100644 --- a/drivers/staging/rdma/hfi1/srq.c +++ b/drivers/staging/rdma/hfi1/srq.c @@ -93,7 +93,7 @@ int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, goto bail; } - wqe = get_rwqe_ptr(&srq->rq, wq->head); + wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head); wqe->wr_id = wr->wr_id; wqe->num_sge = wr->num_sge; for (i = 0; i < wr->num_sge; i++) @@ -299,7 +299,7 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, struct rvt_rwqe *wqe; int i; - wqe = get_rwqe_ptr(&srq->rq, tail); + wqe = rvt_get_rwqe_ptr(&srq->rq, tail); p->wr_id = wqe->wr_id; p->num_sge = wqe->num_sge; for (i = 0; i < wqe->num_sge; i++) diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c index 10122e84cb2f..9eadec5be3b0 100644 --- a/drivers/staging/rdma/hfi1/trace.c +++ b/drivers/staging/rdma/hfi1/trace.c @@ -166,7 +166,7 @@ const char *parse_everbs_hdrs( case OP(UD, SEND_ONLY_WITH_IMMEDIATE): trace_seq_printf(p, DETH_PRN, be32_to_cpu(eh->ud.deth[0]), - be32_to_cpu(eh->ud.deth[1]) & HFI1_QPN_MASK); + be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); break; } trace_seq_putc(p, 0); diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h index 14601d788c19..fcae96e5b784 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/staging/rdma/hfi1/trace.h @@ -363,37 +363,6 @@ DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep, TP_PROTO(struct rvt_qp *qp, u32 flags), TP_ARGS(qp, flags)); -#undef TRACE_SYSTEM -#define TRACE_SYSTEM hfi1_qphash -DECLARE_EVENT_CLASS(hfi1_qphash_template, - TP_PROTO(struct rvt_qp *qp, u32 bucket), - TP_ARGS(qp, bucket), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) - __field(u32, qpn) - __field(u32, bucket) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)) - __entry->qpn = qp->ibqp.qp_num; - __entry->bucket = bucket; - ), - TP_printk( - "[%s] qpn 0x%x bucket %u", - __get_str(dev), - __entry->qpn, - __entry->bucket - ) -); - -DEFINE_EVENT(hfi1_qphash_template, hfi1_qpinsert, - TP_PROTO(struct rvt_qp *qp, u32 bucket), - TP_ARGS(qp, bucket)); - -DEFINE_EVENT(hfi1_qphash_template, hfi1_qpremove, - TP_PROTO(struct rvt_qp *qp, u32 bucket), - TP_ARGS(qp, bucket)); - #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_ibhdrs @@ -538,7 +507,7 @@ DECLARE_EVENT_CLASS(hfi1_ibhdr_template, 
(be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) & HFI1_BECN_MASK; __entry->qpn = - be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; + be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; __entry->a = (be32_to_cpu(ohdr->bth[2]) >> 31) & 1; /* allow for larger PSN */ @@ -627,7 +596,7 @@ TRACE_EVENT(snoop_capture, DD_DEV_ASSIGN(dd); __entry->slid = be16_to_cpu(hdr->lrh[3]); __entry->dlid = be16_to_cpu(hdr->lrh[1]); - __entry->qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; + __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff; diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index ec404ff9e9a6..1e50d303c024 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -292,7 +292,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) u16 rlid = be16_to_cpu(hdr->lrh[3]); u8 sl, sc5; - lqpn = bth1 & HFI1_QPN_MASK; + lqpn = bth1 & RVT_QPN_MASK; rqpn = qp->remote_qpn; sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl]; @@ -335,7 +335,7 @@ inv: set_bit(RVT_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; } else - hfi1_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); qp->r_state = OP(SEND_LAST); switch (opcode) { case OP(SEND_FIRST): @@ -394,7 +394,7 @@ send_first: if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) qp->r_sge = qp->s_rdma_read_sge; else { - ret = hfi1_get_rwqe(qp, 0); + ret = hfi1_rvt_get_rwqe(qp, 0); if (ret < 0) goto op_err; if (!ret) @@ -444,7 +444,7 @@ send_last: goto rewind; wc.opcode = IB_WC_RECV; hfi1_copy_sge(&qp->r_sge, data, tlen, 0); - hfi1_put_ss(&qp->s_rdma_read_sge); + rvt_put_ss(&qp->s_rdma_read_sge); last_imm: wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -537,9 +537,9 @@ rdma_last_imm: if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) - hfi1_put_ss(&qp->s_rdma_read_sge); + rvt_put_ss(&qp->s_rdma_read_sge); else { - ret = hfi1_get_rwqe(qp, 1); + ret = hfi1_rvt_get_rwqe(qp, 1); if (ret < 0) goto op_err; if (!ret) @@ -548,7 +548,7 @@ rdma_last_imm: wc.byte_len = qp->r_len; wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; hfi1_copy_sge(&qp->r_sge, data, tlen, 1); - hfi1_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); goto last_imm; case OP(RDMA_WRITE_LAST): @@ -564,7 +564,7 @@ rdma_last: if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; hfi1_copy_sge(&qp->r_sge, data, tlen, 1); - hfi1_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); break; default: diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index e2cbdc86d1a3..2eae16769688 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -80,7 +80,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) rcu_read_lock(); - qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn); + qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp, + swqe->ud_wr.remote_qpn); if (!qp) { ibp->rvp.n_pkt_drops++; rcu_read_unlock(); @@ -166,7 +167,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) else { int ret; - ret = hfi1_get_rwqe(qp, 0); + ret = hfi1_rvt_get_rwqe(qp, 0); if (ret < 0) { hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); goto bail_unlock; @@ -222,7 +223,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) } length -= len; } - hfi1_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) goto bail_unlock; wc.wr_id = 
qp->r_wr_id; @@ -664,7 +665,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) struct ib_grh *grh = NULL; qkey = be32_to_cpu(ohdr->u.ud.deth[0]); - src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK; + src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK; dlid = be16_to_cpu(hdr->lrh[1]); is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) && (dlid != be16_to_cpu(IB_LID_PERMISSIVE)); @@ -675,7 +676,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) * error path. */ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; + u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; u8 sl, sc5; sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf; @@ -817,7 +818,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) else { int ret; - ret = hfi1_get_rwqe(qp, 0); + ret = hfi1_rvt_get_rwqe(qp, 0); if (ret < 0) { hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); return; @@ -840,7 +841,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) } else hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); - hfi1_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) return; wc.wr_id = qp->r_wr_id; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 2fed28487c89..e51f8270553d 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -368,7 +368,7 @@ static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, goto bail; } - wqe = get_rwqe_ptr(&qp->r_rq, wq->head); + wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head); wqe->wr_id = wr->wr_id; wqe->num_sge = wr->num_sge; for (i = 0; i < wr->num_sge; i++) @@ -418,6 +418,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) u32 tlen = packet->tlen; struct hfi1_pportdata *ppd = rcd->ppd; struct hfi1_ibport *ibp = &ppd->ibport_data; + struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi; unsigned long flags; u32 qp_num; int lnh; @@ -447,7 +448,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) inc_opstats(tlen, &rcd->opstats->stats[opcode]); /* Get the destination QP number. */ - qp_num = be32_to_cpu(packet->ohdr->bth[1]) & HFI1_QPN_MASK; + qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK; lid = be16_to_cpu(hdr->lrh[1]); if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) && (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) { @@ -474,7 +475,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) wake_up(&mcast->wait); } else { rcu_read_lock(); - packet->qp = hfi1_lookup_qpn(ibp, qp_num); + packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); if (!packet->qp) { rcu_read_unlock(); goto drop; @@ -1534,7 +1535,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) /* Only need to initialize non-zero fields. 
 */
-	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
@@ -1623,7 +1623,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
	ibdev->query_srq = hfi1_query_srq;
	ibdev->destroy_srq = hfi1_destroy_srq;
	ibdev->create_qp = NULL;
-	ibdev->modify_qp = hfi1_modify_qp;
+	ibdev->modify_qp = NULL;
	ibdev->query_qp = hfi1_query_qp;
	ibdev->destroy_qp = hfi1_destroy_qp;
	ibdev->post_send = NULL;
@@ -1674,12 +1674,25 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
	dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
	dd->verbs_dev.rdi.dparms.qpn_res_end =
	dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
+	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
+	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
+	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
+	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
+	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
+	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
+	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
+	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
+	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
+	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
+	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
+	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
+	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;

	/* completion queue */
	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
index 8e82cf0fe3fd..f2c8a212104c 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/staging/rdma/hfi1/verbs.h
@@ -225,18 +225,6 @@ struct hfi1_pkt_state {

 #define HFI1_PSN_CREDIT 16

-/*
- * Since struct rvt_rwqe is not a fixed size, we can't simply index into
- * struct rvt_rwq.wq. This function does the array index computation.
- */ -static inline struct rvt_rwqe *get_rwqe_ptr(struct rvt_rq *rq, unsigned n) -{ - return (struct rvt_rwqe *) - ((char *) rq->wq->wq + - (sizeof(struct rvt_rwqe) + - rq->max_sge * sizeof(struct ib_sge)) * n); -} - struct hfi1_opcode_stats { u64 n_packets; /* number of packets */ u64 n_bytes; /* total number of bytes */ @@ -286,8 +274,6 @@ struct hfi1_ibdev { u64 n_kmem_wait; u64 n_send_schedule; - u32 n_qps_allocated; /* number of QPs allocated for device */ - spinlock_t n_qps_lock; u32 n_srqs_allocated; /* number of SRQs allocated for device */ spinlock_t n_srqs_lock; #ifdef CONFIG_DEBUG_FS @@ -464,19 +450,16 @@ int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); int hfi1_destroy_srq(struct ib_srq *ibsrq); -static inline void hfi1_put_ss(struct rvt_sge_state *ss) -{ - while (ss->num_sge) { - rvt_put_mr(ss->sge.mr); - if (--ss->num_sge) - ss->sge = *ss->sg_list++; - } -} - -int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only); +int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only); void hfi1_migrate_qp(struct rvt_qp *qp); +int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); + +void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); + int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, int has_grh, struct rvt_qp *qp, u32 bth0); -- cgit v1.2.3-59-g8ed1b From 75261cc6ab663e0d44f6f5a02a46d3e197cbe639 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:50 -0800 Subject: staging/rdma/hfi1: Remove destroy qp verb This removes the destroy qp verbs in favor of using rdmavt. Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 53 --------------------------------------- drivers/staging/rdma/hfi1/qp.h | 11 -------- drivers/staging/rdma/hfi1/verbs.c | 2 +- 3 files changed, 1 insertion(+), 65 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 1e6ca4fb7925..c9f246740b25 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -331,59 +331,6 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp) return cpu_to_be32(aeth); } -/** - * hfi1_destroy_qp - destroy a queue pair - * @ibqp: the queue pair to destroy - * - * Returns 0 on success. - * - * Note that this can be called while the QP is actively sending or - * receiving! - */ -int hfi1_destroy_qp(struct ib_qp *ibqp) -{ - struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); - struct hfi1_ibdev *dev = to_idev(ibqp->device); - struct hfi1_qp_priv *priv = qp->priv; - - /* Make sure HW and driver activity is stopped. 
*/ - spin_lock_irq(&qp->r_lock); - spin_lock(&qp->s_lock); - if (qp->state != IB_QPS_RESET) { - qp->state = IB_QPS_RESET; - flush_iowait(qp); - qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT); - spin_unlock(&qp->s_lock); - spin_unlock_irq(&qp->r_lock); - cancel_work_sync(&priv->s_iowait.iowork); - del_timer_sync(&qp->s_timer); - iowait_sdma_drain(&priv->s_iowait); - flush_tx_list(qp); - rvt_remove_qp(ib_to_rvt(ibqp->device), qp); - wait_event(qp->wait, !atomic_read(&qp->refcount)); - spin_lock_irq(&qp->r_lock); - spin_lock(&qp->s_lock); - rvt_clear_mr_refs(qp, 1); - clear_ahg(qp); - } - spin_unlock(&qp->s_lock); - spin_unlock_irq(&qp->r_lock); - - /* all user's cleaned up, mark it available */ - rvt_free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num); - rvt_dec_qp_cnt(&dev->rdi); - - if (qp->ip) - kref_put(&qp->ip->ref, rvt_release_mmap_info); - else - vfree(qp->r_rq.wq); - vfree(qp->s_wq); - kfree(priv->s_hdr); - kfree(priv); - kfree(qp); - return 0; -} - /** * hfi1_get_credit - flush the send work queue of a QP * @qp: the qp who's send work queue to flush diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index d6bfb987b830..21af3adbf3e3 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -95,17 +95,6 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp); struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata); -/** - * hfi1_destroy_qp - destroy a queue pair - * @ibqp: the queue pair to destroy - * - * Returns 0 on success. - * - * Note that this can be called while the QP is actively sending or - * receiving! - */ -int hfi1_destroy_qp(struct ib_qp *ibqp); - /** * hfi1_get_credit - flush the send work queue of a QP * @qp: the qp who's send work queue to flush diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index e51f8270553d..1ed1f20e1ab3 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1625,7 +1625,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->create_qp = NULL; ibdev->modify_qp = NULL; ibdev->query_qp = hfi1_query_qp; - ibdev->destroy_qp = hfi1_destroy_qp; + ibdev->destroy_qp = NULL; ibdev->post_send = NULL; ibdev->post_recv = post_receive; ibdev->post_srq_recv = hfi1_post_srq_receive; -- cgit v1.2.3-59-g8ed1b From 1897ce219143cae13a87e0544b3b467ad3932964 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:43:55 -0800 Subject: staging/rdma/hfi1: Remove post_recv and use rdmavt version This patch removes the simple post recv function in favor of using rdmavt. The packet receive processing still lives in the driver though. Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 63 +-------------------------------------- 1 file changed, 1 insertion(+), 62 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 1ed1f20e1ab3..b72eb7b9c8fd 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -323,67 +323,6 @@ void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release) } } -/** - * post_receive - post a receive on a QP - * @ibqp: the QP to post the receive on - * @wr: the WR to post - * @bad_wr: the first bad WR is put here - * - * This may be called from interrupt context. 
- */ -static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, - struct ib_recv_wr **bad_wr) -{ - struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); - struct rvt_rwq *wq = qp->r_rq.wq; - unsigned long flags; - int ret; - - /* Check that state is OK to post receive. */ - if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) { - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - - for (; wr; wr = wr->next) { - struct rvt_rwqe *wqe; - u32 next; - int i; - - if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - - spin_lock_irqsave(&qp->r_rq.lock, flags); - next = wq->head + 1; - if (next >= qp->r_rq.size) - next = 0; - if (next == wq->tail) { - spin_unlock_irqrestore(&qp->r_rq.lock, flags); - *bad_wr = wr; - ret = -ENOMEM; - goto bail; - } - - wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head); - wqe->wr_id = wr->wr_id; - wqe->num_sge = wr->num_sge; - for (i = 0; i < wr->num_sge; i++) - wqe->sg_list[i] = wr->sg_list[i]; - /* Make sure queue entry is written before the head index. */ - smp_wmb(); - wq->head = next; - spin_unlock_irqrestore(&qp->r_rq.lock, flags); - } - ret = 0; - -bail: - return ret; -} - /* * Make sure the QP is ready and able to accept the given opcode. */ @@ -1627,7 +1566,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->query_qp = hfi1_query_qp; ibdev->destroy_qp = NULL; ibdev->post_send = NULL; - ibdev->post_recv = post_receive; + ibdev->post_recv = NULL; ibdev->post_srq_recv = hfi1_post_srq_receive; ibdev->create_cq = NULL; ibdev->destroy_cq = NULL; -- cgit v1.2.3-59-g8ed1b From 4331629f57c4def899e560a7e3cb87fda577fb4b Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Tue, 19 Jan 2016 14:44:01 -0800 Subject: staging/rdma/hfi1: Clean up register device Now that rdmavt has solidified in its design we can clean up the driver specific register device functions. This handles hfi1. 
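As a rough sketch of the result (assuming only the callback names visible in
the diff below), the registration path reduces to the handful of entries the
driver still owns, with rdmavt supplying everything that is set to NULL:

	/* hedged sketch of the post-cleanup shape, not the verbatim diff */
	ibdev->modify_device = modify_device;
	ibdev->query_port = query_port;
	ibdev->modify_port = modify_port;
	ibdev->query_gid = query_gid;
	/* keep process mad in the driver */
	ibdev->process_mad = hfi1_process_mad;
	ibdev->get_port_immutable = port_immutable;

	ret = rvt_register_device(&dd->verbs_dev.rdi);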
Reviewed-by: Ira Weiny Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 69 ++------------------------------------- 1 file changed, 2 insertions(+), 67 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index b72eb7b9c8fd..67999150921c 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1507,86 +1507,21 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz); ibdev->owner = THIS_MODULE; ibdev->node_guid = cpu_to_be64(ppd->guid); - ibdev->uverbs_abi_ver = HFI1_UVERBS_ABI_VERSION; - ibdev->uverbs_cmd_mask = - (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | - (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | - (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | - (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | - (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | - (1ull << IB_USER_VERBS_CMD_CREATE_AH) | - (1ull << IB_USER_VERBS_CMD_MODIFY_AH) | - (1ull << IB_USER_VERBS_CMD_QUERY_AH) | - (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | - (1ull << IB_USER_VERBS_CMD_REG_MR) | - (1ull << IB_USER_VERBS_CMD_DEREG_MR) | - (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | - (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | - (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | - (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | - (1ull << IB_USER_VERBS_CMD_POLL_CQ) | - (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | - (1ull << IB_USER_VERBS_CMD_CREATE_QP) | - (1ull << IB_USER_VERBS_CMD_QUERY_QP) | - (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | - (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | - (1ull << IB_USER_VERBS_CMD_POST_SEND) | - (1ull << IB_USER_VERBS_CMD_POST_RECV) | - (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | - (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | - (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | - (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | - (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); - ibdev->node_type = RDMA_NODE_IB_CA; ibdev->phys_port_cnt = dd->num_pports; - ibdev->num_comp_vectors = 1; ibdev->dma_device = &dd->pcidev->dev; - ibdev->query_device = NULL; ibdev->modify_device = modify_device; ibdev->query_port = query_port; ibdev->modify_port = modify_port; - ibdev->query_pkey = NULL; ibdev->query_gid = query_gid; - ibdev->alloc_ucontext = NULL; - ibdev->dealloc_ucontext = NULL; - ibdev->alloc_pd = NULL; - ibdev->dealloc_pd = NULL; - ibdev->create_ah = NULL; - ibdev->destroy_ah = NULL; - ibdev->modify_ah = NULL; - ibdev->query_ah = NULL; ibdev->create_srq = hfi1_create_srq; ibdev->modify_srq = hfi1_modify_srq; ibdev->query_srq = hfi1_query_srq; ibdev->destroy_srq = hfi1_destroy_srq; - ibdev->create_qp = NULL; - ibdev->modify_qp = NULL; ibdev->query_qp = hfi1_query_qp; - ibdev->destroy_qp = NULL; - ibdev->post_send = NULL; - ibdev->post_recv = NULL; ibdev->post_srq_recv = hfi1_post_srq_receive; - ibdev->create_cq = NULL; - ibdev->destroy_cq = NULL; - ibdev->resize_cq = NULL; - ibdev->poll_cq = NULL; - ibdev->req_notify_cq = NULL; - ibdev->get_dma_mr = NULL; - ibdev->reg_user_mr = NULL; - ibdev->dereg_mr = NULL; - ibdev->alloc_mr = NULL; - ibdev->map_mr_sg = NULL; - ibdev->alloc_fmr = NULL; - ibdev->map_phys_fmr = NULL; - ibdev->unmap_fmr = NULL; - ibdev->dealloc_fmr = NULL; - ibdev->attach_mcast = NULL; - ibdev->detach_mcast = NULL; + + /* keep process mad in the driver */ ibdev->process_mad = hfi1_process_mad; - ibdev->mmap = NULL; - ibdev->dma_ops = NULL; 
	ibdev->get_port_immutable = port_immutable;

	strncpy(ibdev->node_desc, init_utsname()->nodename,
-- cgit v1.2.3-59-g8ed1b


From 7af6d00654a16ca805f50e05eebb545ef9dbb016 Mon Sep 17 00:00:00 2001
From: Dennis Dalessandro
Date: Tue, 19 Jan 2016 14:44:06 -0800
Subject: staging/rdma/hfi1: Use rdmavt device allocation function

No longer do drivers need to call into the IB core to allocate the verbs
device. Use the functionality provided by rdmavt.

Reviewed-by: Ira Weiny
Signed-off-by: Dennis Dalessandro
Signed-off-by: Doug Ledford
---
 drivers/staging/rdma/hfi1/init.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

(limited to 'drivers/staging')

diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c
index d1cb2c854f9e..7def3f33ac87 100644
--- a/drivers/staging/rdma/hfi1/init.c
+++ b/drivers/staging/rdma/hfi1/init.c
@@ -998,13 +998,16 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
 {
	unsigned long flags;
	struct hfi1_devdata *dd;
-	int ret;
+	int ret, nports;
+
+	/* extra is * number of ports */
+	nports = extra / sizeof(struct hfi1_pportdata);

-	dd = (struct hfi1_devdata *)ib_alloc_device(sizeof(*dd) + extra);
+	dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
+						     nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);
-	/* extra is * number of ports */
-	dd->num_pports = extra / sizeof(struct hfi1_pportdata);
+	dd->num_pports = nports;
	dd->pport = (struct hfi1_pportdata *)(dd + 1);
	INIT_LIST_HEAD(&dd->list);
-- cgit v1.2.3-59-g8ed1b


From 9c4a311e6ca03db4e16c4c06bb37a1189ba0bc03 Mon Sep 17 00:00:00 2001
From: Dennis Dalessandro
Date: Tue, 19 Jan 2016 14:44:11 -0800
Subject: staging/rdma/hfi1: Remove create and free mad agents

Get rid of the create and free MAD agent functions in the driver and use
the rdmavt versions.
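For reference, the per-port pattern moving into rdmavt registers one SMI MAD
agent per port with a trivial send handler; a condensed sketch of the code
deleted below:

	static void send_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_wc *mad_send_wc)
	{
		ib_free_send_mad(mad_send_wc->send_buf);
	}

	struct ib_mad_agent *agent;
	struct hfi1_ibport *ibp;
	int p;

	for (p = 0; p < dd->num_pports; p++) {
		ibp = &dd->pport[p].ibport_data;
		agent = ib_register_mad_agent(&dev->rdi.ibdev, p + 1,
					      IB_QPT_SMI, NULL, 0,
					      send_handler, NULL, NULL, 0);
		if (IS_ERR(agent))
			goto err;	/* unwind agents registered so far */
		ibp->rvp.send_agent = agent;
	}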
Reviewed-by: Ira Weiny Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mad.c | 76 ++++----------------------------------- drivers/staging/rdma/hfi1/verbs.c | 14 ++------ drivers/staging/rdma/hfi1/verbs.h | 4 --- 3 files changed, 8 insertions(+), 86 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 6daf2770cc56..9cadf77427a2 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -129,7 +129,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) memcpy(smp->route.lid.data, data, len); spin_lock_irqsave(&ibp->rvp.lock, flags); - if (!ibp->sm_ah) { + if (!ibp->rvp.sm_ah) { if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { struct ib_ah *ah; @@ -138,13 +138,13 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) ret = PTR_ERR(ah); else { send_buf->ah = ah; - ibp->sm_ah = ibah_to_rvtah(ah); + ibp->rvp.sm_ah = ibah_to_rvtah(ah); ret = 0; } } else ret = -EINVAL; } else { - send_buf->ah = &ibp->sm_ah->ibah; + send_buf->ah = &ibp->rvp.sm_ah->ibah; ret = 0; } spin_unlock_irqrestore(&ibp->rvp.lock, flags); @@ -1138,11 +1138,11 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) { pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid); spin_lock_irqsave(&ibp->rvp.lock, flags); - if (ibp->sm_ah) { + if (ibp->rvp.sm_ah) { if (smlid != ibp->rvp.sm_lid) - ibp->sm_ah->attr.dlid = smlid; + ibp->rvp.sm_ah->attr.dlid = smlid; if (msl != ibp->rvp.sm_sl) - ibp->sm_ah->attr.sl = msl; + ibp->rvp.sm_ah->attr.sl = msl; } spin_unlock_irqrestore(&ibp->rvp.lock, flags); if (smlid != ibp->rvp.sm_lid) @@ -4157,67 +4157,3 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, return IB_MAD_RESULT_FAILURE; } - -static void send_handler(struct ib_mad_agent *agent, - struct ib_mad_send_wc *mad_send_wc) -{ - ib_free_send_mad(mad_send_wc->send_buf); -} - -int hfi1_create_agents(struct hfi1_ibdev *dev) -{ - struct hfi1_devdata *dd = dd_from_dev(dev); - struct ib_mad_agent *agent; - struct hfi1_ibport *ibp; - int p; - int ret; - - for (p = 0; p < dd->num_pports; p++) { - ibp = &dd->pport[p].ibport_data; - agent = ib_register_mad_agent(&dev->rdi.ibdev, p + 1, - IB_QPT_SMI, - NULL, 0, send_handler, - NULL, NULL, 0); - if (IS_ERR(agent)) { - ret = PTR_ERR(agent); - goto err; - } - - ibp->rvp.send_agent = agent; - } - - return 0; - -err: - for (p = 0; p < dd->num_pports; p++) { - ibp = &dd->pport[p].ibport_data; - if (ibp->rvp.send_agent) { - agent = ibp->rvp.send_agent; - ibp->rvp.send_agent = NULL; - ib_unregister_mad_agent(agent); - } - } - - return ret; -} - -void hfi1_free_agents(struct hfi1_ibdev *dev) -{ - struct hfi1_devdata *dd = dd_from_dev(dev); - struct ib_mad_agent *agent; - struct hfi1_ibport *ibp; - int p; - - for (p = 0; p < dd->num_pports; p++) { - ibp = &dd->pport[p].ibport_data; - if (ibp->rvp.send_agent) { - agent = ibp->rvp.send_agent; - ibp->rvp.send_agent = NULL; - ib_unregister_mad_agent(agent); - } - if (ibp->sm_ah) { - ib_destroy_ah(&ibp->sm_ah->ibah); - ibp->sm_ah = NULL; - } - } -} diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 67999150921c..68f4045dfa99 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1590,27 +1590,19 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ret = rvt_register_device(&dd->verbs_dev.rdi); 
if (ret) - goto err_reg; - - ret = hfi1_create_agents(dev); - if (ret) - goto err_agents; + goto err_verbs_txreq; ret = hfi1_verbs_register_sysfs(dd); if (ret) goto err_class; - goto bail; + return ret; err_class: - hfi1_free_agents(dev); -err_agents: rvt_unregister_device(&dd->verbs_dev.rdi); -err_reg: err_verbs_txreq: kmem_cache_destroy(dev->verbs_txreq_cache); dd_dev_err(dd, "cannot register verbs: %d!\n", -ret); -bail: return ret; } @@ -1620,8 +1612,6 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd) hfi1_verbs_unregister_sysfs(dd); - hfi1_free_agents(dev); - rvt_unregister_device(&dd->verbs_dev.rdi); if (!list_empty(&dev->txwait)) diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index f2c8a212104c..c845514d3abf 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -247,8 +247,6 @@ static inline void inc_opstats( struct hfi1_ibport { struct rvt_qp __rcu *qp[2]; struct rvt_ibport rvp; - struct rvt_ah *sm_ah; - struct rvt_ah *smi_ah; __be64 guids[HFI1_GUIDS_PER_PORT - 1]; /* writable GUIDs */ @@ -340,8 +338,6 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, const struct ib_mad_hdr *in_mad, size_t in_mad_size, struct ib_mad_hdr *out_mad, size_t *out_mad_size, u16 *out_mad_pkey_index); -int hfi1_create_agents(struct hfi1_ibdev *dev); -void hfi1_free_agents(struct hfi1_ibdev *dev); /* * The PSN_MASK and PSN_SHIFT allow for -- cgit v1.2.3-59-g8ed1b From 07336db4003fd911681e37b0523529fbd04fa604 Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Tue, 19 Jan 2016 14:44:17 -0800 Subject: staging/rdma/hfi1: Remove hfi1_query_qp function Rely on rvt_query_qp function defined in rdmavt Reviewed-by: Dennis Dalessandro Signed-off-by: Harish Chegondi Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 50 --------------------------------------- drivers/staging/rdma/hfi1/qp.h | 3 --- drivers/staging/rdma/hfi1/verbs.c | 1 - 3 files changed, 54 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index c9f246740b25..52723c2bad37 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -220,56 +220,6 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, } } -int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_qp_init_attr *init_attr) -{ - struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); - - attr->qp_state = qp->state; - attr->cur_qp_state = attr->qp_state; - attr->path_mtu = qp->path_mtu; - attr->path_mig_state = qp->s_mig_state; - attr->qkey = qp->qkey; - attr->rq_psn = mask_psn(qp->r_psn); - attr->sq_psn = mask_psn(qp->s_next_psn); - attr->dest_qp_num = qp->remote_qpn; - attr->qp_access_flags = qp->qp_access_flags; - attr->cap.max_send_wr = qp->s_size - 1; - attr->cap.max_recv_wr = qp->ibqp.srq ? 
0 : qp->r_rq.size - 1; - attr->cap.max_send_sge = qp->s_max_sge; - attr->cap.max_recv_sge = qp->r_rq.max_sge; - attr->cap.max_inline_data = 0; - attr->ah_attr = qp->remote_ah_attr; - attr->alt_ah_attr = qp->alt_ah_attr; - attr->pkey_index = qp->s_pkey_index; - attr->alt_pkey_index = qp->s_alt_pkey_index; - attr->en_sqd_async_notify = 0; - attr->sq_draining = qp->s_draining; - attr->max_rd_atomic = qp->s_max_rd_atomic; - attr->max_dest_rd_atomic = qp->r_max_rd_atomic; - attr->min_rnr_timer = qp->r_min_rnr_timer; - attr->port_num = qp->port_num; - attr->timeout = qp->timeout; - attr->retry_cnt = qp->s_retry_cnt; - attr->rnr_retry = qp->s_rnr_retry_cnt; - attr->alt_port_num = qp->alt_ah_attr.port_num; - attr->alt_timeout = qp->alt_timeout; - - init_attr->event_handler = qp->ibqp.event_handler; - init_attr->qp_context = qp->ibqp.qp_context; - init_attr->send_cq = qp->ibqp.send_cq; - init_attr->recv_cq = qp->ibqp.recv_cq; - init_attr->srq = qp->ibqp.srq; - init_attr->cap = attr->cap; - if (qp->s_flags & RVT_S_SIGNAL_REQ_WR) - init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; - else - init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; - init_attr->qp_type = qp->ibqp.qp_type; - init_attr->port_num = qp->port_num; - return 0; -} - /** * hfi1_compute_aeth - compute the AETH (syndrome + MSN) * @qp: the queue pair to compute the AETH for diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 21af3adbf3e3..36be54771205 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -71,9 +71,6 @@ static inline void clear_ahg(struct rvt_qp *qp) qp->s_ahgidx = -1; } -int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_qp_init_attr *init_attr); - /** * hfi1_compute_aeth - compute the AETH (syndrome + MSN) * @qp: the queue pair to compute the AETH for diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 68f4045dfa99..f5cc0b996966 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1517,7 +1517,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->modify_srq = hfi1_modify_srq; ibdev->query_srq = hfi1_query_srq; ibdev->destroy_srq = hfi1_destroy_srq; - ibdev->query_qp = hfi1_query_qp; ibdev->post_srq_recv = hfi1_post_srq_receive; /* keep process mad in the driver */ -- cgit v1.2.3-59-g8ed1b From 9cd70e1bbf9393633904b1cb71925c40e1839d68 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Wed, 3 Feb 2016 14:30:40 -0800 Subject: staging/rdma/hfi1: Remove srq functionality srq functionality is now in rdmavt. Remove it from the hfi1 driver. 
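The heart of the SRQ receive path that moves to rdmavt is a single-producer
ring buffer; a condensed sketch of the post-receive logic from the code
deleted below (variables as in the removed hfi1_post_srq_receive()). The
smp_wmb() ensures a consumer that observes the new head also sees the
completed WQE write:

	spin_lock_irqsave(&srq->rq.lock, flags);
	next = wq->head + 1;
	if (next >= srq->rq.size)
		next = 0;			/* wrap around */
	if (next == wq->tail) {			/* ring is full */
		spin_unlock_irqrestore(&srq->rq.lock, flags);
		return -ENOMEM;
	}
	wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
	wqe->wr_id = wr->wr_id;
	wqe->num_sge = wr->num_sge;
	for (i = 0; i < wr->num_sge; i++)
		wqe->sg_list[i] = wr->sg_list[i];
	/* Make sure queue entry is written before the head index. */
	smp_wmb();
	wq->head = next;
	spin_unlock_irqrestore(&srq->rq.lock, flags);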
Reviewed-by: Dennis Dalessandro Reviewed-by: Harish Chegondi Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/srq.c | 397 ------------------------------------- drivers/staging/rdma/hfi1/verbs.c | 6 - drivers/staging/rdma/hfi1/verbs.h | 17 -- 4 files changed, 1 insertion(+), 421 deletions(-) delete mode 100644 drivers/staging/rdma/hfi1/srq.c (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index 0069796add25..ca2dea54e37c 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o hfi1-y := chip.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ init.o intr.o mad.o pcie.o pio.o pio_copy.o \ - qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \ + qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o hfi1-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/staging/rdma/hfi1/srq.c b/drivers/staging/rdma/hfi1/srq.c deleted file mode 100644 index f71dff05dec4..000000000000 --- a/drivers/staging/rdma/hfi1/srq.c +++ /dev/null @@ -1,397 +0,0 @@ -/* - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -#include -#include -#include - -#include "verbs.h" - -/** - * hfi1_post_srq_receive - post a receive on a shared receive queue - * @ibsrq: the SRQ to post the receive on - * @wr: the list of work requests to post - * @bad_wr: A pointer to the first WR to cause a problem is put here - * - * This may be called from interrupt context. - */ -int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, - struct ib_recv_wr **bad_wr) -{ - struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); - struct rvt_rwq *wq; - unsigned long flags; - int ret; - - for (; wr; wr = wr->next) { - struct rvt_rwqe *wqe; - u32 next; - int i; - - if ((unsigned) wr->num_sge > srq->rq.max_sge) { - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - - spin_lock_irqsave(&srq->rq.lock, flags); - wq = srq->rq.wq; - next = wq->head + 1; - if (next >= srq->rq.size) - next = 0; - if (next == wq->tail) { - spin_unlock_irqrestore(&srq->rq.lock, flags); - *bad_wr = wr; - ret = -ENOMEM; - goto bail; - } - - wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head); - wqe->wr_id = wr->wr_id; - wqe->num_sge = wr->num_sge; - for (i = 0; i < wr->num_sge; i++) - wqe->sg_list[i] = wr->sg_list[i]; - /* Make sure queue entry is written before the head index. */ - smp_wmb(); - wq->head = next; - spin_unlock_irqrestore(&srq->rq.lock, flags); - } - ret = 0; - -bail: - return ret; -} - -/** - * hfi1_create_srq - create a shared receive queue - * @ibpd: the protection domain of the SRQ to create - * @srq_init_attr: the attributes of the SRQ - * @udata: data from libibverbs when creating a user SRQ - */ -struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd, - struct ib_srq_init_attr *srq_init_attr, - struct ib_udata *udata) -{ - struct hfi1_ibdev *dev = to_idev(ibpd->device); - struct rvt_srq *srq; - u32 sz; - struct ib_srq *ret; - - if (srq_init_attr->srq_type != IB_SRQT_BASIC) { - ret = ERR_PTR(-ENOSYS); - goto done; - } - - if (srq_init_attr->attr.max_sge == 0 || - srq_init_attr->attr.max_sge > hfi1_max_srq_sges || - srq_init_attr->attr.max_wr == 0 || - srq_init_attr->attr.max_wr > hfi1_max_srq_wrs) { - ret = ERR_PTR(-EINVAL); - goto done; - } - - srq = kmalloc(sizeof(*srq), GFP_KERNEL); - if (!srq) { - ret = ERR_PTR(-ENOMEM); - goto done; - } - - /* - * Need to use vmalloc() if we want to support large #s of entries. - */ - srq->rq.size = srq_init_attr->attr.max_wr + 1; - srq->rq.max_sge = srq_init_attr->attr.max_sge; - sz = sizeof(struct ib_sge) * srq->rq.max_sge + - sizeof(struct rvt_rwqe); - srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz); - if (!srq->rq.wq) { - ret = ERR_PTR(-ENOMEM); - goto bail_srq; - } - - /* - * Return the address of the RWQ as the offset to mmap. - * See hfi1_mmap() for details. - */ - if (udata && udata->outlen >= sizeof(__u64)) { - int err; - u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; - - srq->ip = - rvt_create_mmap_info(&dev->rdi, s, ibpd->uobject->context, - srq->rq.wq); - if (!srq->ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_wq; - } - - err = ib_copy_to_udata(udata, &srq->ip->offset, - sizeof(srq->ip->offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_ip; - } - } else - srq->ip = NULL; - - /* - * ib_create_srq() will initialize srq->ibsrq. 
- */ - spin_lock_init(&srq->rq.lock); - srq->rq.wq->head = 0; - srq->rq.wq->tail = 0; - srq->limit = srq_init_attr->attr.srq_limit; - - spin_lock(&dev->n_srqs_lock); - if (dev->n_srqs_allocated == hfi1_max_srqs) { - spin_unlock(&dev->n_srqs_lock); - ret = ERR_PTR(-ENOMEM); - goto bail_ip; - } - - dev->n_srqs_allocated++; - spin_unlock(&dev->n_srqs_lock); - - if (srq->ip) { - spin_lock_irq(&dev->rdi.pending_lock); - list_add(&srq->ip->pending_mmaps, &dev->rdi.pending_mmaps); - spin_unlock_irq(&dev->rdi.pending_lock); - } - - ret = &srq->ibsrq; - goto done; - -bail_ip: - kfree(srq->ip); -bail_wq: - vfree(srq->rq.wq); -bail_srq: - kfree(srq); -done: - return ret; -} - -/** - * hfi1_modify_srq - modify a shared receive queue - * @ibsrq: the SRQ to modify - * @attr: the new attributes of the SRQ - * @attr_mask: indicates which attributes to modify - * @udata: user data for libibverbs.so - */ -int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask, - struct ib_udata *udata) -{ - struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); - struct rvt_rwq *wq; - int ret = 0; - - if (attr_mask & IB_SRQ_MAX_WR) { - struct rvt_rwq *owq; - struct rvt_rwqe *p; - u32 sz, size, n, head, tail; - - /* Check that the requested sizes are below the limits. */ - if ((attr->max_wr > hfi1_max_srq_wrs) || - ((attr_mask & IB_SRQ_LIMIT) ? - attr->srq_limit : srq->limit) > attr->max_wr) { - ret = -EINVAL; - goto bail; - } - - sz = sizeof(struct rvt_rwqe) + - srq->rq.max_sge * sizeof(struct ib_sge); - size = attr->max_wr + 1; - wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz); - if (!wq) { - ret = -ENOMEM; - goto bail; - } - - /* Check that we can write the offset to mmap. */ - if (udata && udata->inlen >= sizeof(__u64)) { - __u64 offset_addr; - __u64 offset = 0; - - ret = ib_copy_from_udata(&offset_addr, udata, - sizeof(offset_addr)); - if (ret) - goto bail_free; - udata->outbuf = - (void __user *) (unsigned long) offset_addr; - ret = ib_copy_to_udata(udata, &offset, - sizeof(offset)); - if (ret) - goto bail_free; - } - - spin_lock_irq(&srq->rq.lock); - /* - * validate head and tail pointer values and compute - * the number of remaining WQEs. - */ - owq = srq->rq.wq; - head = owq->head; - tail = owq->tail; - if (head >= srq->rq.size || tail >= srq->rq.size) { - ret = -EINVAL; - goto bail_unlock; - } - n = head; - if (n < tail) - n += srq->rq.size - tail; - else - n -= tail; - if (size <= n) { - ret = -EINVAL; - goto bail_unlock; - } - n = 0; - p = wq->wq; - while (tail != head) { - struct rvt_rwqe *wqe; - int i; - - wqe = rvt_get_rwqe_ptr(&srq->rq, tail); - p->wr_id = wqe->wr_id; - p->num_sge = wqe->num_sge; - for (i = 0; i < wqe->num_sge; i++) - p->sg_list[i] = wqe->sg_list[i]; - n++; - p = (struct rvt_rwqe *)((char *)p + sz); - if (++tail >= srq->rq.size) - tail = 0; - } - srq->rq.wq = wq; - srq->rq.size = size; - wq->head = n; - wq->tail = 0; - if (attr_mask & IB_SRQ_LIMIT) - srq->limit = attr->srq_limit; - spin_unlock_irq(&srq->rq.lock); - - vfree(owq); - - if (srq->ip) { - struct rvt_mmap_info *ip = srq->ip; - struct hfi1_ibdev *dev = to_idev(srq->ibsrq.device); - u32 s = sizeof(struct rvt_rwq) + size * sz; - - rvt_update_mmap_info(&dev->rdi, ip, s, wq); - - /* - * Return the offset to mmap. - * See hfi1_mmap() for details. - */ - if (udata && udata->inlen >= sizeof(__u64)) { - ret = ib_copy_to_udata(udata, &ip->offset, - sizeof(ip->offset)); - if (ret) - goto bail; - } - - /* - * Put user mapping info onto the pending list - * unless it already is on the list. 
- */ - spin_lock_irq(&dev->rdi.pending_lock); - if (list_empty(&ip->pending_mmaps)) - list_add(&ip->pending_mmaps, - &dev->rdi.pending_mmaps); - spin_unlock_irq(&dev->rdi.pending_lock); - } - } else if (attr_mask & IB_SRQ_LIMIT) { - spin_lock_irq(&srq->rq.lock); - if (attr->srq_limit >= srq->rq.size) - ret = -EINVAL; - else - srq->limit = attr->srq_limit; - spin_unlock_irq(&srq->rq.lock); - } - goto bail; - -bail_unlock: - spin_unlock_irq(&srq->rq.lock); -bail_free: - vfree(wq); -bail: - return ret; -} - -int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) -{ - struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); - - attr->max_wr = srq->rq.size - 1; - attr->max_sge = srq->rq.max_sge; - attr->srq_limit = srq->limit; - return 0; -} - -/** - * hfi1_destroy_srq - destroy a shared receive queue - * @ibsrq: the SRQ to destroy - */ -int hfi1_destroy_srq(struct ib_srq *ibsrq) -{ - struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); - struct hfi1_ibdev *dev = to_idev(ibsrq->device); - - spin_lock(&dev->n_srqs_lock); - dev->n_srqs_allocated--; - spin_unlock(&dev->n_srqs_lock); - if (srq->ip) - kref_put(&srq->ip->ref, rvt_release_mmap_info); - else - vfree(srq->rq.wq); - kfree(srq); - - return 0; -} diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index f5cc0b996966..a53d93a5245c 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1474,7 +1474,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) /* Only need to initialize non-zero fields. */ - spin_lock_init(&dev->n_srqs_lock); init_timer(&dev->mem_timer); dev->mem_timer.function = mem_timer; dev->mem_timer.data = (unsigned long) dev; @@ -1513,11 +1512,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->query_port = query_port; ibdev->modify_port = modify_port; ibdev->query_gid = query_gid; - ibdev->create_srq = hfi1_create_srq; - ibdev->modify_srq = hfi1_modify_srq; - ibdev->query_srq = hfi1_query_srq; - ibdev->destroy_srq = hfi1_destroy_srq; - ibdev->post_srq_recv = hfi1_post_srq_receive; /* keep process mad in the driver */ ibdev->process_mad = hfi1_process_mad; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index c845514d3abf..79bcab61d2ba 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -272,8 +272,6 @@ struct hfi1_ibdev { u64 n_kmem_wait; u64 n_send_schedule; - u32 n_srqs_allocated; /* number of SRQs allocated for device */ - spinlock_t n_srqs_lock; #ifdef CONFIG_DEBUG_FS /* per HFI debugfs */ struct dentry *hfi1_ibdev_dbg; @@ -431,21 +429,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet); int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey); -int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, - struct ib_recv_wr **bad_wr); - -struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd, - struct ib_srq_init_attr *srq_init_attr, - struct ib_udata *udata); - -int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask, - struct ib_udata *udata); - -int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); - -int hfi1_destroy_srq(struct ib_srq *ibsrq); - int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only); void hfi1_migrate_qp(struct rvt_qp *qp); -- cgit v1.2.3-59-g8ed1b From a9c05e350c17db98d82e8784ed0c05a78bd0169f Mon Sep 17 00:00:00 2001 From: Bryan Morgan Date: Wed, 3 Feb 2016 14:30:49 -0800 Subject: staging/rdma/hfi1: HFI reports wrong offline disabled reason when cable removed Removing QSFP cable 
should report 'No Local Media' instead of 'Transient' as reported by
'opaportinfo'. The workaround is to change the state to
OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED in the cable handler.

With the cable still removed, 'opaportinfo bounce' should not cause a state
change to Polling, as reported by 'opaportinfo'. The resolution is to prevent
the physical state change from Offline->Polling.

Use a macro to mask the lower nibble of OPA_LINKDOWN_REASON* as needed for
offline_disabled_reason.

Reviewed-by: Mike Marciniszyn
Reviewed-by: Easwar Hariharan
Reviewed-by: Dean Luick
Reported-by: Todd Rimmer
Signed-off-by: Bryan Morgan
Signed-off-by: Jubin John
Signed-off-by: Doug Ledford
---
 drivers/staging/rdma/hfi1/chip.c | 18 +++++++++++++++---
 drivers/staging/rdma/hfi1/hfi.h  |  2 ++
 drivers/staging/rdma/hfi1/intr.c |  3 ++-
 drivers/staging/rdma/hfi1/mad.c  | 24 +++++++++++++++---------
 4 files changed, 34 insertions(+), 13 deletions(-)

(limited to 'drivers/staging')

diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
index 93e152dd4228..16e2ff2b071d 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/staging/rdma/hfi1/chip.c
@@ -5857,6 +5857,16 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
				ASIC_QSFP2_INVERT :
				ASIC_QSFP1_INVERT,
				qsfp_int_mgmt);
+
+			if ((ppd->offline_disabled_reason >
+			  HFI1_ODR_MASK(
+			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
+			  (ppd->offline_disabled_reason ==
+			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
+				ppd->offline_disabled_reason =
+				HFI1_ODR_MASK(
+				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
+
			if (ppd->host_link_state == HLS_DN_POLL) {
				/*
				 * The link is still in POLL. This means
@@ -9615,9 +9625,10 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
				ret);
			return -EINVAL;
		}
-		if (ppd->offline_disabled_reason == OPA_LINKDOWN_REASON_NONE)
+		if (ppd->offline_disabled_reason ==
+		    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
			ppd->offline_disabled_reason =
-				OPA_LINKDOWN_REASON_TRANSIENT;
+				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
	}

	if (do_wait) {
@@ -9972,7 +9983,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
				ret = -EINVAL;
			}
		}
-		ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
+		ppd->offline_disabled_reason =
+			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
		/*
		 * If an error occurred above, go back to offline. The
		 * caller may reschedule another attempt.
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h
index e6a5fede0c02..57014b017a0d 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/staging/rdma/hfi1/hfi.h
@@ -99,6 +99,8 @@ extern unsigned long hfi1_cap_mask;
 #define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
 #define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
			HFI1_CAP_MISC_MASK)
+/* Offline Disabled Reason is 4-bits */
+#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)

 /*
  * Control context is always 0 and handles the error packets.
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c index 1283f2d9136c..9adab8638f21 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/staging/rdma/hfi1/intr.c @@ -152,7 +152,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) /* physical link went up */ ppd->linkup = 1; - ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE; + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); /* link widths are not available until the link is fully up */ get_linkup_link_widths(ppd); diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 9cadf77427a2..303dfeeed2bc 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -590,12 +590,11 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, pi->port_states.ledenable_offlinereason |= ppd->is_sm_config_started << 5; pi->port_states.ledenable_offlinereason |= - ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON; + ppd->offline_disabled_reason; #else pi->port_states.offline_reason = ppd->neighbor_normal << 4; pi->port_states.offline_reason |= ppd->is_sm_config_started << 5; - pi->port_states.offline_reason |= ppd->offline_disabled_reason & - OPA_PI_MASK_OFFLINE_REASON; + pi->port_states.offline_reason |= ppd->offline_disabled_reason; #endif /* PI_LED_ENABLE_SUP */ pi->port_states.portphysstate_portstate = @@ -929,6 +928,14 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd, physical_allowed == HFI_TRANSITION_IGNORED) return HFI_TRANSITION_IGNORED; + /* + * A change request of Physical Port State from + * 'Offline' to 'Polling' should be ignored. + */ + if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) && + (physical_new == IB_PORTPHYSSTATE_POLLING)) + return HFI_TRANSITION_IGNORED; + /* * Either physical_allowed or logical_allowed is * HFI_TRANSITION_ALLOWED. @@ -993,11 +1000,11 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp, set_link_state(ppd, link_state); if (link_state == HLS_DN_DISABLE && (ppd->offline_disabled_reason > - OPA_LINKDOWN_REASON_SMA_DISABLED || + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) || ppd->offline_disabled_reason == - OPA_LINKDOWN_REASON_NONE)) + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))) ppd->offline_disabled_reason = - OPA_LINKDOWN_REASON_SMA_DISABLED; + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED); /* * Don't send a reply if the response would be sent * through the disabled port. @@ -1710,12 +1717,11 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, psi->port_states.ledenable_offlinereason |= ppd->is_sm_config_started << 5; psi->port_states.ledenable_offlinereason |= - ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON; + ppd->offline_disabled_reason; #else psi->port_states.offline_reason = ppd->neighbor_normal << 4; psi->port_states.offline_reason |= ppd->is_sm_config_started << 5; - psi->port_states.offline_reason |= ppd->offline_disabled_reason & - OPA_PI_MASK_OFFLINE_REASON; + psi->port_states.offline_reason |= ppd->offline_disabled_reason; #endif /* PI_LED_ENABLE_SUP */ psi->port_states.portphysstate_portstate = -- cgit v1.2.3-59-g8ed1b From 76ef8c0798d3377fd58a1ef083d65b4528682db4 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Wed, 3 Feb 2016 14:30:57 -0800 Subject: staging/rdma/hfi1: cleanup messages on qsfp_read() failure The ":" in "%s:" adds no value. 
Reviewed-by: Dean Luick Signed-off-by: Easwar Hariharan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qsfp.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index 6326a915d7fd..6e9c56fe27e7 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -330,48 +330,48 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) /* all */ ret = qsfp_read(ppd, target, 384, cache + 256, 128); if (ret <= 0 || ret != 128) { - dd_dev_info(ppd->dd, "%s: failed\n", __func__); + dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } ret = qsfp_read(ppd, target, 640, cache + 384, 128); if (ret <= 0 || ret != 128) { - dd_dev_info(ppd->dd, "%s: failed\n", __func__); + dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } ret = qsfp_read(ppd, target, 896, cache + 512, 128); if (ret <= 0 || ret != 128) { - dd_dev_info(ppd->dd, "%s: failed\n", __func__); + dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } } else if ((cache[195] & 0x80) == 0x80) { /* only page 2 and 3 */ ret = qsfp_read(ppd, target, 640, cache + 384, 128); if (ret <= 0 || ret != 128) { - dd_dev_info(ppd->dd, "%s: failed\n", __func__); + dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } ret = qsfp_read(ppd, target, 896, cache + 512, 128); if (ret <= 0 || ret != 128) { - dd_dev_info(ppd->dd, "%s: failed\n", __func__); + dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } } else if ((cache[195] & 0x40) == 0x40) { /* only page 1 and 3 */ ret = qsfp_read(ppd, target, 384, cache + 256, 128); if (ret <= 0 || ret != 128) { - dd_dev_info(ppd->dd, "%s: failed\n", __func__); + dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } ret = qsfp_read(ppd, target, 896, cache + 512, 128); if (ret <= 0 || ret != 128) { - dd_dev_info(ppd->dd, "%s: failed\n", __func__); + dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } } else { /* only page 3 */ ret = qsfp_read(ppd, target, 896, cache + 512, 128); if (ret <= 0 || ret != 128) { - dd_dev_info(ppd->dd, "%s: failed\n", __func__); + dd_dev_info(ppd->dd, "%s failed\n", __func__); goto bail; } } -- cgit v1.2.3-59-g8ed1b From c7cb7635d91d9126431159ee7f90b7137c908e89 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 3 Feb 2016 14:31:05 -0800 Subject: staging/rdma/hfi1: Fix QSFP memory read/write across 128 byte boundary The QSFP memory cache reads both lower and upper page 0H in one shot, which leads to the address counter wrapping around to the beginning of lower page 00H at byte 128, as defined by SFF-8636. This patch fixes this by modifying the underlying QSFP read and writes to avoid this wrap around. 
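The fix clamps each transfer so that it never crosses a 128-byte boundary;
the clamping arithmetic, as it appears in the diff below, is:

	/* truncate read to boundary if crossing boundary */
	nread = len - count;
	if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY)
		nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);

For example, a 100-byte read starting at address 100 is truncated to 28 bytes
(128 - 100), and the loop then continues at address 128, issuing the
remaining 72 bytes as a separate transfer.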
Reviewed-by: Dean Luick Reviewed-by: Ira Weiny Signed-off-by: Easwar Hariharan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qsfp.c | 44 ++++++++++++++++++++++++++-------------- drivers/staging/rdma/hfi1/qsfp.h | 28 +++++++++++++++++-------- 2 files changed, 49 insertions(+), 23 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index 6e9c56fe27e7..0d2ec972ea9f 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -186,6 +186,10 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, return ret; } +/* + * Write page n, offset m of QSFP memory as defined by SFF 8636 + * in the cache by writing @addr = ((256 * n) + m) + */ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) { @@ -217,15 +221,15 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, break; } - /* truncate write to end of page if crossing page boundary */ offset = addr % QSFP_PAGESIZE; nwrite = len - count; - if ((offset + nwrite) > QSFP_PAGESIZE) - nwrite = QSFP_PAGESIZE - offset; + /* truncate write to boundary if crossing boundary */ + if (((addr % QSFP_RW_BOUNDARY) + nwrite) > QSFP_RW_BOUNDARY) + nwrite = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY); ret = __i2c_write(ppd, target, QSFP_DEV, offset, bp + count, nwrite); - if (ret <= 0) /* stop on error or nothing read */ + if (ret <= 0) /* stop on error or nothing written */ break; count += ret; @@ -239,6 +243,10 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, return count; } +/* + * Access page n, offset m of QSFP memory as defined by SFF 8636 + * in the cache by reading @addr = ((256 * n) + m) + */ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) { @@ -269,11 +277,11 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, break; } - /* truncate read to end of page if crossing page boundary */ offset = addr % QSFP_PAGESIZE; nread = len - count; - if ((offset + nread) > QSFP_PAGESIZE) - nread = QSFP_PAGESIZE - offset; + /* truncate read to boundary if crossing boundary */ + if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY) + nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY); ret = __i2c_read(ppd, target, QSFP_DEV, offset, bp + count, nread); @@ -295,6 +303,11 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, * This function caches the QSFP memory range in 128 byte chunks. * As an example, the next byte after address 255 is byte 128 from * upper page 01H (if existing) rather than byte 0 from lower page 00H. 
+ * Access page n, offset m of QSFP memory as defined by SFF 8636 + * in the cache by reading byte ((128 * n) + m) + * The calls to qsfp_{read,write} in this function correctly handle the + * address map difference between this mapping and the mapping implemented + * by those functions */ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) { @@ -305,23 +318,24 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) /* ensure sane contents on invalid reads, for cable swaps */ memset(cache, 0, (QSFP_MAX_NUM_PAGES*128)); - dd_dev_info(ppd->dd, "%s: called\n", __func__); + spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); + ppd->qsfp_info.cache_valid = 0; + spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); + + dd_dev_info(ppd->dd, "%s called\n", __func__); if (!qsfp_mod_present(ppd)) { ret = -ENODEV; goto bail; } - ret = qsfp_read(ppd, target, 0, cache, 256); - if (ret != 256) { + ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE); + if (ret != QSFP_PAGESIZE) { dd_dev_info(ppd->dd, - "%s: Read of pages 00H failed, expected 256, got %d\n", - __func__, ret); + "%s: Page 0 read failed, expected %d, got %d\n", + __func__, QSFP_PAGESIZE, ret); goto bail; } - if (cache[0] != 0x0C && cache[0] != 0x0D) - goto bail; - /* Is paging enabled? */ if (!(cache[2] & 4)) { diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h index 16aebdc7f679..34222501cc33 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/staging/rdma/hfi1/qsfp.h @@ -67,15 +67,16 @@ /* QSFP is paged at 256 bytes */ #define QSFP_PAGESIZE 256 +/* Reads/writes cannot cross 128 byte boundaries */ +#define QSFP_RW_BOUNDARY 128 /* Defined fields that Intel requires of qualified cables */ /* Byte 0 is Identifier, not checked */ /* Byte 1 is reserved "status MSB" */ -/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */ -/* - * Rest of first 128 not used, although 127 is reserved for page select - * if module is not "Flat memory". - */ +#define QSFP_TX_CTRL_BYTE_OFFS 86 +#define QSFP_PWR_CTRL_BYTE_OFFS 93 +#define QSFP_CDR_CTRL_BYTE_OFFS 98 + #define QSFP_PAGE_SELECT_BYTE_OFFS 127 /* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */ #define QSFP_MOD_ID_OFFS 128 @@ -87,7 +88,8 @@ /* Byte 130 is Connector type. Not Intel req'd */ /* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */ /* Byte 139 is encoding. code 0x01 is 8b10b. Not Intel req'd */ -/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not Intel req'd */ +/* byte 140 is nominal bit-rate, in units of 100Mbits/sec */ +#define QSFP_NOM_BIT_RATE_100_OFFS 140 /* Byte 141 is Extended Rate Select. Not Intel req'd */ /* Bytes 142..145 are lengths for various fiber types. Not Intel req'd */ /* Byte 146 is length for Copper. Units of 1 meter */ @@ -135,11 +137,18 @@ extern const char *const hfi1_qsfp_devtech[16]; */ #define QSFP_ATTEN_OFFS 186 #define QSFP_ATTEN_LEN 2 -/* Bytes 188,189 are Wavelength tolerance, not Intel req'd */ +/* + * Bytes 188,189 are Wavelength tolerance, if optical + * If copper, they are attenuation in dB: + * Byte 188 is at 12.5 Gb/s, Byte 189 at 25 Gb/s + */ +#define QSFP_CU_ATTEN_7G_OFFS 188 +#define QSFP_CU_ATTEN_12G_OFFS 189 /* Byte 190 is Max Case Temp. Not Intel req'd */ /* Byte 191 is LSB of sum of bytes 128..190. Not Intel req'd */ #define QSFP_CC_OFFS 191 -/* Bytes 192..195 are Options implemented in qsfp. 
Not Intel req'd */ +#define QSFP_EQ_INFO_OFFS 193 +#define QSFP_CDR_INFO_OFFS 194 /* Bytes 196..211 are Serial Number, String */ #define QSFP_SN_OFFS 196 #define QSFP_SN_LEN 16 @@ -150,6 +159,8 @@ extern const char *const hfi1_qsfp_devtech[16]; #define QSFP_LOT_OFFS 218 #define QSFP_LOT_LEN 2 /* Bytes 220, 221 indicate monitoring options, Not Intel req'd */ +/* Byte 222 indicates nominal bitrate in units of 250Mbits/sec */ +#define QSFP_NOM_BIT_RATE_250_OFFS 222 /* Byte 223 is LSB of sum of bytes 192..222 */ #define QSFP_CC_EXT_OFFS 223 @@ -191,6 +202,7 @@ extern const char *const hfi1_qsfp_devtech[16]; */ #define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3) +#define QSFP_HIGH_PWR(pbyte) (((pbyte) & 3) | 4) #define QSFP_ATTEN_SDR(attenarray) (attenarray[0]) #define QSFP_ATTEN_DDR(attenarray) (attenarray[1]) -- cgit v1.2.3-59-g8ed1b From 8ebd4cf1852afb56773ce8818da22157bfffa900 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Wed, 3 Feb 2016 14:31:14 -0800 Subject: staging/rdma/hfi1: Add active and optical cable support This patch qualifies and tunes active and optical cables for optimal bit error rate and signal integrity settings. These settings are fetched from the platform configuration data. Based on attributes of the QSFP cable as read from the SFF-8636 compliant memory map, we select the appropriate settings from the platform configuration data (examples: TX/RX equalization, enabling cable high power, enabling TX/RX clock data recovery mechanisms, and RX amplitude control) and apply them to the SERDES and QSFP cable. The platform configuration data also contains system parameters such as maximum power dissipation supported, and the cables are qualified based on these parameters. As part of qualifying the cables, the correct OfflineDisabledReasons are set for the appropriate scenarios. 
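At a high level (a hedged sketch of the flow visible in the diff below; the
selection logic itself lives in the new platform.c), every link (re)start
path now tunes the SerDes and cable before bringing the link up:

	set_link_state(ppd, HLS_DN_OFFLINE);
	/* pick TX/RX settings from platform config and QSFP cable data */
	tune_serdes(ppd);
	start_link(ppd);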
Reviewed-by: Dean Luick Reviewed-by: Mike Marciniszyn Reviewed-by: Brent R Rothermel Signed-off-by: Easwar Hariharan Signed-off-by: Jubin John Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/chip.c | 296 +++++----- drivers/staging/rdma/hfi1/chip.h | 8 +- drivers/staging/rdma/hfi1/hfi.h | 5 +- drivers/staging/rdma/hfi1/init.c | 10 +- drivers/staging/rdma/hfi1/platform.c | 838 ++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/platform.h | 298 ++++++++++ drivers/staging/rdma/hfi1/platform_config.h | 286 ---------- drivers/staging/rdma/hfi1/qsfp.h | 3 +- 9 files changed, 1302 insertions(+), 444 deletions(-) create mode 100644 drivers/staging/rdma/hfi1/platform.c create mode 100644 drivers/staging/rdma/hfi1/platform.h delete mode 100644 drivers/staging/rdma/hfi1/platform_config.h (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index ca2dea54e37c..9b3f7e9f0796 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o hfi1-y := chip.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ - init.o intr.o mad.o pcie.o pio.o pio_copy.o \ + init.o intr.o mad.o pcie.o pio.o pio_copy.o platform.o \ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o hfi1-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 16e2ff2b071d..4d70a960ff54 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -64,6 +64,7 @@ #include "sdma.h" #include "eprom.h" #include "efivar.h" +#include "platform.h" #define NUM_IB_PORTS 1 @@ -5826,7 +5827,7 @@ static void is_various_int(struct hfi1_devdata *dd, unsigned int source) static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) { - /* source is always zero */ + /* src_ctx is always zero */ struct hfi1_pportdata *ppd = dd->pport; unsigned long flags; u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); @@ -5849,14 +5850,13 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) * an interrupt when a cable is inserted */ ppd->qsfp_info.cache_valid = 0; - ppd->qsfp_info.qsfp_interrupt_functional = 0; + ppd->qsfp_info.reset_needed = 0; + ppd->qsfp_info.limiting_active = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); - write_csr(dd, - dd->hfi1_id ? - ASIC_QSFP2_INVERT : - ASIC_QSFP1_INVERT, - qsfp_int_mgmt); + /* Invert the ModPresent pin now to detect plug-in */ + write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : + ASIC_QSFP1_INVERT, qsfp_int_mgmt); if ((ppd->offline_disabled_reason > HFI1_ODR_MASK( @@ -5883,12 +5883,16 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); + /* + * Stop inversion of ModPresent pin to detect + * removal of the cable + */ qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N; - write_csr(dd, - dd->hfi1_id ? - ASIC_QSFP2_INVERT : - ASIC_QSFP1_INVERT, - qsfp_int_mgmt); + write_csr(dd, dd->hfi1_id ? 
ASIC_QSFP2_INVERT : + ASIC_QSFP1_INVERT, qsfp_int_mgmt); + + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); } } @@ -5898,7 +5902,6 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) __func__); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.check_interrupt_flags = 1; - ppd->qsfp_info.qsfp_interrupt_functional = 1; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); } @@ -6666,6 +6669,7 @@ void handle_link_up(struct work_struct *work) set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, OPA_LINKDOWN_REASON_SPEED_POLICY); set_link_state(ppd, HLS_DN_OFFLINE); + tune_serdes(ppd); start_link(ppd); } } @@ -6691,7 +6695,13 @@ void handle_link_down(struct work_struct *work) struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, link_down_work); - /* go offline first, then deal with reasons */ + if ((ppd->host_link_state & + (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) && + ppd->port_type == PORT_TYPE_FIXED) + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED); + + /* Go offline first, then deal with reading/writing through 8051 */ set_link_state(ppd, HLS_DN_OFFLINE); lcl_reason = 0; @@ -6713,10 +6723,12 @@ void handle_link_down(struct work_struct *work) /* If there is no cable attached, turn the DC off. Otherwise, * start the link bring up. */ - if (!qsfp_mod_present(ppd)) + if (!qsfp_mod_present(ppd)) { dc_shutdown(ppd->dd); - else + } else { + tune_serdes(ppd); start_link(ppd); + } } void handle_link_bounce(struct work_struct *work) @@ -6729,6 +6741,7 @@ void handle_link_bounce(struct work_struct *work) */ if (ppd->host_link_state & HLS_UP) { set_link_state(ppd, HLS_DN_OFFLINE); + tune_serdes(ppd); start_link(ppd); } else { dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", @@ -7237,6 +7250,7 @@ done: set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0, OPA_LINKDOWN_REASON_WIDTH_POLICY); set_link_state(ppd, HLS_DN_OFFLINE); + tune_serdes(ppd); start_link(ppd); } } @@ -8235,8 +8249,8 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state) return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL); } -static int load_8051_config(struct hfi1_devdata *dd, u8 field_id, - u8 lane_id, u32 config_data) +int load_8051_config(struct hfi1_devdata *dd, u8 field_id, + u8 lane_id, u32 config_data) { u64 data; int ret; @@ -8258,8 +8272,8 @@ static int load_8051_config(struct hfi1_devdata *dd, u8 field_id, * set the result, even on error. * Return 0 on success, -errno on failure */ -static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, - u32 *result) +int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, + u32 *result) { u64 big_data; u32 addr; @@ -8881,32 +8895,80 @@ int start_link(struct hfi1_pportdata *ppd) return -EAGAIN; } -static void reset_qsfp(struct hfi1_pportdata *ppd) +static void wait_for_qsfp_init(struct hfi1_pportdata *ppd) +{ + struct hfi1_devdata *dd = ppd->dd; + u64 mask; + unsigned long timeout; + + /* + * Check for QSFP interrupt for t_init (SFF 8679) + */ + timeout = jiffies + msecs_to_jiffies(2000); + while (1) { + mask = read_csr(dd, dd->hfi1_id ? + ASIC_QSFP2_IN : ASIC_QSFP1_IN); + if (!(mask & QSFP_HFI0_INT_N)) { + write_csr(dd, dd->hfi1_id ? 
ASIC_QSFP2_CLEAR : + ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N); + break; + } + if (time_after(jiffies, timeout)) { + dd_dev_info(dd, "%s: No IntN detected, reset complete\n", + __func__); + break; + } + udelay(2); + } +} + +static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable) +{ + struct hfi1_devdata *dd = ppd->dd; + u64 mask; + + mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); + if (enable) + mask |= (u64)QSFP_HFI0_INT_N; + else + mask &= ~(u64)QSFP_HFI0_INT_N; + write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); +} + +void reset_qsfp(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; u64 mask, qsfp_mask; + /* Disable INT_N from triggering QSFP interrupts */ + set_qsfp_int_n(ppd, 0); + + /* Reset the QSFP */ mask = (u64)QSFP_HFI0_RESET_N; - qsfp_mask = read_csr(dd, - dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE); + qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE); qsfp_mask |= mask; write_csr(dd, - dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, - qsfp_mask); + dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask); - qsfp_mask = read_csr(dd, - dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); + qsfp_mask = read_csr(dd, dd->hfi1_id ? + ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); qsfp_mask &= ~mask; write_csr(dd, - dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, - qsfp_mask); + dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); udelay(10); qsfp_mask |= mask; write_csr(dd, - dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, - qsfp_mask); + dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); + + wait_for_qsfp_init(ppd); + + /* + * Allow INT_N to trigger the QSFP interrupt to watch + * for alarms and warnings + */ + set_qsfp_int_n(ppd, 1); } static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, @@ -9018,35 +9080,8 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, return 0; } -static int do_pre_lni_host_behaviors(struct hfi1_pportdata *ppd) -{ - refresh_qsfp_cache(ppd, &ppd->qsfp_info); - - return 0; -} - -static int do_qsfp_intr_fallback(struct hfi1_pportdata *ppd) -{ - struct hfi1_devdata *dd = ppd->dd; - u8 qsfp_interrupt_status = 0; - - if (qsfp_read(ppd, dd->hfi1_id, 2, &qsfp_interrupt_status, 1) - != 1) { - dd_dev_info(dd, - "%s: Failed to read status of QSFP module\n", - __func__); - return -EIO; - } - - /* We don't care about alarms & warnings with a non-functional INT_N */ - if (!(qsfp_interrupt_status & QSFP_DATA_NOT_READY)) - do_pre_lni_host_behaviors(ppd); - - return 0; -} - /* This routine will only be scheduled if the QSFP module is present */ -static void qsfp_event(struct work_struct *work) +void qsfp_event(struct work_struct *work) { struct qsfp_data *qd; struct hfi1_pportdata *ppd; @@ -9068,20 +9103,20 @@ static void qsfp_event(struct work_struct *work) dc_start(dd); if (qd->cache_refresh_required) { - msleep(3000); - reset_qsfp(ppd); - /* Check for QSFP interrupt after t_init (SFF 8679) - * + extra + set_qsfp_int_n(ppd, 0); + + wait_for_qsfp_init(ppd); + + /* + * Allow INT_N to trigger the QSFP interrupt to watch + * for alarms and warnings */ - msleep(3000); - if (!qd->qsfp_interrupt_functional) { - if (do_qsfp_intr_fallback(ppd) < 0) - dd_dev_info(dd, "%s: QSFP fallback failed\n", - __func__); - ppd->driver_link_ready = 1; - start_link(ppd); - } + set_qsfp_int_n(ppd, 1); + + tune_serdes(ppd); + + start_link(ppd); } if (qd->check_interrupt_flags) { @@ -9094,50 +9129,50 @@ static void qsfp_event(struct work_struct *work) __func__); } else { unsigned long flags; - 
u8 data_status; + handle_qsfp_error_conditions( + ppd, qsfp_interrupt_status); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.check_interrupt_flags = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); - - if (qsfp_read(ppd, dd->hfi1_id, 2, &data_status, 1) - != 1) { - dd_dev_info(dd, - "%s: Failed to read status of QSFP module\n", - __func__); - } - if (!(data_status & QSFP_DATA_NOT_READY)) { - do_pre_lni_host_behaviors(ppd); - start_link(ppd); - } else - handle_qsfp_error_conditions(ppd, - qsfp_interrupt_status); } } } -void init_qsfp(struct hfi1_pportdata *ppd) +static void init_qsfp_int(struct hfi1_devdata *dd) { - struct hfi1_devdata *dd = ppd->dd; - u64 qsfp_mask; + struct hfi1_pportdata *ppd = dd->pport; + u64 qsfp_mask, cce_int_mask; + const int qsfp1_int_smask = QSFP1_INT % 64; + const int qsfp2_int_smask = QSFP2_INT % 64; - if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB || - ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { - ppd->driver_link_ready = 1; - return; + /* + * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0 + * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR, + * therefore just one of QSFP1_INT/QSFP2_INT can be used to find + * the index of the appropriate CSR in the CCEIntMask CSR array + */ + cce_int_mask = read_csr(dd, CCE_INT_MASK + + (8 * (QSFP1_INT / 64))); + if (dd->hfi1_id) { + cce_int_mask &= ~((u64)1 << qsfp1_int_smask); + write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)), + cce_int_mask); + } else { + cce_int_mask &= ~((u64)1 << qsfp2_int_smask); + write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)), + cce_int_mask); } - ppd->qsfp_info.ppd = ppd; - INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); - qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); /* Clear current status to avoid spurious interrupts */ - write_csr(dd, - dd->hfi1_id ? - ASIC_QSFP2_CLEAR : - ASIC_QSFP1_CLEAR, - qsfp_mask); + write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, + qsfp_mask); + write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, + qsfp_mask); + + set_qsfp_int_n(ppd, 0); /* Handle active low nature of INT_N and MODPRST_N pins */ if (qsfp_mod_present(ppd)) @@ -9145,29 +9180,6 @@ void init_qsfp(struct hfi1_pportdata *ppd) write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, qsfp_mask); - - /* Allow only INT_N and MODPRST_N to trigger QSFP interrupts */ - qsfp_mask |= (u64)QSFP_HFI0_MODPRST_N; - write_csr(dd, - dd->hfi1_id ? 
ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, - qsfp_mask); - - if (qsfp_mod_present(ppd)) { - msleep(3000); - reset_qsfp(ppd); - - /* Check for QSFP interrupt after t_init (SFF 8679) - * + extra - */ - msleep(3000); - if (!ppd->qsfp_info.qsfp_interrupt_functional) { - if (do_qsfp_intr_fallback(ppd) < 0) - dd_dev_info(dd, - "%s: QSFP fallback failed\n", - __func__); - ppd->driver_link_ready = 1; - } - } } /* @@ -9203,8 +9215,6 @@ int bringup_serdes(struct hfi1_pportdata *ppd) ppd->guid = guid; } - /* the link defaults to enabled */ - ppd->link_enabled = 1; /* Set linkinit_reason on power up per OPA spec */ ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP; @@ -9217,6 +9227,12 @@ int bringup_serdes(struct hfi1_pportdata *ppd) return ret; } + /* tune the SERDES to a ballpark setting for + * optimal signal and bit error rate + * Needs to be done before starting the link + */ + tune_serdes(ppd); + return start_link(ppd); } @@ -9234,6 +9250,8 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) ppd->driver_link_ready = 0; ppd->link_enabled = 0; + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED); set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0, OPA_LINKDOWN_REASON_SMA_DISABLED); set_link_state(ppd, HLS_DN_OFFLINE); @@ -9649,6 +9667,12 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ + if (ppd->port_type == PORT_TYPE_QSFP && + ppd->qsfp_info.limiting_active && + qsfp_mod_present(ppd)) { + set_qsfp_tx(ppd, 0); + } + /* * The LNI has a mandatory wait time after the physical state * moves to Offline.Quiet. The wait time may be different @@ -12078,31 +12102,11 @@ void set_intr_state(struct hfi1_devdata *dd, u32 enable) * In HFI, the mask needs to be 1 to allow interrupts. 
*/ if (enable) { - u64 cce_int_mask; - const int qsfp1_int_smask = QSFP1_INT % 64; - const int qsfp2_int_smask = QSFP2_INT % 64; - /* enable all interrupts */ for (i = 0; i < CCE_NUM_INT_CSRS; i++) write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0); - /* - * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0 - * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR, - * therefore just one of QSFP1_INT/QSFP2_INT can be used to find - * the index of the appropriate CSR in the CCEIntMask CSR array - */ - cce_int_mask = read_csr(dd, CCE_INT_MASK + - (8*(QSFP1_INT/64))); - if (dd->hfi1_id) { - cce_int_mask &= ~((u64)1 << qsfp1_int_smask); - write_csr(dd, CCE_INT_MASK + (8*(QSFP1_INT/64)), - cce_int_mask); - } else { - cce_int_mask &= ~((u64)1 << qsfp2_int_smask); - write_csr(dd, CCE_INT_MASK + (8*(QSFP2_INT/64)), - cce_int_mask); - } + init_qsfp_int(dd); } else { for (i = 0; i < CCE_NUM_INT_CSRS; i++) write_csr(dd, CCE_INT_MASK + (8*i), 0ull); diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 78ba42567f2b..5e4fe4363e25 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -371,6 +371,9 @@ #define NUM_LANE_FIELDS 0x8 /* 8051 general register Field IDs */ +#define LINK_OPTIMIZATION_SETTINGS 0x00 +#define LINK_TUNING_PARAMETERS 0x02 +#define DC_HOST_COMM_SETTINGS 0x03 #define TX_SETTINGS 0x06 #define VERIFY_CAP_LOCAL_PHY 0x07 #define VERIFY_CAP_LOCAL_FABRIC 0x08 @@ -647,10 +650,13 @@ void handle_link_down(struct work_struct *work); void handle_link_downgrade(struct work_struct *work); void handle_link_bounce(struct work_struct *work); void handle_sma_message(struct work_struct *work); +void reset_qsfp(struct hfi1_pportdata *ppd); +void qsfp_event(struct work_struct *work); void start_freeze_handling(struct hfi1_pportdata *ppd, int flags); int send_idle_sma(struct hfi1_devdata *dd, u64 message); +int load_8051_config(struct hfi1_devdata *, u8, u8, u32); +int read_8051_config(struct hfi1_devdata *, u8, u8, u32 *); int start_link(struct hfi1_pportdata *ppd); -void init_qsfp(struct hfi1_pportdata *ppd); int bringup_serdes(struct hfi1_pportdata *ppd); void set_intr_state(struct hfi1_devdata *dd, u32 enable); void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 57014b017a0d..f79b07002d1f 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -74,7 +74,7 @@ #include "chip.h" #include "mad.h" #include "qsfp.h" -#include "platform_config.h" +#include "platform.h" /* bumped 1 from s/w major version of TrueScale */ #define HFI1_CHIP_VERS_MAJ 3U @@ -563,7 +563,8 @@ struct hfi1_pportdata { struct kobject sl2sc_kobj; struct kobject vl2mtu_kobj; - /* QSFP support */ + /* PHY support */ + u32 port_type; struct qsfp_data qsfp_info; /* GUID for this interface, in host order */ diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 7def3f33ac87..354935f20437 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -500,10 +500,13 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, INIT_WORK(&ppd->sma_message_work, handle_sma_message); INIT_WORK(&ppd->link_bounce_work, handle_link_bounce); INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work); + INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); + mutex_init(&ppd->hls_lock); spin_lock_init(&ppd->sdma_alllock); spin_lock_init(&ppd->qsfp_info.qsfp_lock); + ppd->qsfp_info.ppd = 
ppd; ppd->sm_trap_qp = 0x0; ppd->sa_qp = 0x1; @@ -781,13 +784,6 @@ done: for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; - /* initialize the qsfp if it exists - * Requires interrupts to be enabled so we are notified - * when the QSFP completes reset, and has - * to be done before bringing up the SERDES - */ - init_qsfp(ppd); - /* start the serdes - must be after interrupts are enabled so we are notified when the link goes up */ lastfail = bringup_serdes(ppd); diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c new file mode 100644 index 000000000000..9aa5e06633de --- /dev/null +++ b/drivers/staging/rdma/hfi1/platform.c @@ -0,0 +1,838 @@ +/* + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include "hfi.h" + +int set_qsfp_tx(struct hfi1_pportdata *ppd, int on) +{ + u8 tx_ctrl_byte = on ? 
0x0 : 0xF; + int ret = 0; + + ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS, + &tx_ctrl_byte, 1); + /* we expected 1, so consider 0 an error */ + if (ret == 0) + ret = -EIO; + else if (ret == 1) + ret = 0; + return ret; +} + +static int qual_power(struct hfi1_pportdata *ppd) +{ + u32 cable_power_class = 0, power_class_max = 0; + u8 *cache = ppd->qsfp_info.cache; + int ret = 0; + + ret = get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0, + SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4); + if (ret) + return ret; + + if (QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]) != 4) + cable_power_class = QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]); + else + cable_power_class = QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]); + + if (cable_power_class <= 3 && cable_power_class > (power_class_max - 1)) + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY); + else if (cable_power_class > 4 && cable_power_class > (power_class_max)) + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY); + /* + * cable_power_class will never have value 4 as this simply + * means the high power settings are unused + */ + + if (ppd->offline_disabled_reason == + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) { + dd_dev_info( + ppd->dd, + "%s: Port disabled due to system power restrictions\n", + __func__); + ret = -EPERM; + } + return ret; +} + +static int qual_bitrate(struct hfi1_pportdata *ppd) +{ + u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled; + u8 *cache = ppd->qsfp_info.cache; + + if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) && + cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64) + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY); + + if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) && + cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D) + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY); + + if (ppd->offline_disabled_reason == + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) { + dd_dev_info( + ppd->dd, + "%s: Cable failed bitrate check, disabling port\n", + __func__); + return -EPERM; + } + return 0; +} + +static int set_qsfp_high_power(struct hfi1_pportdata *ppd) +{ + u8 cable_power_class = 0, power_ctrl_byte = 0; + u8 *cache = ppd->qsfp_info.cache; + int ret; + + if (QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]) != 4) + cable_power_class = QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]); + else + cable_power_class = QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]); + + if (cable_power_class) { + power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS]; + + power_ctrl_byte |= 1; + power_ctrl_byte &= ~(0x2); + + ret = qsfp_write(ppd, ppd->dd->hfi1_id, + QSFP_PWR_CTRL_BYTE_OFFS, + &power_ctrl_byte, 1); + if (ret != 1) + return -EIO; + + if (cable_power_class > 3) { + /* > power class 4*/ + power_ctrl_byte |= (1 << 2); + ret = qsfp_write(ppd, ppd->dd->hfi1_id, + QSFP_PWR_CTRL_BYTE_OFFS, + &power_ctrl_byte, 1); + if (ret != 1) + return -EIO; + } + + /* SFF 8679 rev 1.7 LPMode Deassert time */ + msleep(300); + } + return 0; +} + +static void apply_rx_cdr(struct hfi1_pportdata *ppd, + u32 rx_preset_index, + u8 *cdr_ctrl_byte) +{ + u32 rx_preset; + u8 *cache = ppd->qsfp_info.cache; + + if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) && + (cache[QSFP_CDR_INFO_OFFS] & 0x40))) + return; + + /* rx_preset preset to zero to catch error */ + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, + rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY, + &rx_preset, 4); + + 
if (!rx_preset) { + dd_dev_info( + ppd->dd, + "%s: RX_CDR_APPLY is set to disabled\n", + __func__); + return; + } + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, + rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR, + &rx_preset, 4); + + /* Expand cdr setting to all 4 lanes */ + rx_preset = (rx_preset | (rx_preset << 1) | + (rx_preset << 2) | (rx_preset << 3)); + + if (rx_preset) { + *cdr_ctrl_byte |= rx_preset; + } else { + *cdr_ctrl_byte &= rx_preset; + /* Preserve current TX CDR status */ + *cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0); + } +} + +static void apply_tx_cdr(struct hfi1_pportdata *ppd, + u32 tx_preset_index, + u8 *cdr_ctrl_byte) +{ + u32 tx_preset; + u8 *cache = ppd->qsfp_info.cache; + + if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) && + (cache[QSFP_CDR_INFO_OFFS] & 0x80))) + return; + + get_platform_config_field( + ppd->dd, + PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, + TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4); + + if (!tx_preset) { + dd_dev_info( + ppd->dd, + "%s: TX_CDR_APPLY is set to disabled\n", + __func__); + return; + } + get_platform_config_field( + ppd->dd, + PLATFORM_CONFIG_TX_PRESET_TABLE, + tx_preset_index, + TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4); + + /* Expand cdr setting to all 4 lanes */ + tx_preset = (tx_preset | (tx_preset << 1) | + (tx_preset << 2) | (tx_preset << 3)); + + if (tx_preset) + *cdr_ctrl_byte |= (tx_preset << 4); + else + /* Preserve current/determined RX CDR status */ + *cdr_ctrl_byte &= ((tx_preset << 4) | 0xF); +} + +static void apply_cdr_settings( + struct hfi1_pportdata *ppd, u32 rx_preset_index, + u32 tx_preset_index) +{ + u8 *cache = ppd->qsfp_info.cache; + u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS]; + + apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte); + + apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte); + + qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS, + &cdr_ctrl_byte, 1); +} + +static void apply_tx_eq_auto(struct hfi1_pportdata *ppd) +{ + u8 *cache = ppd->qsfp_info.cache; + u8 tx_eq; + + if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8)) + return; + /* Disable adaptive TX EQ if present */ + tx_eq = cache[(128 * 3) + 241]; + tx_eq &= 0xF0; + qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1); +} + +static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index) +{ + u8 *cache = ppd->qsfp_info.cache; + u32 tx_preset; + u8 tx_eq; + + if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4)) + return; + + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, + tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY, + &tx_preset, 4); + if (!tx_preset) { + dd_dev_info( + ppd->dd, + "%s: TX_EQ_APPLY is set to disabled\n", + __func__); + return; + } + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, + tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ, + &tx_preset, 4); + + if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) { + dd_dev_info( + ppd->dd, + "%s: TX EQ %x unsupported\n", + __func__, tx_preset); + + dd_dev_info( + ppd->dd, + "%s: Applying EQ %x\n", + __func__, cache[608] & 0xF0); + + tx_preset = (cache[608] & 0xF0) >> 4; + } + + tx_eq = tx_preset | (tx_preset << 4); + qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1); + qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1); +} + +static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index) +{ + u32 rx_preset; + u8 rx_eq, *cache = ppd->qsfp_info.cache; + + if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2)) + return; + get_platform_config_field( + ppd->dd,
PLATFORM_CONFIG_RX_PRESET_TABLE, + rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY, + &rx_preset, 4); + + if (!rx_preset) { + dd_dev_info( + ppd->dd, + "%s: RX_EMP_APPLY is set to disabled\n", + __func__); + return; + } + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE, + rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP, + &rx_preset, 4); + + if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) { + dd_dev_info( + ppd->dd, + "%s: Requested RX EMP %x\n", + __func__, rx_preset); + + dd_dev_info( + ppd->dd, + "%s: Applying supported EMP %x\n", + __func__, cache[608] & 0xF); + + rx_preset = cache[608] & 0xF; + } + + rx_eq = rx_preset | (rx_preset << 4); + + qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1); + qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1); +} + +static void apply_eq_settings(struct hfi1_pportdata *ppd, + u32 rx_preset_index, u32 tx_preset_index) +{ + u8 *cache = ppd->qsfp_info.cache; + + /* no point going on w/o a page 3 */ + if (cache[2] & 4) { + dd_dev_info(ppd->dd, + "%s: Upper page 03 not present\n", + __func__); + return; + } + + apply_tx_eq_auto(ppd); + + apply_tx_eq_prog(ppd, tx_preset_index); + + apply_rx_eq_emp(ppd, rx_preset_index); +} + +static void apply_rx_amplitude_settings( + struct hfi1_pportdata *ppd, u32 rx_preset_index, + u32 tx_preset_index) +{ + u32 rx_preset; + u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache; + + /* no point going on w/o a page 3 */ + if (cache[2] & 4) { + dd_dev_info(ppd->dd, + "%s: Upper page 03 not present\n", + __func__); + return; + } + if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) { + dd_dev_info(ppd->dd, + "%s: RX_AMP_APPLY is set to disabled\n", + __func__); + return; + } + + get_platform_config_field(ppd->dd, + PLATFORM_CONFIG_RX_PRESET_TABLE, + rx_preset_index, + RX_PRESET_TABLE_QSFP_RX_AMP_APPLY, + &rx_preset, 4); + + if (!rx_preset) { + dd_dev_info(ppd->dd, + "%s: RX_AMP_APPLY is set to disabled\n", + __func__); + return; + } + get_platform_config_field(ppd->dd, + PLATFORM_CONFIG_RX_PRESET_TABLE, + rx_preset_index, + RX_PRESET_TABLE_QSFP_RX_AMP, + &rx_preset, 4); + + dd_dev_info(ppd->dd, + "%s: Requested RX AMP %x\n", + __func__, + rx_preset); + + for (i = 0; i < 4; i++) { + if (cache[(128 * 3) + 225] & (1 << i)) { + preferred = i; + if (preferred == rx_preset) + break; + } + } + + /* + * Verify that preferred RX amplitude is not just a + * fall through of the default + */ + if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) { + dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n"); + return; + } + + dd_dev_info(ppd->dd, + "%s: Applying RX AMP %x\n", __func__, preferred); + + rx_amp = preferred | (preferred << 4); + qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1); + qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1); +} + +#define OPA_INVALID_INDEX 0xFFF + +static void apply_tx_lanes(struct hfi1_pportdata *ppd, u32 config_data, + const char *message) +{ + u8 i; + int ret = HCMD_SUCCESS; + + for (i = 0; i < 4; i++) { + ret = load_8051_config(ppd->dd, 0, i, config_data); + if (ret != HCMD_SUCCESS) { + dd_dev_err( + ppd->dd, + "%s: %s for lane %u failed\n", + message, __func__, i); + } + } +} + +static void apply_tunings( + struct hfi1_pportdata *ppd, u32 tx_preset_index, + u8 tuning_method, u32 total_atten, u8 limiting_active) +{ + int ret = 0; + u32 config_data = 0, tx_preset = 0; + u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0; + u8 *cache = ppd->qsfp_info.cache; + + read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, + 
GENERAL_CONFIG, &config_data); + config_data |= limiting_active; + ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, + GENERAL_CONFIG, config_data); + if (ret != HCMD_SUCCESS) + dd_dev_err( + ppd->dd, + "%s: Failed to set enable external device config\n", + __func__); + + config_data = 0; /* re-init */ + read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, + &config_data); + config_data |= tuning_method; + ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, + config_data); + if (ret != HCMD_SUCCESS) + dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n", + __func__); + + external_device_config = + ((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) | + ((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) | + ((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) | + (cache[QSFP_EQ_INFO_OFFS] & 0x4); + + config_data = 0; /* re-init */ + read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, + &config_data); + config_data |= (external_device_config << 24); + ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, + config_data); + if (ret != HCMD_SUCCESS) + dd_dev_err( + ppd->dd, + "%s: Failed to set external device config parameters\n", + __func__); + + config_data = 0; /* re-init */ + read_8051_config(ppd->dd, TX_SETTINGS, GENERAL_CONFIG, &config_data); + if ((ppd->link_speed_supported & OPA_LINK_SPEED_25G) && + (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)) + config_data |= 0x02; + if ((ppd->link_speed_supported & OPA_LINK_SPEED_12_5G) && + (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)) + config_data |= 0x01; + ret = load_8051_config(ppd->dd, TX_SETTINGS, GENERAL_CONFIG, + config_data); + if (ret != HCMD_SUCCESS) + dd_dev_err( + ppd->dd, + "%s: Failed to set external device config parameters\n", + __func__); + + config_data = (total_atten << 8) | (total_atten); + + apply_tx_lanes(ppd, config_data, "Setting channel loss"); + + if (tx_preset_index == OPA_INVALID_INDEX) + return; + + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, + TX_PRESET_TABLE_PRECUR, &tx_preset, 4); + precur = tx_preset; + + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, + tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4); + attn = tx_preset; + + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, + tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4); + postcur = tx_preset; + + config_data = precur | (attn << 8) | (postcur << 16); + + apply_tx_lanes(ppd, config_data, "Applying TX settings"); +} + +static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, + u32 *ptr_rx_preset, u32 *ptr_total_atten) +{ + int ret = 0; + u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled; + u8 *cache = ppd->qsfp_info.cache; + + ppd->qsfp_info.limiting_active = 1; + + ret = set_qsfp_tx(ppd, 0); + if (ret) + return ret; + + ret = qual_power(ppd); + if (ret) + return ret; + + ret = qual_bitrate(ppd); + if (ret) + return ret; + + if (ppd->qsfp_info.reset_needed) { + reset_qsfp(ppd); + ppd->qsfp_info.reset_needed = 0; + refresh_qsfp_cache(ppd, &ppd->qsfp_info); + } else { + ppd->qsfp_info.reset_needed = 1; + } + + ret = set_qsfp_high_power(ppd); + if (ret) + return ret; + + if (cache[QSFP_EQ_INFO_OFFS] & 0x4) { + ret = get_platform_config_field( + ppd->dd, + PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ, + ptr_tx_preset, 4); + if (ret) { + *ptr_tx_preset = OPA_INVALID_INDEX; + return ret; + } + } else { + ret = get_platform_config_field( + ppd->dd, + 
PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ, + ptr_tx_preset, 4); + if (ret) { + *ptr_tx_preset = OPA_INVALID_INDEX; + return ret; + } + } + + ret = get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4); + if (ret) { + *ptr_rx_preset = OPA_INVALID_INDEX; + return ret; + } + + if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G)) + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4); + else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G)) + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4); + + apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset); + + apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset); + + apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset); + + ret = set_qsfp_tx(ppd, 1); + return ret; +} + +static int tune_qsfp(struct hfi1_pportdata *ppd, + u32 *ptr_tx_preset, u32 *ptr_rx_preset, + u8 *ptr_tuning_method, u32 *ptr_total_atten) +{ + u32 cable_atten = 0, remote_atten = 0, platform_atten = 0; + u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled; + int ret = 0; + u8 *cache = ppd->qsfp_info.cache; + + switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) { + case 0xA ... 0xB: + ret = get_platform_config_field( + ppd->dd, + PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_LOCAL_ATTEN_25G, + &platform_atten, 4); + if (ret) + return ret; + + if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G)) + cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS]; + else if ((lss & OPA_LINK_SPEED_12_5G) && + (lse & OPA_LINK_SPEED_12_5G)) + cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS]; + + /* Fallback to configured attenuation if cable memory is bad */ + if (cable_atten == 0 || cable_atten > 36) { + ret = get_platform_config_field( + ppd->dd, + PLATFORM_CONFIG_SYSTEM_TABLE, 0, + SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G, + &cable_atten, 4); + if (ret) + return ret; + } + + ret = get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4); + if (ret) + return ret; + + *ptr_total_atten = platform_atten + cable_atten + remote_atten; + + *ptr_tuning_method = OPA_PASSIVE_TUNING; + break; + case 0x0 ... 0x9: /* fallthrough */ + case 0xC: /* fallthrough */ + case 0xE: + ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset, + ptr_total_atten); + if (ret) + return ret; + + *ptr_tuning_method = OPA_ACTIVE_TUNING; + break; + case 0xD: /* fallthrough */ + case 0xF: + default: + dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n", + __func__); + break; + } + return ret; +} + +/* + * This function communicates its success or failure via ppd->driver_link_ready + * Thus, it depends on its association with start_link(...) which checks + * driver_link_ready before proceeding with the link negotiation and + * initialization process. 
+ */ +void tune_serdes(struct hfi1_pportdata *ppd) +{ + int ret = 0; + u32 total_atten = 0; + u32 remote_atten = 0, platform_atten = 0; + u32 rx_preset_index, tx_preset_index; + u8 tuning_method = 0; + struct hfi1_devdata *dd = ppd->dd; + + rx_preset_index = OPA_INVALID_INDEX; + tx_preset_index = OPA_INVALID_INDEX; + + /* the link defaults to enabled */ + ppd->link_enabled = 1; + /* the driver link ready state defaults to not ready */ + ppd->driver_link_ready = 0; + ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); + + if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB || + ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR || + !dd->pcfg_cache.cache_valid) { + ppd->driver_link_ready = 1; + return; + } + + ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_PORT_TYPE, &ppd->port_type, + 4); + if (ret) + goto bail; + + switch (ppd->port_type) { + case PORT_TYPE_DISCONNECTED: + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED); + dd_dev_info(dd, "%s: Port disconnected, disabling port\n", + __func__); + goto bail; + case PORT_TYPE_FIXED: + /* platform_atten, remote_atten pre-zeroed to catch error */ + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4); + + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4); + + total_atten = platform_atten + remote_atten; + + tuning_method = OPA_PASSIVE_TUNING; + break; + case PORT_TYPE_VARIABLE: + if (qsfp_mod_present(ppd)) { + /* + * platform_atten, remote_atten pre-zeroed to + * catch error + */ + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_LOCAL_ATTEN_25G, + &platform_atten, 4); + + get_platform_config_field( + ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0, + PORT_TABLE_REMOTE_ATTEN_25G, + &remote_atten, 4); + + total_atten = platform_atten + remote_atten; + + tuning_method = OPA_PASSIVE_TUNING; + } else + ppd->offline_disabled_reason = + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG); + break; + case PORT_TYPE_QSFP: + if (qsfp_mod_present(ppd)) { + refresh_qsfp_cache(ppd, &ppd->qsfp_info); + + if (ppd->qsfp_info.cache_valid) { + ret = tune_qsfp(ppd, + &tx_preset_index, + &rx_preset_index, + &tuning_method, + &total_atten); + if (ret) + goto bail; + } else { + dd_dev_err(dd, + "%s: Reading QSFP memory failed\n", + __func__); + goto bail; + } + } else + ppd->offline_disabled_reason = + HFI1_ODR_MASK( + OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED); + break; + default: + dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__); + break; + } + if (ppd->offline_disabled_reason == + HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) + apply_tunings(ppd, tx_preset_index, tuning_method, + total_atten, + ppd->qsfp_info.limiting_active); + + if (ppd->port_type == PORT_TYPE_QSFP) + refresh_qsfp_cache(ppd, &ppd->qsfp_info); + + ppd->driver_link_ready = 1; + + return; +bail: + ppd->driver_link_ready = 0; +} diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/staging/rdma/hfi1/platform.h new file mode 100644 index 000000000000..5b53d71ddf96 --- /dev/null +++ b/drivers/staging/rdma/hfi1/platform.h @@ -0,0 +1,298 @@ +/* + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __PLATFORM_H +#define __PLATFORM_H + +#define METADATA_TABLE_FIELD_START_SHIFT 0 +#define METADATA_TABLE_FIELD_START_LEN_BITS 15 +#define METADATA_TABLE_FIELD_LEN_SHIFT 16 +#define METADATA_TABLE_FIELD_LEN_LEN_BITS 16 + +/* Header structure */ +#define PLATFORM_CONFIG_HEADER_RECORD_IDX_SHIFT 0 +#define PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS 6 +#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT 16 +#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS 12 +#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT 28 +#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS 4 + +enum platform_config_table_type_encoding { + PLATFORM_CONFIG_TABLE_RESERVED, + PLATFORM_CONFIG_SYSTEM_TABLE, + PLATFORM_CONFIG_PORT_TABLE, + PLATFORM_CONFIG_RX_PRESET_TABLE, + PLATFORM_CONFIG_TX_PRESET_TABLE, + PLATFORM_CONFIG_QSFP_ATTEN_TABLE, + PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE, + PLATFORM_CONFIG_TABLE_MAX +}; + +enum platform_config_system_table_fields { + SYSTEM_TABLE_RESERVED, + SYSTEM_TABLE_NODE_STRING, + SYSTEM_TABLE_SYSTEM_IMAGE_GUID, + SYSTEM_TABLE_NODE_GUID, + SYSTEM_TABLE_REVISION, + SYSTEM_TABLE_VENDOR_OUI, + SYSTEM_TABLE_META_VERSION, + SYSTEM_TABLE_DEVICE_ID, + SYSTEM_TABLE_PARTITION_ENFORCEMENT_CAP, + SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, + SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_12G, + SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G, + SYSTEM_TABLE_VARIABLE_TABLE_ENTRIES_PER_PORT, + SYSTEM_TABLE_MAX +}; + +enum platform_config_port_table_fields { + PORT_TABLE_RESERVED, + PORT_TABLE_PORT_TYPE, + PORT_TABLE_LOCAL_ATTEN_12G, + PORT_TABLE_LOCAL_ATTEN_25G, + PORT_TABLE_LINK_SPEED_SUPPORTED, + PORT_TABLE_LINK_WIDTH_SUPPORTED, + PORT_TABLE_AUTO_LANE_SHEDDING_ENABLED, + PORT_TABLE_EXTERNAL_LOOPBACK_ALLOWED, + PORT_TABLE_VL_CAP, + PORT_TABLE_MTU_CAP, + PORT_TABLE_TX_LANE_ENABLE_MASK, + PORT_TABLE_LOCAL_MAX_TIMEOUT, + PORT_TABLE_REMOTE_ATTEN_12G, + PORT_TABLE_REMOTE_ATTEN_25G, + PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ, + PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ, + PORT_TABLE_RX_PRESET_IDX, + PORT_TABLE_CABLE_REACH_CLASS, + PORT_TABLE_MAX +}; + +enum platform_config_rx_preset_table_fields { + RX_PRESET_TABLE_RESERVED, + RX_PRESET_TABLE_QSFP_RX_CDR_APPLY, + RX_PRESET_TABLE_QSFP_RX_EMP_APPLY, + RX_PRESET_TABLE_QSFP_RX_AMP_APPLY, + RX_PRESET_TABLE_QSFP_RX_CDR, + RX_PRESET_TABLE_QSFP_RX_EMP, + RX_PRESET_TABLE_QSFP_RX_AMP, + RX_PRESET_TABLE_MAX +}; + +enum platform_config_tx_preset_table_fields { + TX_PRESET_TABLE_RESERVED, + TX_PRESET_TABLE_PRECUR, + TX_PRESET_TABLE_ATTN, + TX_PRESET_TABLE_POSTCUR, + TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, + TX_PRESET_TABLE_QSFP_TX_EQ_APPLY, + TX_PRESET_TABLE_QSFP_TX_CDR, + TX_PRESET_TABLE_QSFP_TX_EQ, + TX_PRESET_TABLE_MAX +}; + +enum platform_config_qsfp_attn_table_fields { + QSFP_ATTEN_TABLE_RESERVED, + QSFP_ATTEN_TABLE_TX_PRESET_IDX, + QSFP_ATTEN_TABLE_RX_PRESET_IDX, + QSFP_ATTEN_TABLE_MAX +}; + +enum platform_config_variable_settings_table_fields { + VARIABLE_SETTINGS_TABLE_RESERVED, + VARIABLE_SETTINGS_TABLE_TX_PRESET_IDX, + VARIABLE_SETTINGS_TABLE_RX_PRESET_IDX, + VARIABLE_SETTINGS_TABLE_MAX +}; + +struct platform_config_data { + u32 *table; + u32 *table_metadata; + u32 num_table; +}; + +/* + * This struct acts as a quick reference into the platform_data binary image + * and is populated by parse_platform_config(...) 
depending on the specific + * META_VERSION + */ +struct platform_config_cache { + u8 cache_valid; + struct platform_config_data config_tables[PLATFORM_CONFIG_TABLE_MAX]; +}; + +static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = { + 0, + SYSTEM_TABLE_MAX, + PORT_TABLE_MAX, + RX_PRESET_TABLE_MAX, + TX_PRESET_TABLE_MAX, + QSFP_ATTEN_TABLE_MAX, + VARIABLE_SETTINGS_TABLE_MAX +}; + +/* This section defines default values and encodings for the + * fields defined for each table above + */ + +/* + *===================================================== + * System table encodings + *==================================================== + */ +#define PLATFORM_CONFIG_MAGIC_NUM 0x3d4f5041 +#define PLATFORM_CONFIG_MAGIC_NUMBER_LEN 4 + +/* + * These power classes are the same as defined in SFF 8636 spec rev 2.4 + * describing byte 129 in table 6-16, except enumerated in a different order + */ +enum platform_config_qsfp_power_class_encoding { + QSFP_POWER_CLASS_1 = 1, + QSFP_POWER_CLASS_2, + QSFP_POWER_CLASS_3, + QSFP_POWER_CLASS_4, + QSFP_POWER_CLASS_5, + QSFP_POWER_CLASS_6, + QSFP_POWER_CLASS_7 +}; + +/* + *===================================================== + * Port table encodings + *==================================================== + */ +enum platform_config_port_type_encoding { + PORT_TYPE_UNKNOWN, + PORT_TYPE_DISCONNECTED, + PORT_TYPE_FIXED, + PORT_TYPE_VARIABLE, + PORT_TYPE_QSFP, + PORT_TYPE_MAX +}; + +enum platform_config_link_speed_supported_encoding { + LINK_SPEED_SUPP_12G = 1, + LINK_SPEED_SUPP_25G, + LINK_SPEED_SUPP_12G_25G, + LINK_SPEED_SUPP_MAX +}; + +/* + * This is a subset (not strict) of the link downgrades + * supported. The link downgrades supported are expected + * to be supplied to the driver by another entity such as + * the fabric manager + */ +enum platform_config_link_width_supported_encoding { + LINK_WIDTH_SUPP_1X = 1, + LINK_WIDTH_SUPP_2X, + LINK_WIDTH_SUPP_2X_1X, + LINK_WIDTH_SUPP_3X, + LINK_WIDTH_SUPP_3X_1X, + LINK_WIDTH_SUPP_3X_2X, + LINK_WIDTH_SUPP_3X_2X_1X, + LINK_WIDTH_SUPP_4X, + LINK_WIDTH_SUPP_4X_1X, + LINK_WIDTH_SUPP_4X_2X, + LINK_WIDTH_SUPP_4X_2X_1X, + LINK_WIDTH_SUPP_4X_3X, + LINK_WIDTH_SUPP_4X_3X_1X, + LINK_WIDTH_SUPP_4X_3X_2X, + LINK_WIDTH_SUPP_4X_3X_2X_1X, + LINK_WIDTH_SUPP_MAX +}; + +enum platform_config_virtual_lane_capability_encoding { + VL_CAP_VL0 = 1, + VL_CAP_VL0_1, + VL_CAP_VL0_2, + VL_CAP_VL0_3, + VL_CAP_VL0_4, + VL_CAP_VL0_5, + VL_CAP_VL0_6, + VL_CAP_VL0_7, + VL_CAP_VL0_8, + VL_CAP_VL0_9, + VL_CAP_VL0_10, + VL_CAP_VL0_11, + VL_CAP_VL0_12, + VL_CAP_VL0_13, + VL_CAP_VL0_14, + VL_CAP_MAX +}; + +/* Max MTU */ +enum platform_config_mtu_capability_encoding { + MTU_CAP_256 = 1, + MTU_CAP_512 = 2, + MTU_CAP_1024 = 3, + MTU_CAP_2048 = 4, + MTU_CAP_4096 = 5, + MTU_CAP_8192 = 6, + MTU_CAP_10240 = 7 +}; + +enum platform_config_local_max_timeout_encoding { + LOCAL_MAX_TIMEOUT_10_MS = 1, + LOCAL_MAX_TIMEOUT_100_MS, + LOCAL_MAX_TIMEOUT_1_S, + LOCAL_MAX_TIMEOUT_10_S, + LOCAL_MAX_TIMEOUT_100_S, + LOCAL_MAX_TIMEOUT_1000_S +}; + +enum link_tuning_encoding { + OPA_PASSIVE_TUNING, + OPA_ACTIVE_TUNING, + OPA_UNKNOWN_TUNING +}; + +int set_qsfp_tx(struct hfi1_pportdata *ppd, int on); +void tune_serdes(struct hfi1_pportdata *ppd); +#endif /*__PLATFORM_H*/ diff --git a/drivers/staging/rdma/hfi1/platform_config.h b/drivers/staging/rdma/hfi1/platform_config.h deleted file mode 100644 index 8a94a8342052..000000000000 --- a/drivers/staging/rdma/hfi1/platform_config.h +++ /dev/null @@ -1,286 +0,0 @@ -/* - * - * This file is provided under a dual BSD/GPLv2 
license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ -#ifndef __PLATFORM_CONFIG_H -#define __PLATFORM_CONFIG_H - -#define METADATA_TABLE_FIELD_START_SHIFT 0 -#define METADATA_TABLE_FIELD_START_LEN_BITS 15 -#define METADATA_TABLE_FIELD_LEN_SHIFT 16 -#define METADATA_TABLE_FIELD_LEN_LEN_BITS 16 - -/* Header structure */ -#define PLATFORM_CONFIG_HEADER_RECORD_IDX_SHIFT 0 -#define PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS 6 -#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT 16 -#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS 12 -#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT 28 -#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS 4 - -enum platform_config_table_type_encoding { - PLATFORM_CONFIG_TABLE_RESERVED, - PLATFORM_CONFIG_SYSTEM_TABLE, - PLATFORM_CONFIG_PORT_TABLE, - PLATFORM_CONFIG_RX_PRESET_TABLE, - PLATFORM_CONFIG_TX_PRESET_TABLE, - PLATFORM_CONFIG_QSFP_ATTEN_TABLE, - PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE, - PLATFORM_CONFIG_TABLE_MAX -}; - -enum platform_config_system_table_fields { - SYSTEM_TABLE_RESERVED, - SYSTEM_TABLE_NODE_STRING, - SYSTEM_TABLE_SYSTEM_IMAGE_GUID, - SYSTEM_TABLE_NODE_GUID, - SYSTEM_TABLE_REVISION, - SYSTEM_TABLE_VENDOR_OUI, - SYSTEM_TABLE_META_VERSION, - SYSTEM_TABLE_DEVICE_ID, - SYSTEM_TABLE_PARTITION_ENFORCEMENT_CAP, - SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, - SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_12G, - SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G, - SYSTEM_TABLE_VARIABLE_TABLE_ENTRIES_PER_PORT, - SYSTEM_TABLE_MAX -}; - -enum platform_config_port_table_fields { - PORT_TABLE_RESERVED, - PORT_TABLE_PORT_TYPE, - PORT_TABLE_ATTENUATION_12G, - PORT_TABLE_ATTENUATION_25G, - PORT_TABLE_LINK_SPEED_SUPPORTED, - PORT_TABLE_LINK_WIDTH_SUPPORTED, - PORT_TABLE_VL_CAP, - PORT_TABLE_MTU_CAP, - PORT_TABLE_TX_LANE_ENABLE_MASK, - PORT_TABLE_LOCAL_MAX_TIMEOUT, - PORT_TABLE_AUTO_LANE_SHEDDING_ENABLED, - PORT_TABLE_EXTERNAL_LOOPBACK_ALLOWED, - PORT_TABLE_TX_PRESET_IDX_PASSIVE_CU, - PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ, - PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ, - PORT_TABLE_RX_PRESET_IDX, - PORT_TABLE_CABLE_REACH_CLASS, - PORT_TABLE_MAX -}; - -enum platform_config_rx_preset_table_fields { - RX_PRESET_TABLE_RESERVED, - RX_PRESET_TABLE_QSFP_RX_CDR_APPLY, - RX_PRESET_TABLE_QSFP_RX_EQ_APPLY, - RX_PRESET_TABLE_QSFP_RX_AMP_APPLY, - RX_PRESET_TABLE_QSFP_RX_CDR, - RX_PRESET_TABLE_QSFP_RX_EQ, - RX_PRESET_TABLE_QSFP_RX_AMP, - RX_PRESET_TABLE_MAX -}; - -enum platform_config_tx_preset_table_fields { - TX_PRESET_TABLE_RESERVED, - TX_PRESET_TABLE_PRECUR, - TX_PRESET_TABLE_ATTN, - TX_PRESET_TABLE_POSTCUR, - TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, - TX_PRESET_TABLE_QSFP_TX_EQ_APPLY, - TX_PRESET_TABLE_QSFP_TX_CDR, - TX_PRESET_TABLE_QSFP_TX_EQ, - TX_PRESET_TABLE_MAX -}; - -enum platform_config_qsfp_attn_table_fields { - QSFP_ATTEN_TABLE_RESERVED, - QSFP_ATTEN_TABLE_TX_PRESET_IDX, - QSFP_ATTEN_TABLE_RX_PRESET_IDX, - QSFP_ATTEN_TABLE_MAX -}; - -enum platform_config_variable_settings_table_fields { - VARIABLE_SETTINGS_TABLE_RESERVED, - VARIABLE_SETTINGS_TABLE_TX_PRESET_IDX, - VARIABLE_SETTINGS_TABLE_RX_PRESET_IDX, - VARIABLE_SETTINGS_TABLE_MAX -}; - -struct platform_config_data { - u32 *table; - u32 *table_metadata; - u32 num_table; -}; - -/* - * This struct acts as a quick reference into the platform_data binary image - * and is populated by parse_platform_config(...) 
depending on the specific - * META_VERSION - */ -struct platform_config_cache { - u8 cache_valid; - struct platform_config_data config_tables[PLATFORM_CONFIG_TABLE_MAX]; -}; - -static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = { - 0, - SYSTEM_TABLE_MAX, - PORT_TABLE_MAX, - RX_PRESET_TABLE_MAX, - TX_PRESET_TABLE_MAX, - QSFP_ATTEN_TABLE_MAX, - VARIABLE_SETTINGS_TABLE_MAX -}; - -/* This section defines default values and encodings for the - * fields defined for each table above - */ - -/*===================================================== - * System table encodings - *====================================================*/ -#define PLATFORM_CONFIG_MAGIC_NUM 0x3d4f5041 -#define PLATFORM_CONFIG_MAGIC_NUMBER_LEN 4 - -/* - * These power classes are the same as defined in SFF 8636 spec rev 2.4 - * describing byte 129 in table 6-16, except enumerated in a different order - */ -enum platform_config_qsfp_power_class_encoding { - QSFP_POWER_CLASS_1 = 1, - QSFP_POWER_CLASS_2, - QSFP_POWER_CLASS_3, - QSFP_POWER_CLASS_4, - QSFP_POWER_CLASS_5, - QSFP_POWER_CLASS_6, - QSFP_POWER_CLASS_7 -}; - - -/*===================================================== - * Port table encodings - *==================================================== */ -enum platform_config_port_type_encoding { - PORT_TYPE_RESERVED, - PORT_TYPE_DISCONNECTED, - PORT_TYPE_FIXED, - PORT_TYPE_VARIABLE, - PORT_TYPE_QSFP, - PORT_TYPE_MAX -}; - -enum platform_config_link_speed_supported_encoding { - LINK_SPEED_SUPP_12G = 1, - LINK_SPEED_SUPP_25G, - LINK_SPEED_SUPP_12G_25G, - LINK_SPEED_SUPP_MAX -}; - -/* - * This is a subset (not strict) of the link downgrades - * supported. The link downgrades supported are expected - * to be supplied to the driver by another entity such as - * the fabric manager - */ -enum platform_config_link_width_supported_encoding { - LINK_WIDTH_SUPP_1X = 1, - LINK_WIDTH_SUPP_2X, - LINK_WIDTH_SUPP_2X_1X, - LINK_WIDTH_SUPP_3X, - LINK_WIDTH_SUPP_3X_1X, - LINK_WIDTH_SUPP_3X_2X, - LINK_WIDTH_SUPP_3X_2X_1X, - LINK_WIDTH_SUPP_4X, - LINK_WIDTH_SUPP_4X_1X, - LINK_WIDTH_SUPP_4X_2X, - LINK_WIDTH_SUPP_4X_2X_1X, - LINK_WIDTH_SUPP_4X_3X, - LINK_WIDTH_SUPP_4X_3X_1X, - LINK_WIDTH_SUPP_4X_3X_2X, - LINK_WIDTH_SUPP_4X_3X_2X_1X, - LINK_WIDTH_SUPP_MAX -}; - -enum platform_config_virtual_lane_capability_encoding { - VL_CAP_VL0 = 1, - VL_CAP_VL0_1, - VL_CAP_VL0_2, - VL_CAP_VL0_3, - VL_CAP_VL0_4, - VL_CAP_VL0_5, - VL_CAP_VL0_6, - VL_CAP_VL0_7, - VL_CAP_VL0_8, - VL_CAP_VL0_9, - VL_CAP_VL0_10, - VL_CAP_VL0_11, - VL_CAP_VL0_12, - VL_CAP_VL0_13, - VL_CAP_VL0_14, - VL_CAP_MAX -}; - -/* Max MTU */ -enum platform_config_mtu_capability_encoding { - MTU_CAP_256 = 1, - MTU_CAP_512 = 2, - MTU_CAP_1024 = 3, - MTU_CAP_2048 = 4, - MTU_CAP_4096 = 5, - MTU_CAP_8192 = 6, - MTU_CAP_10240 = 7 -}; - -enum platform_config_local_max_timeout_encoding { - LOCAL_MAX_TIMEOUT_10_MS = 1, - LOCAL_MAX_TIMEOUT_100_MS, - LOCAL_MAX_TIMEOUT_1_S, - LOCAL_MAX_TIMEOUT_10_S, - LOCAL_MAX_TIMEOUT_100_S, - LOCAL_MAX_TIMEOUT_1000_S -}; - -#endif /*__PLATFORM_CONFIG_H*/ diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h index 34222501cc33..b1b9e4a2329f 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/staging/rdma/hfi1/qsfp.h @@ -213,7 +213,8 @@ struct qsfp_data { u8 cache[QSFP_MAX_NUM_PAGES*128]; spinlock_t qsfp_lock; u8 check_interrupt_flags; - u8 qsfp_interrupt_functional; + u8 reset_needed; + u8 limiting_active; u8 cache_valid; u8 cache_refresh_required; }; -- cgit v1.2.3-59-g8ed1b From 
1d01cf33e38a6aff87c25575286385daac11b8ca Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Wed, 3 Feb 2016 14:31:22 -0800 Subject: staging/rdma/hfi1: Get port type from configuration file The current code employs a heuristic to guess the port type. The canonical location to identify the port type for a given platform is the platform configuration data. This patch uses the previously fetched port type from the platform configuration and removes the now obsolete heuristic routine and its associated defines. Reviewed-by: Arthur Kepner Signed-off-by: Easwar Hariharan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mad.c | 12 +----------- drivers/staging/rdma/hfi1/opa_compat.h | 15 --------------- 2 files changed, 1 insertion(+), 26 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 303dfeeed2bc..5146f5df7a10 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -503,16 +503,6 @@ void read_ltp_rtt(struct hfi1_devdata *dd) write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg); } -static u8 __opa_porttype(struct hfi1_pportdata *ppd) -{ - if (qsfp_mod_present(ppd)) { - if (ppd->qsfp_info.cache_valid) - return OPA_PORT_TYPE_STANDARD; - return OPA_PORT_TYPE_DISCONNECTED; - } - return OPA_PORT_TYPE_UNKNOWN; -} - static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, u32 *resp_len) @@ -583,7 +573,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, if (start_of_sm_config && (state == IB_PORT_INIT)) ppd->is_sm_config_started = 1; - pi->port_phys_conf = __opa_porttype(ppd) & 0xf; + pi->port_phys_conf = (ppd->port_type & 0xf); #if PI_LED_ENABLE_SUP pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4; diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/staging/rdma/hfi1/opa_compat.h index f64eec1c2951..30f77077e30b 100644 --- a/drivers/staging/rdma/hfi1/opa_compat.h +++ b/drivers/staging/rdma/hfi1/opa_compat.h @@ -111,19 +111,4 @@ enum opa_port_phys_state { /* values 12-15 are reserved/ignored */ }; -/* OPA_PORT_TYPE_* definitions - these belong in opa_port_info.h */ -#define OPA_PORT_TYPE_UNKNOWN 0 -#define OPA_PORT_TYPE_DISCONNECTED 1 -/* port is not currently usable, CableInfo not available */ -#define OPA_PORT_TYPE_FIXED 2 -/* A fixed backplane port in a director class switch. All OPA ASICS */ -#define OPA_PORT_TYPE_VARIABLE 3 -/* A backplane port in a blade system, possibly mixed configuration */ -#define OPA_PORT_TYPE_STANDARD 4 -/* implies a SFF-8636 defined format for CableInfo (QSFP) */ -#define OPA_PORT_TYPE_SI_PHOTONICS 5 -/* A silicon photonics module implies TBD defined format for CableInfo - * as defined by Intel SFO group */ -/* 6 - 15 are reserved */ - #endif /* _LINUX_H */ -- cgit v1.2.3-59-g8ed1b From cbac386a120a7e8a26c013f496717b11cacbd99c Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Wed, 3 Feb 2016 14:31:31 -0800 Subject: staging/rdma/hfi1: Support external device configuration requests from 8051 This patch implements support for turning the clock data recovery mechanisms implemented in the QSFP cable on and off per lane, at the request of the DC 8051.
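The heart of the new HREQ_ENABLE handling is per-lane manipulation of the cable's CDR control byte (QSFP_CDR_CTRL_BYTE_OFFS in qsfp.h): RX CDR enables sit in bits 0-3 and TX CDR enables in bits 4-7, one bit per lane. A minimal stand-alone sketch of that bit manipulation, assuming only this layout:

/*
 * Editorial sketch of the per-lane CDR control performed by the new
 * HREQ_ENABLE handling below. Stand-alone and compilable; it assumes
 * only the layout of the CDR control byte (RX enables in bits 0-3,
 * TX enables in bits 4-7).
 */
#include <stdio.h>

static unsigned char set_lane_cdr(unsigned char ctrl, unsigned int lane,
				  int tx_on, int rx_on)
{
	if (tx_on)
		ctrl |= 1u << (lane + 4);	/* enable TX CDR, this lane */
	else
		ctrl &= ~(1u << (lane + 4));	/* disable TX CDR */
	if (rx_on)
		ctrl |= 1u << lane;		/* enable RX CDR, this lane */
	else
		ctrl &= ~(1u << lane);		/* disable RX CDR */
	return ctrl;
}

int main(void)
{
	unsigned char ctrl = 0xFF;		/* all CDRs currently on */
	unsigned int lane;

	for (lane = 0; lane < 4; lane++)
		ctrl = set_lane_cdr(ctrl, lane, 1, 0);
	printf("CDR ctrl byte: 0x%02x\n", ctrl);	/* prints 0xf0 */
	return 0;
}

In the driver itself, each update is additionally gated on the cable advertising CDR support for that direction (the QSFP_MOD_PWR_OFFS and QSFP_CDR_INFO_OFFS capability bits checked in the handler below) before the byte is written back with qsfp_write().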
Reviewed-by: Dean Luick Signed-off-by: Easwar Hariharan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 52 +++++++++++++++++++++++++++++++++++----- drivers/staging/rdma/hfi1/chip.h | 1 + drivers/staging/rdma/hfi1/hfi.h | 1 + drivers/staging/rdma/hfi1/init.c | 1 + 4 files changed, 49 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 4d70a960ff54..41af05ec0ff7 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -6085,13 +6085,19 @@ static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data) } /* - * Handle requests from the 8051. + * Handle host requests from the 8051. + * + * This is a work-queue function outside of the interrupt. */ -static void handle_8051_request(struct hfi1_devdata *dd) +void handle_8051_request(struct work_struct *work) { + struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, + dc_host_req_work); + struct hfi1_devdata *dd = ppd->dd; u64 reg; - u16 data; - u8 type; + u16 data = 0; + u8 type, i, lanes, *cache = ppd->qsfp_info.cache; + u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS]; reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1); if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0) @@ -6112,12 +6118,46 @@ static void handle_8051_request(struct hfi1_devdata *dd) case HREQ_READ_CONFIG: case HREQ_SET_TX_EQ_ABS: case HREQ_SET_TX_EQ_REL: - case HREQ_ENABLE: dd_dev_info(dd, "8051 request: request 0x%x not supported\n", type); hreq_response(dd, HREQ_NOT_SUPPORTED, 0); break; + case HREQ_ENABLE: + lanes = data & 0xF; + for (i = 0; lanes; lanes >>= 1, i++) { + if (!(lanes & 1)) + continue; + if (data & 0x200) { + /* enable TX CDR */ + if (cache[QSFP_MOD_PWR_OFFS] & 0x8 && + cache[QSFP_CDR_INFO_OFFS] & 0x80) + cdr_ctrl_byte |= (1 << (i + 4)); + } else { + /* disable TX CDR */ + if (cache[QSFP_MOD_PWR_OFFS] & 0x8 && + cache[QSFP_CDR_INFO_OFFS] & 0x80) + cdr_ctrl_byte &= ~(1 << (i + 4)); + } + + if (data & 0x800) { + /* enable RX CDR */ + if (cache[QSFP_MOD_PWR_OFFS] & 0x4 && + cache[QSFP_CDR_INFO_OFFS] & 0x40) + cdr_ctrl_byte |= (1 << i); + } else { + /* disable RX CDR */ + if (cache[QSFP_MOD_PWR_OFFS] & 0x4 && + cache[QSFP_CDR_INFO_OFFS] & 0x40) + cdr_ctrl_byte &= ~(1 << i); + } + } + qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS, + &cdr_ctrl_byte, 1); + hreq_response(dd, HREQ_SUCCESS, data); + refresh_qsfp_cache(ppd, &ppd->qsfp_info); + break; + case HREQ_CONFIG_DONE: hreq_response(dd, HREQ_SUCCESS, 0); break; @@ -7373,7 +7413,7 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) host_msg &= ~(u64)LINKUP_ACHIEVED; } if (host_msg & EXT_DEVICE_CFG_REQ) { - handle_8051_request(dd); + queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work); host_msg &= ~(u64)EXT_DEVICE_CFG_REQ; } if (host_msg & VERIFY_CAP_FRAME) { diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 5e4fe4363e25..0e95f0b7f2bb 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -647,6 +647,7 @@ void handle_verify_cap(struct work_struct *work); void handle_freeze(struct work_struct *work); void handle_link_up(struct work_struct *work); void handle_link_down(struct work_struct *work); +void handle_8051_request(struct work_struct *work); void handle_link_downgrade(struct work_struct *work); void handle_link_bounce(struct work_struct *work); void handle_sma_message(struct work_struct *work); diff --git 
a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index f79b07002d1f..0fe630e77682 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -589,6 +589,7 @@ struct hfi1_pportdata { struct work_struct link_vc_work; struct work_struct link_up_work; struct work_struct link_down_work; + struct work_struct dc_host_req_work; struct work_struct sma_message_work; struct work_struct freeze_work; struct work_struct link_downgrade_work; diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 354935f20437..2851e90d0cd3 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -495,6 +495,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, INIT_WORK(&ppd->link_vc_work, handle_verify_cap); INIT_WORK(&ppd->link_up_work, handle_link_up); INIT_WORK(&ppd->link_down_work, handle_link_down); + INIT_WORK(&ppd->dc_host_req_work, handle_8051_request); INIT_WORK(&ppd->freeze_work, handle_freeze); INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade); INIT_WORK(&ppd->sma_message_work, handle_sma_message); -- cgit v1.2.3-59-g8ed1b From 6b14e0ea48890e633ac69caa3ae13beccdd497be Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:31:40 -0800 Subject: staging/rdma/hfi1: Fix missing firmware NULL dereference The gen3 bump code must mark a firmware download failure as fatal. Otherwise a later load attempt will fail with a NULL dereference. Also: o Only do a firmware back-off for RTL. There are no alternates for FPGA or simulation. o Rearrange OS firmware request order to match what is actually loaded. This results in more coherent informational messages in the case of missing firmware. Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/firmware.c | 22 +++++++++++----------- drivers/staging/rdma/hfi1/pcie.c | 5 ++++- 2 files changed, 15 insertions(+), 12 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 28ae42faa018..d954e1ab4bbe 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -593,27 +593,27 @@ retry: fw_pcie_serdes_name = ALT_FW_PCIE_NAME; } - if (fw_8051_load) { - err = obtain_one_firmware(dd, fw_8051_name, &fw_8051); + if (fw_sbus_load) { + err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus); if (err) goto done; } - if (fw_fabric_serdes_load) { - err = obtain_one_firmware(dd, fw_fabric_serdes_name, - &fw_fabric); + if (fw_pcie_serdes_load) { + err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie); if (err) goto done; } - if (fw_sbus_load) { - err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus); + if (fw_fabric_serdes_load) { + err = obtain_one_firmware(dd, fw_fabric_serdes_name, + &fw_fabric); if (err) goto done; } - if (fw_pcie_serdes_load) { - err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie); + if (fw_8051_load) { + err = obtain_one_firmware(dd, fw_8051_name, &fw_8051); if (err) goto done; } @@ -621,8 +621,8 @@ retry: done: if (err) { /* oops, had problems obtaining a firmware */ - if (fw_state == FW_EMPTY) { - /* retry with alternate */ + if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) { + /* retry with alternate (RTL only) */ fw_state = FW_TRY; goto retry; } diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index b2f553d86042..3cdc8047f16b 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ 
b/drivers/staging/rdma/hfi1/pcie.c @@ -1032,8 +1032,11 @@ retry: /* step 4: download PCIe Gen3 SerDes firmware */ dd_dev_info(dd, "%s: downloading firmware\n", __func__); ret = load_pcie_firmware(dd); - if (ret) + if (ret) { + /* do not proceed if the firmware cannot be downloaded */ + return_error = 1; goto done; + } /* step 5: set up device parameter settings */ dd_dev_info(dd, "%s: setting PCIe registers\n", __func__); -- cgit v1.2.3-59-g8ed1b From 69a00b8e0508c8b98fba3b57a7c6c45b724553c3 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 3 Feb 2016 14:31:49 -0800 Subject: staging/rdma/hfi1: Fix per-VL transmit discard counts Implement per-VL transmit counters. Not all errors can be attributed to a particular VL, so make a best attempt. o Extend the egress error bits used to count toward transmit discard. o When an egress error or send error occur, try to map back to a VL. o Implement a SDMA engine to VL (back) map. o Add per-VL port transmit counters Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 138 ++++++++++++++++++++++++++++++++------- drivers/staging/rdma/hfi1/hfi.h | 3 +- drivers/staging/rdma/hfi1/sdma.c | 6 ++ drivers/staging/rdma/hfi1/sdma.h | 2 + 4 files changed, 123 insertions(+), 26 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 41af05ec0ff7..108cd48c9006 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -510,6 +510,12 @@ static struct flag_table sdma_err_status_flags[] = { | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \ | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK) +/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */ +#define PORT_DISCARD_EGRESS_ERRS \ + (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \ + | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \ + | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK) + /* * TXE Egress Error flags */ @@ -1481,12 +1487,18 @@ static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry, static u64 access_sw_xmit_discards(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { - struct hfi1_pportdata *ppd = context; + struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; + u64 zero = 0; + u64 *counter; - if (vl != CNTR_INVALID_VL) - return 0; + if (vl == CNTR_INVALID_VL) + counter = &ppd->port_xmit_discards; + else if (vl >= 0 && vl < C_VL_COUNT) + counter = &ppd->port_xmit_discards_vl[vl]; + else + counter = &zero; - return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data); + return read_write_sw(ppd->dd, counter, mode, data); } static u64 access_xmit_constraint_errs(const struct cntr_entry *entry, @@ -5508,12 +5520,14 @@ static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg) } } -static void count_port_inactive(struct hfi1_devdata *dd) +static inline void __count_port_discards(struct hfi1_pportdata *ppd) { - struct hfi1_pportdata *ppd = dd->pport; + incr_cntr64(&ppd->port_xmit_discards); +} - if (ppd->port_xmit_discards < ~(u64)0) - ppd->port_xmit_discards++; +static void count_port_inactive(struct hfi1_devdata *dd) +{ + __count_port_discards(dd->pport); } /* @@ -5525,7 +5539,8 @@ static void count_port_inactive(struct hfi1_devdata *dd) * egress error if more than one packet fails the same integrity check * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO. 
*/ -static void handle_send_egress_err_info(struct hfi1_devdata *dd) +static void handle_send_egress_err_info(struct hfi1_devdata *dd, + int vl) { struct hfi1_pportdata *ppd = dd->pport; u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */ @@ -5540,10 +5555,24 @@ static void handle_send_egress_err_info(struct hfi1_devdata *dd) info, egress_err_info_string(buf, sizeof(buf), info), src); /* Eventually add other counters for each bit */ + if (info & PORT_DISCARD_EGRESS_ERRS) { + int weight, i; - if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) { - if (ppd->port_xmit_discards < ~(u64)0) - ppd->port_xmit_discards++; + /* + * Count all, in case multiple bits are set. Reminder: + * since there is only one info register for many sources, + * these may be attributed to the wrong VL if they occur + * too close together. + */ + weight = hweight64(info); + for (i = 0; i < weight; i++) { + __count_port_discards(ppd); + if (vl >= 0 && vl < TXE_NUM_DATA_VL) + incr_cntr64(&ppd->port_xmit_discards_vl[vl]); + else if (vl == 15) + incr_cntr64(&ppd->port_xmit_discards_vl + [C_VL_15]); + } } } @@ -5561,12 +5590,71 @@ static inline int port_inactive_err(u64 posn) * Input value is a bit position within the SEND_EGRESS_ERR_STATUS * register. Does it represent a 'disallowed packet' error? */ -static inline int disallowed_pkt_err(u64 posn) +static inline int disallowed_pkt_err(int posn) { return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) && posn <= SEES(TX_SDMA15_DISALLOWED_PACKET)); } +/* + * Input value is a bit position of one of the SDMA engine disallowed + * packet errors. Return which engine. Use of this must be guarded by + * disallowed_pkt_err(). + */ +static inline int disallowed_pkt_engine(int posn) +{ + return posn - SEES(TX_SDMA0_DISALLOWED_PACKET); +} + +/* + * Translate an SDMA engine to a VL. Return -1 if the tranlation cannot + * be done. + */ +static int engine_to_vl(struct hfi1_devdata *dd, int engine) +{ + struct sdma_vl_map *m; + int vl; + + /* range check */ + if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES) + return -1; + + rcu_read_lock(); + m = rcu_dereference(dd->sdma_map); + vl = m->engine_to_vl[engine]; + rcu_read_unlock(); + + return vl; +} + +/* + * Translate the send context (sofware index) into a VL. Return -1 if the + * translation cannot be done. + */ +static int sc_to_vl(struct hfi1_devdata *dd, int sw_index) +{ + struct send_context_info *sci; + struct send_context *sc; + int i; + + sci = &dd->send_contexts[sw_index]; + + /* there is no information for user (PSM) and ack contexts */ + if (sci->type != SC_KERNEL) + return -1; + + sc = sci->sc; + if (!sc) + return -1; + if (dd->vld[15].sc == sc) + return 15; + for (i = 0; i < num_vls; i++) + if (dd->vld[i].sc == sc) + return i; + + return -1; +} + static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) { u64 reg_copy = reg, handled = 0; @@ -5575,27 +5663,27 @@ static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) if (reg & ALL_TXE_EGRESS_FREEZE_ERR) start_freeze_handling(dd->pport, 0); - if (is_ax(dd) && (reg & - SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) - && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) + else if (is_ax(dd) && + (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) && + (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) start_freeze_handling(dd->pport, 0); while (reg_copy) { int posn = fls64(reg_copy); - /* - * fls64() returns a 1-based offset, but we generally - * want 0-based offsets. 
- */ + /* fls64() returns a 1-based offset, we want it zero based */ int shift = posn - 1; + u64 mask = 1ULL << shift; if (port_inactive_err(shift)) { count_port_inactive(dd); - handled |= (1ULL << shift); + handled |= mask; } else if (disallowed_pkt_err(shift)) { - handle_send_egress_err_info(dd); - handled |= (1ULL << shift); + int vl = engine_to_vl(dd, disallowed_pkt_engine(shift)); + + handle_send_egress_err_info(dd, vl); + handled |= mask; } - clear_bit(shift, (unsigned long *)®_copy); + reg_copy &= ~mask; } reg &= ~handled; @@ -5739,7 +5827,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, send_context_err_status_string(flags, sizeof(flags), status)); if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK) - handle_send_egress_err_info(dd); + handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index)); /* * Automatically restart halted kernel contexts out of interrupt diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 0fe630e77682..76c369a49830 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -709,8 +709,9 @@ struct hfi1_pportdata { u64 *cntrs; /* port relative synthetic counter buffer */ u64 *scntrs; - /* we synthesize port_xmit_discards from several egress errors */ + /* port_xmit_discards are synthesized from different egress errors */ u64 port_xmit_discards; + u64 port_xmit_discards_vl[C_VL_COUNT]; u64 port_xmit_constraint_errors; u64 port_rcv_constraint_errors; /* count of 'link_err' interrupts from DC */ diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index 4eb55facfea2..ddaaaacaf038 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -890,6 +890,9 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines) newmap->actual_vls = num_vls; newmap->vls = roundup_pow_of_two(num_vls); newmap->mask = (1 << ilog2(newmap->vls)) - 1; + /* initialize back-map */ + for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++) + newmap->engine_to_vl[i] = -1; for (i = 0; i < newmap->vls; i++) { /* save for wrap around */ int first_engine = engine; @@ -913,6 +916,9 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines) /* wrap back to first engine */ engine = first_engine; } + /* assign back-map */ + for (j = 0; j < vl_engines[i]; j++) + newmap->engine_to_vl[first_engine + j] = i; } else { /* just re-use entry without allocating */ newmap->map[i] = newmap->map[i % num_vls]; diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 0f51c45869d5..1d52d6e21bd0 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -1087,6 +1087,7 @@ struct sdma_map_elem { /** * struct sdma_map_el - mapping for a vl + * @engine_to_vl - map of an engine to a vl * @list - rcu head for free callback * @mask - vl mask to "mod" the vl to produce an index to map array * @actual_vls - number of vls @@ -1098,6 +1099,7 @@ struct sdma_map_elem { * in turn point to an array of sde's for that vl. 
*/ struct sdma_vl_map { + s8 engine_to_vl[TXE_NUM_SDMA_ENGINES]; struct rcu_head list; u32 mask; u8 actual_vls; -- cgit v1.2.3-59-g8ed1b From 845f876d0819e2b941b1dbe92e0d219df8010035 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:31:57 -0800 Subject: staging/rdma/hfi1: Only warn when board description is not found Change-Id: Icc4ad27c4c67e51df8c8a203c4f16973793678ec Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 108cd48c9006..81b48d600cf8 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13941,7 +13941,7 @@ static int obtain_boardname(struct hfi1_devdata *dd) ret = read_hfi1_efi_var(dd, "description", &size, (void **)&dd->boardname); if (ret) { - dd_dev_err(dd, "Board description not found\n"); + dd_dev_info(dd, "Board description not found\n"); /* use generic description */ dd->boardname = kstrdup(generic, GFP_KERNEL); if (!dd->boardname) -- cgit v1.2.3-59-g8ed1b From fe072e205a685cfba285259d779dd3bda5e250f2 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:32:06 -0800 Subject: staging/rdma/hfi1: Make firmware failure messages warnings Make firmware validation failure and missing firmware messages a warning since alternates can be tried. Add an error message when all attempts fail. Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/firmware.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index d954e1ab4bbe..817cbf94f973 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -433,8 +433,8 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name, ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev); if (ret) { - dd_dev_err(dd, "cannot find firmware \"%s\", err %d\n", - name, ret); + dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n", + name, ret); return ret; } @@ -572,7 +572,7 @@ retry: * We tried the original and it failed. Move to the * alternate. */ - dd_dev_info(dd, "using alternate firmware names\n"); + dd_dev_warn(dd, "using alternate firmware names\n"); /* * Let others run. Some systems, when missing firmware, does * something that holds for 30 seconds. If we do that twice @@ -626,6 +626,7 @@ done: fw_state = FW_TRY; goto retry; } + dd_dev_err(dd, "unable to obtain working firmware\n"); fw_state = FW_ERR; fw_err = -ENOENT; } else { @@ -896,16 +897,17 @@ static int run_rsa(struct hfi1_devdata *dd, const char *who, MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK | MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK); /* - * All that is left are the current errors. Print failure details, - * if any. + * All that is left are the current errors. Print warnings on + * authorization failure details, if any. Firmware authorization + * can be retried, so these are only warnings. 
*/ reg = read_csr(dd, MISC_ERR_STATUS); if (ret) { if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK) - dd_dev_err(dd, "%s firmware authorization failed\n", - who); + dd_dev_warn(dd, "%s firmware authorization failed\n", + who); if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK) - dd_dev_err(dd, "%s firmware key mismatch\n", who); + dd_dev_warn(dd, "%s firmware key mismatch\n", who); } return ret; -- cgit v1.2.3-59-g8ed1b From 715c430ca5b21c22648665045fc3718b4547d5c8 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Wed, 3 Feb 2016 14:32:14 -0800 Subject: staging/rdma/hfi1: Don't attempt to qualify or tune loopback plugs Loopback plugs used for testing hardware don't need to be qualified to bring the link up, unlike production cables. This patch adds an exception for loopback plugs to the QSFP and SerDes tuning algorithm. Reviewed-by: Dean Luick Signed-off-by: Easwar Hariharan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/platform.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c index 9aa5e06633de..c3df1d892754 100644 --- a/drivers/staging/rdma/hfi1/platform.c +++ b/drivers/staging/rdma/hfi1/platform.c @@ -737,7 +737,8 @@ void tune_serdes(struct hfi1_pportdata *ppd) ppd->driver_link_ready = 0; ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE); - if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB || + /* Skip the tuning for testing (loopback != none) and simulations */ + if (loopback != LOOPBACK_NONE || ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR || !dd->pcfg_cache.cache_valid) { ppd->driver_link_ready = 1; -- cgit v1.2.3-59-g8ed1b From dcc68e528238c9e1173a8e98de29f0149122ac4c Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:32:23 -0800 Subject: staging/rdma/hfi1: No firmware retry for simulation Simulation has no firmware, so it will never move firmware acquire to the FINAL state. Avoid that by skipping the TRY state and moving directly to FINAL. Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/firmware.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 817cbf94f973..4ba524b82edd 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -631,7 +631,8 @@ done: fw_err = -ENOENT; } else { /* success */ - if (fw_state == FW_EMPTY) + if (fw_state == FW_EMPTY && + dd->icode != ICODE_FUNCTIONAL_SIMULATOR) fw_state = FW_TRY; /* may retry later */ else fw_state = FW_FINAL; /* cannot try again */ -- cgit v1.2.3-59-g8ed1b From a59329d5e8977fb4b6d7535bdf14b0fe7ece5559 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:32:31 -0800 Subject: staging/rdma/hfi1: Skip lcb init for simulation The simulator does not correctly handle LCB cclk loopback. Skip that step for simulation - it is not needed.
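The firmware fixes above all manipulate the same module-global acquire state (FW_EMPTY, FW_TRY, FW_FINAL, FW_ERR). Below is a sketch of the resulting transitions; the enum values are the driver's own, but next_fw_state() is a hypothetical helper written only to make the flow explicit, not code from the patches:

	enum fw_state { FW_EMPTY, FW_TRY, FW_FINAL, FW_ERR };

	static enum fw_state next_fw_state(enum fw_state cur, bool obtain_failed,
					   bool is_rtl, bool is_simulator)
	{
		if (obtain_failed)
			/* only RTL silicon has alternate firmware names to retry */
			return (cur == FW_EMPTY && is_rtl) ? FW_TRY : FW_ERR;
		if (cur == FW_EMPTY && !is_simulator)
			return FW_TRY;	/* first success; alternates may still be tried */
		return FW_FINAL;	/* locked in; cannot try again */
	}

The simulator shortcut matters because a simulated device loads no firmware, so nothing would ever move it from TRY to FINAL.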
Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 81b48d600cf8..d45e27105d50 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -9315,6 +9315,10 @@ static void init_qsfp_int(struct hfi1_devdata *dd) */ static void init_lcb(struct hfi1_devdata *dd) { + /* simulator does not correctly handle LCB cclk loopback, skip */ + if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) + return; + /* the DC has been reset earlier in the driver load */ /* set LCB for cclk loopback on the port */ -- cgit v1.2.3-59-g8ed1b From 11d2b114cdebd9b520de573d74c70fb04c2771cc Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Wed, 3 Feb 2016 14:32:40 -0800 Subject: staging/rdma/hfi1: Fix for 32-bit counter overflow in driver and hfi1stats When 32-bit hardware counters overflow, hfi1stats misinterprets the counters as being 64 bits causing the deltas for the counters to be a huge number. This patch makes hfi1stats aware that a counter is 32 bits by making the driver write ,32 to debugfs. Reviewed-by: Dean Luick Signed-off-by: Sebastian Sanchez Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 98 ++++++++++++++++++++++++++++++---------- 1 file changed, 73 insertions(+), 25 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index d45e27105d50..a90e6e6699c0 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -11766,6 +11766,8 @@ static int init_cntrs(struct hfi1_devdata *dd) char *p; char name[C_MAX_NAME]; struct hfi1_pportdata *ppd; + const char *bit_type_32 = ",32"; + const int bit_type_32_sz = strlen(bit_type_32); /* set up the stats timer; the add_timer is done at the end */ setup_timer(&dd->synth_stats_timer, update_synth_timer, @@ -11795,6 +11797,9 @@ static int init_cntrs(struct hfi1_devdata *dd) dev_cntrs[i].name, vl_from_idx(j)); sz += strlen(name); + /* Add ",32" for 32-bit counters */ + if (dev_cntrs[i].flags & CNTR_32BIT) + sz += bit_type_32_sz; sz++; hfi1_dbg_early("\t\t%s\n", name); dd->ndevcntrs++; @@ -11809,13 +11814,19 @@ static int init_cntrs(struct hfi1_devdata *dd) snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, j); sz += strlen(name); + /* Add ",32" for 32-bit counters */ + if (dev_cntrs[i].flags & CNTR_32BIT) + sz += bit_type_32_sz; sz++; hfi1_dbg_early("\t\t%s\n", name); dd->ndevcntrs++; } } else { - /* +1 for newline */ + /* +1 for newline. 
*/ sz += strlen(dev_cntrs[i].name) + 1; + /* Add ",32" for 32-bit counters */ + if (dev_cntrs[i].flags & CNTR_32BIT) + sz += bit_type_32_sz; dev_cntrs[i].offset = dd->ndevcntrs; dd->ndevcntrs++; hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name); @@ -11842,33 +11853,50 @@ static int init_cntrs(struct hfi1_devdata *dd) for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { if (dev_cntrs[i].flags & CNTR_DISABLED) { /* Nothing */ - } else { - if (dev_cntrs[i].flags & CNTR_VL) { - for (j = 0; j < C_VL_COUNT; j++) { - memset(name, '\0', C_MAX_NAME); - snprintf(name, C_MAX_NAME, "%s%d", - dev_cntrs[i].name, - vl_from_idx(j)); - memcpy(p, name, strlen(name)); - p += strlen(name); - *p++ = '\n'; + } else if (dev_cntrs[i].flags & CNTR_VL) { + for (j = 0; j < C_VL_COUNT; j++) { + memset(name, '\0', C_MAX_NAME); + snprintf(name, C_MAX_NAME, "%s%d", + dev_cntrs[i].name, + vl_from_idx(j)); + memcpy(p, name, strlen(name)); + p += strlen(name); + + /* Counter is 32 bits */ + if (dev_cntrs[i].flags & CNTR_32BIT) { + memcpy(p, bit_type_32, bit_type_32_sz); + p += bit_type_32_sz; } - } else if (dev_cntrs[i].flags & CNTR_SDMA) { - for (j = 0; j < TXE_NUM_SDMA_ENGINES; - j++) { - memset(name, '\0', C_MAX_NAME); - snprintf(name, C_MAX_NAME, "%s%d", - dev_cntrs[i].name, j); - memcpy(p, name, strlen(name)); - p += strlen(name); - *p++ = '\n'; + + *p++ = '\n'; + } + } else if (dev_cntrs[i].flags & CNTR_SDMA) { + for (j = 0; j < dd->chip_sdma_engines; j++) { + memset(name, '\0', C_MAX_NAME); + snprintf(name, C_MAX_NAME, "%s%d", + dev_cntrs[i].name, j); + memcpy(p, name, strlen(name)); + p += strlen(name); + + /* Counter is 32 bits */ + if (dev_cntrs[i].flags & CNTR_32BIT) { + memcpy(p, bit_type_32, bit_type_32_sz); + p += bit_type_32_sz; } - } else { - memcpy(p, dev_cntrs[i].name, - strlen(dev_cntrs[i].name)); - p += strlen(dev_cntrs[i].name); + *p++ = '\n'; } + } else { + memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name)); + p += strlen(dev_cntrs[i].name); + + /* Counter is 32 bits */ + if (dev_cntrs[i].flags & CNTR_32BIT) { + memcpy(p, bit_type_32, bit_type_32_sz); + p += bit_type_32_sz; + } + + *p++ = '\n'; } } @@ -11906,13 +11934,19 @@ static int init_cntrs(struct hfi1_devdata *dd) port_cntrs[i].name, vl_from_idx(j)); sz += strlen(name); + /* Add ",32" for 32-bit counters */ + if (port_cntrs[i].flags & CNTR_32BIT) + sz += bit_type_32_sz; sz++; hfi1_dbg_early("\t\t%s\n", name); dd->nportcntrs++; } } else { - /* +1 for newline */ + /* +1 for newline */ sz += strlen(port_cntrs[i].name) + 1; + /* Add ",32" for 32-bit counters */ + if (port_cntrs[i].flags & CNTR_32BIT) + sz += bit_type_32_sz; port_cntrs[i].offset = dd->nportcntrs; dd->nportcntrs++; hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name); @@ -11938,12 +11972,26 @@ static int init_cntrs(struct hfi1_devdata *dd) vl_from_idx(j)); memcpy(p, name, strlen(name)); p += strlen(name); + + /* Counter is 32 bits */ + if (port_cntrs[i].flags & CNTR_32BIT) { + memcpy(p, bit_type_32, bit_type_32_sz); + p += bit_type_32_sz; + } + *p++ = '\n'; } } else { memcpy(p, port_cntrs[i].name, strlen(port_cntrs[i].name)); p += strlen(port_cntrs[i].name); + + /* Counter is 32 bits */ + if (port_cntrs[i].flags & CNTR_32BIT) { + memcpy(p, bit_type_32, bit_type_32_sz); + p += bit_type_32_sz; + } + *p++ = '\n'; } } -- cgit v1.2.3-59-g8ed1b From 566c157cbd2113a18bfc40170de16227357434d7 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:32:49 -0800 Subject: staging/rdma/hfi1: Correctly set RcvCtxtCtrl register The RcvCtxtCtrl register was being incorrectly 
set upon context initialization and cleanup, resulting, in many cases, in contexts using settings from previous contexts' initialization. This resulted in bad and unexpected behavior. This was especially important for the TailUpd bit, which requires special handling and, if set incorrectly, could lead to severely degraded performance. This patch fixes the handling of the RcvCtxtCtrl register, ensuring that each context gets initialized with settings applicable only for that context. It also ensures the proper setting for the TailUpd bit by setting it to either 0 or 1 (as needed by the context's configuration) explicitly. Reviewed-by: Ira Weiny Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 18 ++++++++++++++---- drivers/staging/rdma/hfi1/file_ops.c | 9 +++++++++ drivers/staging/rdma/hfi1/init.c | 2 +- 3 files changed, 24 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index a90e6e6699c0..d10ba6732e72 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -6684,11 +6684,17 @@ static void rxe_freeze(struct hfi1_devdata *dd) */ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) { + u32 rcvmask; int i; /* enable all kernel contexts */ - for (i = 0; i < dd->n_krcv_queues; i++) - hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i); + for (i = 0; i < dd->n_krcv_queues; i++) { + rcvmask = HFI1_RCVCTRL_CTXT_ENB; + /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ + rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ? + HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; + hfi1_rcvctrl(dd, rcvmask, i); + } /* enable port */ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); @@ -11255,6 +11261,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) if (dd->rcvhdrtail_dummy_physaddr) { write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, dd->rcvhdrtail_dummy_physaddr); + /* Enabling RcvCtxtCtrl.TailUpd is intentional. */ rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; } @@ -11266,8 +11273,11 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys) rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; - if (op & HFI1_RCVCTRL_TAILUPD_DIS) - rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK; + if (op & HFI1_RCVCTRL_TAILUPD_DIS) { + /* See comment on RcvCtxtCtrl.TailUpd above */ + if (!(op & HFI1_RCVCTRL_CTXT_DIS)) + rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK; + } if (op & HFI1_RCVCTRL_TIDFLOW_ENB) rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; if (op & HFI1_RCVCTRL_TIDFLOW_DIS) diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index d36588934f99..5c694fac3028 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -771,6 +771,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_TIDFLOW_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS | + HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_ONE_PKT_EGR_DIS | HFI1_RCVCTRL_NO_RHQ_DROP_DIS | HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt); @@ -1156,8 +1157,16 @@ static int user_init(struct file *fp) rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB; if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL)) rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB; + /* + * The RcvCtxtCtrl.TailUpd bit has to be explicitly written. 
+ * We can't rely on the correct value to be set from prior + * uses of the chip or ctxt. Therefore, add the rcvctrl op + * for both cases. + */ if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL)) rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB; + else + rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS; hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt); /* Notify any waiting slaves */ diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 2851e90d0cd3..fc3d40a58cf3 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -589,8 +589,8 @@ static void enable_chip(struct hfi1_devdata *dd) * Enable kernel ctxts' receive and receive interrupt. * Other ctxts done as user opens and initializes them. */ - rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB; for (i = 0; i < dd->first_user_ctxt; ++i) { + rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB; rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ? HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR)) -- cgit v1.2.3-59-g8ed1b From 6c9e50f894458810591f7883bdd6f0f8474b06a0 Mon Sep 17 00:00:00 2001 From: Vennila Megavannan Date: Wed, 3 Feb 2016 14:32:57 -0800 Subject: staging/rdma/hfi1: Method to toggle "fast ECN" detection Add a per-port sysfs parameter to toggle cc_prescan/Fast ECN Detection and remove the Kconfig option which was previously used to control this. While updating the sysfs documentation, fix the name of CCMgtA. Reviewed-by: Arthur Kepner Reviewed-by: Mike Marciniszyn Signed-off-by: Vennila Megavannan Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- Documentation/infiniband/sysfs.txt | 3 +- drivers/staging/rdma/hfi1/Kconfig | 10 ------ drivers/staging/rdma/hfi1/driver.c | 24 +++++++------- drivers/staging/rdma/hfi1/hfi.h | 2 ++ drivers/staging/rdma/hfi1/sysfs.c | 66 +++++++++++++++++++++++++++++++++++--- 5 files changed, 77 insertions(+), 28 deletions(-) (limited to 'drivers/staging') diff --git a/Documentation/infiniband/sysfs.txt b/Documentation/infiniband/sysfs.txt index 9028b025501a..3ecf0c3a133f 100644 --- a/Documentation/infiniband/sysfs.txt +++ b/Documentation/infiniband/sysfs.txt @@ -78,9 +78,10 @@ HFI1 chip_reset - diagnostic (root only) boardversion - board version ports/1/ - CMgtA/ + CCMgtA/ cc_settings_bin - CCA tables used by PSM2 cc_table_bin + cc_prescan - enable prescaning for faster BECN response sc2v/ - 32 files (0 - 31) used to translate sl->vl sl2sc/ - 32 files (0 - 31) used to translate sl->sc vl2mtu/ - 16 (0 - 15) files used to determine MTU for vl diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig index 846c240c80aa..3e668d852f03 100644 --- a/drivers/staging/rdma/hfi1/Kconfig +++ b/drivers/staging/rdma/hfi1/Kconfig @@ -26,13 +26,3 @@ config SDMA_VERBOSITY ---help--- This is a configuration flag to enable verbose SDMA debug -config PRESCAN_RXQ - bool "Enable prescanning of the RX queue for ECNs" - depends on INFINIBAND_HFI1 - default n - ---help--- - This option toggles the prescanning of the receive queue for - Explicit Congestion Notifications. If an ECN is detected, it - is processed as quickly as possible, the ECN is toggled off. - After the prescanning step, the receive queue is processed as - usual. 
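One design note on the new attribute: the cc_prescan_store() added in the sysfs.c hunk further below matches its input with memcmp(buf, "on", 2) and memcmp(buf, "off", 3), so it accepts any string beginning with those prefixes and silently ignores everything else. A stricter, hypothetical variant of the same store routine (not what the patch does) could use sysfs_streq(), which rejects unknown input and also tolerates the trailing newline that echo(1) appends:

	static ssize_t cc_prescan_store(struct hfi1_pportdata *ppd, const char *buf,
					size_t count)
	{
		if (sysfs_streq(buf, "on"))
			ppd->cc_prescan = true;
		else if (sysfs_streq(buf, "off"))
			ppd->cc_prescan = false;
		else
			return -EINVAL;	/* only "on" and "off" are meaningful */

		return count;
	}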
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index d848cc01f07a..59ce85f8d155 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -453,11 +453,6 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd, packet->rcv_flags = 0; } -#ifndef CONFIG_PRESCAN_RXQ -static void prescan_rxq(struct hfi1_packet *packet) {} -#else /* !CONFIG_PRESCAN_RXQ */ -static int prescan_receive_queue; - static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr, struct hfi1_other_headers *ohdr, u64 rhf, u32 bth1, struct ib_grh *grh) @@ -581,15 +576,19 @@ static inline void update_ps_mdata(struct ps_mdata *mdata, * containing Excplicit Congestion Notifications (FECNs, or BECNs). * When an ECN is found, process the Congestion Notification, and toggle * it off. + * This is declared as a macro to allow quick checking of the port to avoid + * the overhead of a function call if not enabled. */ -static void prescan_rxq(struct hfi1_packet *packet) +#define prescan_rxq(rcd, packet) \ + do { \ + if (rcd->ppd->cc_prescan) \ + __prescan_rxq(packet); \ + } while (0) +static void __prescan_rxq(struct hfi1_packet *packet) { struct hfi1_ctxtdata *rcd = packet->rcd; struct ps_mdata mdata; - if (!prescan_receive_queue) - return; - init_ps_mdata(&mdata, packet); while (1) { @@ -653,7 +652,6 @@ next: update_ps_mdata(&mdata, rcd); } } -#endif /* CONFIG_PRESCAN_RXQ */ static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread) { @@ -819,7 +817,7 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread) goto bail; } - prescan_rxq(&packet); + prescan_rxq(rcd, &packet); while (last == RCV_PKT_OK) { last = process_rcv_packet(&packet, thread); @@ -850,7 +848,7 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread) } smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ - prescan_rxq(&packet); + prescan_rxq(rcd, &packet); while (last == RCV_PKT_OK) { last = process_rcv_packet(&packet, thread); @@ -961,7 +959,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) } } - prescan_rxq(&packet); + prescan_rxq(rcd, &packet); while (last == RCV_PKT_OK) { diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 76c369a49830..d19d6b72352d 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -734,6 +734,8 @@ struct hfi1_pportdata { /* Error events that will cause a port bounce. 
*/ u32 port_error_action; struct work_struct linkstate_active_work; + /* Does this port need to prescan for FECNs */ + bool cc_prescan; }; typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet); diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c index d05b9f37da0a..f1d47e7f31d2 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/staging/rdma/hfi1/sysfs.c @@ -99,10 +99,6 @@ static void port_release(struct kobject *kobj) /* nothing to do since memory is freed by hfi1_free_devdata() */ } -static struct kobj_type port_cc_ktype = { - .release = port_release, -}; - static struct bin_attribute cc_table_bin_attr = { .attr = {.name = "cc_table_bin", .mode = 0444}, .read = read_cc_table_bin, @@ -151,6 +147,68 @@ static struct bin_attribute cc_setting_bin_attr = { .size = PAGE_SIZE, }; +struct hfi1_port_attr { + struct attribute attr; + ssize_t (*show)(struct hfi1_pportdata *, char *); + ssize_t (*store)(struct hfi1_pportdata *, const char *, size_t); +}; + +static ssize_t cc_prescan_show(struct hfi1_pportdata *ppd, char *buf) +{ + return sprintf(buf, "%s\n", ppd->cc_prescan ? "on" : "off"); +} + +static ssize_t cc_prescan_store(struct hfi1_pportdata *ppd, const char *buf, + size_t count) +{ + if (!memcmp(buf, "on", 2)) + ppd->cc_prescan = true; + else if (!memcmp(buf, "off", 3)) + ppd->cc_prescan = false; + + return count; +} + +static struct hfi1_port_attr cc_prescan_attr = + __ATTR(cc_prescan, 0600, cc_prescan_show, cc_prescan_store); + +static ssize_t cc_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct hfi1_port_attr *port_attr = + container_of(attr, struct hfi1_port_attr, attr); + struct hfi1_pportdata *ppd = + container_of(kobj, struct hfi1_pportdata, pport_cc_kobj); + + return port_attr->show(ppd, buf); +} + +static ssize_t cc_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct hfi1_port_attr *port_attr = + container_of(attr, struct hfi1_port_attr, attr); + struct hfi1_pportdata *ppd = + container_of(kobj, struct hfi1_pportdata, pport_cc_kobj); + + return port_attr->store(ppd, buf, count); +} + +static const struct sysfs_ops port_cc_sysfs_ops = { + .show = cc_attr_show, + .store = cc_attr_store +}; + +static struct attribute *port_cc_default_attributes[] = { + &cc_prescan_attr.attr +}; + +static struct kobj_type port_cc_ktype = { + .release = port_release, + .sysfs_ops = &port_cc_sysfs_ops, + .default_attrs = port_cc_default_attributes +}; + /* Start sc2vl */ #define HFI1_SC2VL_ATTR(N) \ static struct hfi1_sc2vl_attr hfi1_sc2vl_attr_##N = { \ -- cgit v1.2.3-59-g8ed1b From affa48de8417ccdde467b075577f6e5154ff9943 Mon Sep 17 00:00:00 2001 From: Ashutosh Dixit Date: Wed, 3 Feb 2016 14:33:06 -0800 Subject: staging/rdma/hfi1: Add support for enabling/disabling PCIe ASPM hfi1 HW has a high PCIe ASPM L1 exit latency and also advertises an acceptable latency less than actual ASPM latencies. Additional mechanisms than those provided by BIOS/OS are therefore required to enable/disable ASPM for hfi1 to provide acceptable power/performance trade offs. This patch adds this support. By means of a module parameter ASPM can be either (a) always enabled (power save mode) (b) always disabled (performance mode) (c) enabled/disabled dynamically. The dynamic mode implements two heuristics to alleviate possible problems with high ASPM L1 exit latency. 
ASPM is normally enabled but is disabled if (a) there are any active user space PSM contexts, or (b) for verbs, ASPM is disabled as interrupt activity for a context starts to increase. A few more points about the verbs implementation. In order to reduce lock/cache contention between multiple verbs contexts, some processing is done at the context layer before contending for device layer locks. ASPM is disabled when two interrupts for a context happen within 1 millisec. A timer is scheduled which will re-enable ASPM after 1 second should the interrupt activity cease. Normally, every interrupt, or interrupt-pair should push the timer out further. However, since this might increase the processing load per interrupt, pushing the timer out is postponed for half a second. If after half a second we get two interrupts within 1 millisec the timer is pushed out by another second. Finally, the kernel ASPM API is not used in this patch. This is because this patch does several non-standard things as SW workarounds for HW issues. As mentioned above, it enables ASPM even when advertised actual latencies are greater than acceptable latencies. Also, whereas the kernel API only allows drivers to disable ASPM from driver probe, this patch enables/disables ASPM directly from interrupt context. Due to these reasons the kernel ASPM API was not used. Reviewed-by: Mike Marciniszyn Reviewed-by: Dean Luick Reviewed-by: Ira Weiny Signed-off-by: Ashutosh Dixit Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/aspm.h | 297 +++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/chip.c | 4 + drivers/staging/rdma/hfi1/chip_registers.h | 3 + drivers/staging/rdma/hfi1/file_ops.c | 11 +- drivers/staging/rdma/hfi1/hfi.h | 24 +++ drivers/staging/rdma/hfi1/init.c | 7 + drivers/staging/rdma/hfi1/pcie.c | 16 +- 7 files changed, 355 insertions(+), 7 deletions(-) create mode 100644 drivers/staging/rdma/hfi1/aspm.h (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/staging/rdma/hfi1/aspm.h new file mode 100644 index 000000000000..67fce1d0c7f0 --- /dev/null +++ b/drivers/staging/rdma/hfi1/aspm.h @@ -0,0 +1,297 @@ +/* + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _ASPM_H +#define _ASPM_H + +#include "hfi.h" + +extern uint aspm_mode; + +enum aspm_mode { + ASPM_MODE_DISABLED = 0, /* ASPM always disabled, performance mode */ + ASPM_MODE_ENABLED = 1, /* ASPM always enabled, power saving mode */ + ASPM_MODE_DYNAMIC = 2, /* ASPM enabled/disabled dynamically */ +}; + +/* Time after which the timer interrupt will re-enable ASPM */ +#define ASPM_TIMER_MS 1000 +/* Time for which interrupts are ignored after a timer has been scheduled */ +#define ASPM_RESCHED_TIMER_MS (ASPM_TIMER_MS / 2) +/* Two interrupts within this time trigger ASPM disable */ +#define ASPM_TRIGGER_MS 1 +#define ASPM_TRIGGER_NS (ASPM_TRIGGER_MS * 1000 * 1000ull) +#define ASPM_L1_SUPPORTED(reg) \ + (((reg & PCI_EXP_LNKCAP_ASPMS) >> 10) & 0x2) + +static inline bool aspm_hw_l1_supported(struct hfi1_devdata *dd) +{ + struct pci_dev *parent = dd->pcidev->bus->self; + u32 up, dn; + + pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &dn); + dn = ASPM_L1_SUPPORTED(dn); + + pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &up); + up = ASPM_L1_SUPPORTED(up); + + /* ASPM works on A-step but is reported as not supported */ + return (!!dn || is_ax(dd)) && !!up; +} + +/* Set L1 entrance latency for slower entry to L1 */ +static inline void aspm_hw_set_l1_ent_latency(struct hfi1_devdata *dd) +{ + u32 l1_ent_lat = 0x4u; + u32 reg32; + + pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, ®32); + reg32 &= ~PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK; + reg32 |= l1_ent_lat << PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT; + pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, reg32); +} + +static inline void aspm_hw_enable_l1(struct hfi1_devdata *dd) +{ + struct pci_dev *parent = dd->pcidev->bus->self; + + /* Enable ASPM L1 first in upstream component and then downstream */ + pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + PCI_EXP_LNKCTL_ASPM_L1); + pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, + PCI_EXP_LNKCTL_ASPM_L1); +} + +static inline void aspm_hw_disable_l1(struct hfi1_devdata *dd) +{ + struct pci_dev *parent = dd->pcidev->bus->self; + + /* Disable ASPM L1 first in downstream component and then upstream */ + pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0x0); + pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0x0); +} + +static inline void aspm_enable(struct hfi1_devdata *dd) +{ + if (dd->aspm_enabled || aspm_mode == ASPM_MODE_DISABLED || + 
!dd->aspm_supported) + return; + + aspm_hw_enable_l1(dd); + dd->aspm_enabled = true; +} + +static inline void aspm_disable(struct hfi1_devdata *dd) +{ + if (!dd->aspm_enabled || aspm_mode == ASPM_MODE_ENABLED) + return; + + aspm_hw_disable_l1(dd); + dd->aspm_enabled = false; +} + +static inline void aspm_disable_inc(struct hfi1_devdata *dd) +{ + unsigned long flags; + + spin_lock_irqsave(&dd->aspm_lock, flags); + aspm_disable(dd); + atomic_inc(&dd->aspm_disabled_cnt); + spin_unlock_irqrestore(&dd->aspm_lock, flags); +} + +static inline void aspm_enable_dec(struct hfi1_devdata *dd) +{ + unsigned long flags; + + spin_lock_irqsave(&dd->aspm_lock, flags); + if (atomic_dec_and_test(&dd->aspm_disabled_cnt)) + aspm_enable(dd); + spin_unlock_irqrestore(&dd->aspm_lock, flags); +} + +/* ASPM processing for each receive context interrupt */ +static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd) +{ + bool restart_timer; + bool close_interrupts; + unsigned long flags; + ktime_t now, prev; + + /* Quickest exit for minimum impact */ + if (!rcd->aspm_intr_supported) + return; + + spin_lock_irqsave(&rcd->aspm_lock, flags); + /* PSM contexts are open */ + if (!rcd->aspm_intr_enable) + goto unlock; + + prev = rcd->aspm_ts_last_intr; + now = ktime_get(); + rcd->aspm_ts_last_intr = now; + + /* An interrupt pair close together in time */ + close_interrupts = ktime_to_ns(ktime_sub(now, prev)) < ASPM_TRIGGER_NS; + + /* Don't push out our timer till this much time has elapsed */ + restart_timer = ktime_to_ns(ktime_sub(now, rcd->aspm_ts_timer_sched)) > + ASPM_RESCHED_TIMER_MS * NSEC_PER_MSEC; + restart_timer = restart_timer && close_interrupts; + + /* Disable ASPM and schedule timer */ + if (rcd->aspm_enabled && close_interrupts) { + aspm_disable_inc(rcd->dd); + rcd->aspm_enabled = false; + restart_timer = true; + } + + if (restart_timer) { + mod_timer(&rcd->aspm_timer, + jiffies + msecs_to_jiffies(ASPM_TIMER_MS)); + rcd->aspm_ts_timer_sched = now; + } +unlock: + spin_unlock_irqrestore(&rcd->aspm_lock, flags); +} + +/* Timer function for re-enabling ASPM in the absence of interrupt activity */ +static inline void aspm_ctx_timer_function(unsigned long data) +{ + struct hfi1_ctxtdata *rcd = (struct hfi1_ctxtdata *)data; + unsigned long flags; + + spin_lock_irqsave(&rcd->aspm_lock, flags); + aspm_enable_dec(rcd->dd); + rcd->aspm_enabled = true; + spin_unlock_irqrestore(&rcd->aspm_lock, flags); +} + +/* Disable interrupt processing for verbs contexts when PSM contexts are open */ +static inline void aspm_disable_all(struct hfi1_devdata *dd) +{ + struct hfi1_ctxtdata *rcd; + unsigned long flags; + unsigned i; + + for (i = 0; i < dd->first_user_ctxt; i++) { + rcd = dd->rcd[i]; + del_timer_sync(&rcd->aspm_timer); + spin_lock_irqsave(&rcd->aspm_lock, flags); + rcd->aspm_intr_enable = false; + spin_unlock_irqrestore(&rcd->aspm_lock, flags); + } + + aspm_disable(dd); + atomic_set(&dd->aspm_disabled_cnt, 0); +} + +/* Re-enable interrupt processing for verbs contexts */ +static inline void aspm_enable_all(struct hfi1_devdata *dd) +{ + struct hfi1_ctxtdata *rcd; + unsigned long flags; + unsigned i; + + aspm_enable(dd); + + if (aspm_mode != ASPM_MODE_DYNAMIC) + return; + + for (i = 0; i < dd->first_user_ctxt; i++) { + rcd = dd->rcd[i]; + spin_lock_irqsave(&rcd->aspm_lock, flags); + rcd->aspm_intr_enable = true; + rcd->aspm_enabled = true; + spin_unlock_irqrestore(&rcd->aspm_lock, flags); + } +} + +static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd) +{ + spin_lock_init(&rcd->aspm_lock); + 
setup_timer(&rcd->aspm_timer, aspm_ctx_timer_function, + (unsigned long)rcd); + rcd->aspm_intr_supported = rcd->dd->aspm_supported && + aspm_mode == ASPM_MODE_DYNAMIC && + rcd->ctxt < rcd->dd->first_user_ctxt; +} + +static inline void aspm_init(struct hfi1_devdata *dd) +{ + unsigned i; + + spin_lock_init(&dd->aspm_lock); + dd->aspm_supported = aspm_hw_l1_supported(dd); + + for (i = 0; i < dd->first_user_ctxt; i++) + aspm_ctx_init(dd->rcd[i]); + + /* Start with ASPM disabled */ + aspm_hw_set_l1_ent_latency(dd); + dd->aspm_enabled = false; + aspm_hw_disable_l1(dd); + + /* Now turn on ASPM if configured */ + aspm_enable_all(dd); +} + +static inline void aspm_exit(struct hfi1_devdata *dd) +{ + aspm_disable_all(dd); + + /* Turn on ASPM on exit to conserve power */ + aspm_enable(dd); +} + +#endif /* _ASPM_H */ diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index d10ba6732e72..3577042423b2 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -65,6 +65,7 @@ #include "eprom.h" #include "efivar.h" #include "platform.h" +#include "aspm.h" #define NUM_IB_PORTS 1 @@ -8069,6 +8070,7 @@ static irqreturn_t receive_context_interrupt(int irq, void *data) trace_hfi1_receive_interrupt(dd, rcd->ctxt); this_cpu_inc(*dd->int_counter); + aspm_ctx_disable(rcd); /* receive interrupt remains blocked while processing packets */ disposition = rcd->do_interrupt(rcd, 0); @@ -12792,6 +12794,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd) dd->num_rcv_contexts = total_contexts; dd->n_krcv_queues = num_kernel_contexts; dd->first_user_ctxt = num_kernel_contexts; + dd->num_user_contexts = num_user_contexts; dd->freectxts = num_user_contexts; dd_dev_info(dd, "rcv contexts: chip %d, used %d (kernel %d, user %d)\n", @@ -13948,6 +13951,7 @@ done: */ void hfi1_start_cleanup(struct hfi1_devdata *dd) { + aspm_exit(dd); free_cntrs(dd); free_rcverr(dd); clean_up_interrupts(dd); diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h index 3cd3352af2ce..23898ebbad24 100644 --- a/drivers/staging/rdma/hfi1/chip_registers.h +++ b/drivers/staging/rdma/hfi1/chip_registers.h @@ -1281,6 +1281,9 @@ #define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT 0 #define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK 0xFFFFull #define PCIE_CFG_REG_PL2 (PCIE + 0x000000000708) +#define PCIE_CFG_REG_PL3 (PCIE + 0x00000000070C) +#define PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT 27 +#define PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK 0x38000000 #define PCIE_CFG_REG_PL102 (PCIE + 0x000000000898) #define PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT 12 #define PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT 6 diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 5c694fac3028..084581a42c98 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -60,6 +60,7 @@ #include "user_sdma.h" #include "user_exp_rcv.h" #include "eprom.h" +#include "aspm.h" #undef pr_fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt @@ -798,7 +799,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); hfi1_stats.sps_ctxts--; - dd->freectxts++; + if (++dd->freectxts == dd->num_user_contexts) + aspm_enable_all(dd); mutex_unlock(&hfi1_mutex); hfi1_free_ctxtdata(dd, uctxt); done: @@ -1040,7 +1042,12 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd, INIT_LIST_HEAD(&uctxt->sdma_queues); spin_lock_init(&uctxt->sdma_qlock); 
hfi1_stats.sps_ctxts++; - dd->freectxts--; + /* + * Disable ASPM when there are open user/PSM contexts to avoid + * issues with ASPM L1 exit latency + */ + if (dd->freectxts-- == dd->num_user_contexts) + aspm_disable_all(dd); fd->uctxt = uctxt; return 0; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index d19d6b72352d..cb2f90a0033c 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -314,6 +314,21 @@ struct hfi1_ctxtdata { struct list_head sdma_queues; spinlock_t sdma_qlock; + /* Is ASPM interrupt supported for this context */ + bool aspm_intr_supported; + /* ASPM state (enabled/disabled) for this context */ + bool aspm_enabled; + /* Timer for re-enabling ASPM if interrupt activity quietens down */ + struct timer_list aspm_timer; + /* Lock to serialize between intr, timer intr and user threads */ + spinlock_t aspm_lock; + /* Is ASPM processing enabled for this context (in intr context) */ + bool aspm_intr_enable; + /* Last interrupt timestamp */ + ktime_t aspm_ts_last_intr; + /* Last timestamp at which we scheduled a timer for this context */ + ktime_t aspm_ts_timer_sched; + /* * The interrupt handler for a particular receive context can vary * throughout it's lifetime. This is not a lock protected data member so @@ -893,6 +908,8 @@ struct hfi1_devdata { * number of ctxts available for PSM open */ u32 freectxts; + /* total number of available user/PSM contexts */ + u32 num_user_contexts; /* base receive interrupt timeout, in CSR units */ u32 rcv_intr_timeout_csr; @@ -1121,6 +1138,13 @@ struct hfi1_devdata { /* receive context tail dummy address */ __le64 *rcvhdrtail_dummy_kvaddr; dma_addr_t rcvhdrtail_dummy_physaddr; + + bool aspm_supported; /* Does HW support ASPM */ + bool aspm_enabled; /* ASPM state: enabled/disabled */ + /* Serialize ASPM enable/disable between multiple verbs contexts */ + spinlock_t aspm_lock; + /* Number of verbs contexts which have disabled ASPM */ + atomic_t aspm_disabled_cnt; }; /* 8051 firmware version helper */ diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index fc3d40a58cf3..ba52ee38c1f2 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -66,6 +66,7 @@ #include "sdma.h" #include "debugfs.h" #include "verbs.h" +#include "aspm.h" #undef pr_fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt @@ -190,6 +191,12 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) } } + /* + * Initialize aspm, to be done after gen3 transition and setting up + * contexts and before enabling interrupts + */ + aspm_init(dd); + return 0; nomem: ret = -ENOMEM; diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 3cdc8047f16b..76cf80792516 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -57,6 +57,7 @@ #include "hfi.h" #include "chip_registers.h" +#include "aspm.h" /* link speed vector for Gen3 speed - not in Linux headers */ #define GEN1_SPEED_VECTOR 0x1 @@ -463,6 +464,10 @@ static int hfi1_pcie_caps; module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO); MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)"); +uint aspm_mode = ASPM_MODE_DISABLED; +module_param_named(aspm, aspm_mode, uint, S_IRUGO); +MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); + static void tune_pcie_caps(struct hfi1_devdata *dd) { struct pci_dev *parent; @@ -957,7 +962,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd) int do_retry, retry_count = 0; uint 
default_pset; u16 target_vector, target_speed; - u16 lnkctl, lnkctl2, vendor; + u16 lnkctl2, vendor; u8 nsbr = 1; u8 div; const u8 (*eq)[3]; @@ -1147,11 +1152,12 @@ retry: */ write_xmt_margin(dd, __func__); - /* step 5e: disable active state power management (ASPM) */ + /* + * step 5e: disable active state power management (ASPM). It + * will be enabled if required later + */ dd_dev_info(dd, "%s: clearing ASPM\n", __func__); - pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &lnkctl); - lnkctl &= ~PCI_EXP_LNKCTL_ASPMC; - pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, lnkctl); + aspm_hw_disable_l1(dd); /* * step 5f: clear DirectSpeedChange -- cgit v1.2.3-59-g8ed1b From 31e7af1c5bcd9617c68f1bbd78a85e896caf87e4 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 3 Feb 2016 14:33:14 -0800 Subject: staging/rdma/hfi1: Fix SL->SC checks SLs which are mapped to SC15 are invalid and should fail the operation. For RC/UC QP types, verify the AH information at modify_qp time and fail the modify_qp if the SL is invalid. For other QP types check the SL during post_send via the new rdmavt callback. Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 20 ++++++++++++++++++++ drivers/staging/rdma/hfi1/verbs.c | 1 + drivers/staging/rdma/hfi1/verbs.h | 2 ++ 3 files changed, 23 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 52723c2bad37..05a9619752ae 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -185,6 +185,9 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_AV) { sc = ah_to_sc(ibqp->device, &attr->ah_attr); + if (sc == 0xf) + return -EINVAL; + if (!qp_to_sdma_engine(qp, sc) && dd->flags & HFI1_HAS_SEND_DMA) return -EINVAL; @@ -192,6 +195,9 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_ALT_PATH) { sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr); + if (sc == 0xf) + return -EINVAL; + if (!qp_to_sdma_engine(qp, sc) && dd->flags & HFI1_HAS_SEND_DMA) return -EINVAL; @@ -220,6 +226,20 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, } } +int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr) +{ + struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); + struct rvt_ah *ah = ibah_to_rvtah(ud_wr(wr)->ah); + + if (qp->ibqp.qp_type != IB_QPT_RC && + qp->ibqp.qp_type != IB_QPT_UC && + qp->ibqp.qp_type != IB_QPT_SMI && + ibp->sl_to_sc[ah->attr.sl] == 0xf) { + return -EINVAL; + } + return 0; +} + /** * hfi1_compute_aeth - compute the AETH (syndrome + MSN) * @qp: the queue pair to compute the AETH for diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index a53d93a5245c..6d541caf4a51 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1561,6 +1561,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu; dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp; dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp; + dd->verbs_dev.rdi.driver_f.check_send_wr = hfi1_check_send_wr; /* completeion queue */ snprintf(dd->verbs_dev.rdi.dparms.cq_name, diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 79bcab61d2ba..1571ae390042 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ 
b/drivers/staging/rdma/hfi1/verbs.h @@ -439,6 +439,8 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); +int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr); + int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, int has_grh, struct rvt_qp *qp, u32 bth0); -- cgit v1.2.3-59-g8ed1b From d413c1a65292189eb729738c549732951a2e50ab Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:33:22 -0800 Subject: staging/rdma/hfi1: Remove unused code This comment and code was unused. Just remove it. Reviewed-by: Ira Weiny Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/init.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index ba52ee38c1f2..1680808d0b7c 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -1526,13 +1526,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) goto bail; } - /* Event mask is per device now and is in hfi1_devdata */ - /*if (rcd->ctxt >= dd->first_user_ctxt) { - rcd->user_event_mask = vmalloc_user(PAGE_SIZE); - if (!rcd->user_event_mask) - goto bail_free_hdrq; - }*/ - if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, -- cgit v1.2.3-59-g8ed1b From 2780739262e32b9c283b6b04f7899f9803993ebc Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:33:31 -0800 Subject: staging/rdma/hfi1: Remove unnecessary duplicated variable struct hfi1_devdata contained 2 variables which represented the numa node the device is attached to. Remove the duplicated one. Reviewed-by: Ira Weiny Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 1 - drivers/staging/rdma/hfi1/init.c | 5 ----- drivers/staging/rdma/hfi1/qp.h | 2 +- drivers/staging/rdma/hfi1/verbs.c | 2 +- 4 files changed, 2 insertions(+), 8 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index cb2f90a0033c..897046cd8fa5 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1128,7 +1128,6 @@ struct hfi1_devdata { struct timer_list rcverr_timer; u32 rcv_ovfl_cnt; - int assigned_node_id; wait_queue_head_t event_queue; /* Save the enabled LCB error bits */ diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 1680808d0b7c..17b876d77037 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -130,15 +130,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) { unsigned i; int ret; - int local_node_id = pcibus_to_node(dd->pcidev->bus); /* Control context has to be always 0 */ BUILD_BUG_ON(HFI1_CTRL_CTXT != 0); - if (local_node_id < 0) - local_node_id = numa_node_id(); - dd->assigned_node_id = local_node_id; - dd->rcd = kcalloc(dd->num_rcv_contexts, sizeof(*dd->rcd), GFP_KERNEL); if (!dd->rcd) goto nomem; diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 36be54771205..973c14b5268a 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -157,7 +157,7 @@ static inline void _hfi1_schedule_send(struct rvt_qp *qp) iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? 
priv->s_sde->cpu : - cpumask_first(cpumask_of_node(dd->assigned_node_id))); + cpumask_first(cpumask_of_node(dd->node))); } /** diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 6d541caf4a51..d617324e3c48 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1567,7 +1567,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) snprintf(dd->verbs_dev.rdi.dparms.cq_name, sizeof(dd->verbs_dev.rdi.dparms.cq_name), "hfi1_cq%d", dd->unit); - dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id; + dd->verbs_dev.rdi.dparms.node = dd->node; /* misc settings */ dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */ -- cgit v1.2.3-59-g8ed1b From 957558c9668f06b04530b7ddbfd2dbea86630496 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:33:40 -0800 Subject: staging/rdma/hfi1: Consolidate CPU/IRQ affinity support This patch unifies the affinity support for CPU and IRQ allocations into a single code base. The goal is to allow the driver to make intelligent placement decisions based on an overall view of processes and IRQs across as much of the driver as possible. Pulling all the scattered affinity code into a single code base lays the groundwork for accomplishing the above goal. For example, previous implementations made user process placement decisions based solely on other user processes. That algorithm was limited because it did not take IRQ placement into account and could result in overloading certain CPUs. A single code base also makes it much easier to maintain and debug affinity-related performance issues. Reviewed-by: Mike Marciniszyn Reviewed-by: Dean Luick Reviewed-by: Ira Weiny Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 3 +- drivers/staging/rdma/hfi1/affinity.c | 433 +++++++++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/affinity.h | 94 ++++++++ drivers/staging/rdma/hfi1/chip.c | 115 ++-------- drivers/staging/rdma/hfi1/file_ops.c | 27 ++- drivers/staging/rdma/hfi1/hfi.h | 8 +- drivers/staging/rdma/hfi1/init.c | 11 +- 7 files changed, 578 insertions(+), 113 deletions(-) create mode 100644 drivers/staging/rdma/hfi1/affinity.c create mode 100644 drivers/staging/rdma/hfi1/affinity.h (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index 9b3f7e9f0796..6681b74b5cf3 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -7,7 +7,8 @@ # obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o -hfi1-y := chip.o device.o diag.o driver.o efivar.o eprom.o file_ops.o firmware.o \ +hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \ + eprom.o file_ops.o firmware.o \ init.o intr.o mad.o pcie.o pio.o pio_copy.o platform.o \ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/staging/rdma/hfi1/affinity.c new file mode 100644 index 000000000000..59b29725ea11 --- /dev/null +++ b/drivers/staging/rdma/hfi1/affinity.c @@ -0,0 +1,433 @@ +/* + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Corporation.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#include +#include +#include + +#include "hfi.h" +#include "affinity.h" +#include "sdma.h" +#include "trace.h" + +struct cpu_mask_set { + struct cpumask mask; + struct cpumask used; + uint gen; +}; + +struct hfi1_affinity { + struct cpu_mask_set def_intr; + struct cpu_mask_set rcv_intr; + struct cpu_mask_set proc; + /* spin lock to protect affinity struct */ + spinlock_t lock; +}; + +/* Name of IRQ types, indexed by enum irq_type */ +static const char * const irq_type_names[] = { + "SDMA", + "RCVCTXT", + "GENERAL", + "OTHER", +}; + +static inline void init_cpu_mask_set(struct cpu_mask_set *set) +{ + cpumask_clear(&set->mask); + cpumask_clear(&set->used); + set->gen = 0; +} + +/* + * Interrupt affinity. + * + * non-rcv avail gets a default mask that + * starts as possible cpus with threads reset + * and each rcv avail reset. + * + * rcv avail gets node relative 1 wrapping back + * to the node relative 1 as necessary. 
+ * + */ +int hfi1_dev_affinity_init(struct hfi1_devdata *dd) +{ + int node = pcibus_to_node(dd->pcidev->bus); + struct hfi1_affinity *info; + const struct cpumask *local_mask; + int curr_cpu, possible, i, ht; + + if (node < 0) + node = numa_node_id(); + dd->node = node; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + spin_lock_init(&info->lock); + + init_cpu_mask_set(&info->def_intr); + init_cpu_mask_set(&info->rcv_intr); + init_cpu_mask_set(&info->proc); + + local_mask = cpumask_of_node(dd->node); + if (cpumask_first(local_mask) >= nr_cpu_ids) + local_mask = topology_core_cpumask(0); + /* use local mask as default */ + cpumask_copy(&info->def_intr.mask, local_mask); + /* + * Remove HT cores from the default mask. Do this in two steps below. + */ + possible = cpumask_weight(&info->def_intr.mask); + ht = cpumask_weight(topology_sibling_cpumask( + cpumask_first(&info->def_intr.mask))); + /* + * Step 1. Skip over the first N HT siblings and use them as the + * "real" cores. Assumes that HT cores are not enumerated in + * succession (except in the single core case). + */ + curr_cpu = cpumask_first(&info->def_intr.mask); + for (i = 0; i < possible / ht; i++) + curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask); + /* + * Step 2. Remove the remaining HT siblings. Use cpumask_next() to + * skip any gaps. + */ + for (; i < possible; i++) { + cpumask_clear_cpu(curr_cpu, &info->def_intr.mask); + curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask); + } + + /* fill in the receive list */ + possible = cpumask_weight(&info->def_intr.mask); + curr_cpu = cpumask_first(&info->def_intr.mask); + if (possible == 1) { + /* only one CPU, everyone will use it */ + cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask); + } else { + /* + * Retain the first CPU in the default list for the control + * context. + */ + curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask); + /* + * Remove the remaining kernel receive queues from + * the default list and add them to the receive list. + */ + for (i = 0; i < dd->n_krcv_queues - 1; i++) { + cpumask_clear_cpu(curr_cpu, &info->def_intr.mask); + cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask); + curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask); + if (curr_cpu >= nr_cpu_ids) + break; + } + } + + cpumask_copy(&info->proc.mask, cpu_online_mask); + dd->affinity = info; + return 0; +} + +void hfi1_dev_affinity_free(struct hfi1_devdata *dd) +{ + kfree(dd->affinity); +} + +int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix) +{ + int ret; + cpumask_var_t diff; + struct cpu_mask_set *set; + struct sdma_engine *sde = NULL; + struct hfi1_ctxtdata *rcd = NULL; + char extra[64]; + int cpu = -1; + + extra[0] = '\0'; + cpumask_clear(&msix->mask); + + ret = zalloc_cpumask_var(&diff, GFP_KERNEL); + if (!ret) + return -ENOMEM; + + switch (msix->type) { + case IRQ_SDMA: + sde = (struct sdma_engine *)msix->arg; + scnprintf(extra, 64, "engine %u", sde->this_idx); + /* fall through */ + case IRQ_GENERAL: + set = &dd->affinity->def_intr; + break; + case IRQ_RCVCTXT: + rcd = (struct hfi1_ctxtdata *)msix->arg; + if (rcd->ctxt == HFI1_CTRL_CTXT) { + set = &dd->affinity->def_intr; + cpu = cpumask_first(&set->mask); + } else { + set = &dd->affinity->rcv_intr; + } + scnprintf(extra, 64, "ctxt %u", rcd->ctxt); + break; + default: + dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type); + return -EINVAL; + } + + /* + * The control receive context is placed on a particular CPU, which + * is set above. Skip accounting for it. 
Everything else finds its + * CPU here. + */ + if (cpu == -1) { + spin_lock(&dd->affinity->lock); + if (cpumask_equal(&set->mask, &set->used)) { + /* + * We've used up all the CPUs, bump up the generation + * and reset the 'used' map + */ + set->gen++; + cpumask_clear(&set->used); + } + cpumask_andnot(diff, &set->mask, &set->used); + cpu = cpumask_first(diff); + cpumask_set_cpu(cpu, &set->used); + spin_unlock(&dd->affinity->lock); + } + + switch (msix->type) { + case IRQ_SDMA: + sde->cpu = cpu; + break; + case IRQ_GENERAL: + case IRQ_RCVCTXT: + case IRQ_OTHER: + break; + } + + cpumask_set_cpu(cpu, &msix->mask); + dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n", + msix->msix.vector, irq_type_names[msix->type], + extra, cpu); + irq_set_affinity_hint(msix->msix.vector, &msix->mask); + + free_cpumask_var(diff); + return 0; +} + +void hfi1_put_irq_affinity(struct hfi1_devdata *dd, + struct hfi1_msix_entry *msix) +{ + struct cpu_mask_set *set = NULL; + struct hfi1_ctxtdata *rcd; + + switch (msix->type) { + case IRQ_SDMA: + case IRQ_GENERAL: + set = &dd->affinity->def_intr; + break; + case IRQ_RCVCTXT: + rcd = (struct hfi1_ctxtdata *)msix->arg; + /* only do accounting for non control contexts */ + if (rcd->ctxt != HFI1_CTRL_CTXT) + set = &dd->affinity->rcv_intr; + break; + default: + return; + } + + if (set) { + spin_lock(&dd->affinity->lock); + cpumask_andnot(&set->used, &set->used, &msix->mask); + if (cpumask_empty(&set->used) && set->gen) { + set->gen--; + cpumask_copy(&set->used, &set->mask); + } + spin_unlock(&dd->affinity->lock); + } + + irq_set_affinity_hint(msix->msix.vector, NULL); + cpumask_clear(&msix->mask); +} + +int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) +{ + int cpu = -1, ret; + cpumask_var_t diff, mask, intrs; + const struct cpumask *node_mask, + *proc_mask = tsk_cpus_allowed(current); + struct cpu_mask_set *set = &dd->affinity->proc; + char buf[1024]; + + /* + * check whether process/context affinity has already + * been set + */ + if (cpumask_weight(proc_mask) == 1) { + scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); + hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s", + current->pid, current->comm, buf); + /* + * Mark the pre-set CPU as used. This is atomic so we don't + * need the lock + */ + cpu = cpumask_first(proc_mask); + cpumask_set_cpu(cpu, &set->used); + goto done; + } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { + scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); + hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s", + current->pid, current->comm, buf); + goto done; + } + + /* + * The process does not have a preset CPU affinity so find one to + * recommend. We prefer CPUs on the same NUMA as the device. + */ + + ret = zalloc_cpumask_var(&diff, GFP_KERNEL); + if (!ret) + goto done; + ret = zalloc_cpumask_var(&mask, GFP_KERNEL); + if (!ret) + goto free_diff; + ret = zalloc_cpumask_var(&intrs, GFP_KERNEL); + if (!ret) + goto free_mask; + + spin_lock(&dd->affinity->lock); + /* + * If we've used all available CPUs, clear the mask and start + * overloading. + */ + if (cpumask_equal(&set->mask, &set->used)) { + set->gen++; + cpumask_clear(&set->used); + } + + /* CPUs used by interrupt handlers */ + cpumask_copy(intrs, (dd->affinity->def_intr.gen ? + &dd->affinity->def_intr.mask : + &dd->affinity->def_intr.used)); + cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? 
+ &dd->affinity->rcv_intr.mask : + &dd->affinity->rcv_intr.used)); + scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs)); + hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf); + + /* + * If we don't have a NUMA node requested, preference is towards + * device NUMA node + */ + if (node == -1) + node = dd->node; + node_mask = cpumask_of_node(node); + scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask)); + hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf); + + /* diff will hold all unused cpus */ + cpumask_andnot(diff, &set->mask, &set->used); + scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff)); + hfi1_cdbg(PROC, "unused CPUs (all) %s", buf); + + /* get cpumask of available CPUs on preferred NUMA */ + cpumask_and(mask, diff, node_mask); + scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); + hfi1_cdbg(PROC, "available cpus on NUMA %s", buf); + + /* + * At first, we don't want to place processes on the same + * CPUs as interrupt handlers. + */ + cpumask_andnot(diff, mask, intrs); + if (!cpumask_empty(diff)) + cpumask_copy(mask, diff); + + /* + * if we don't have a cpu on the preferred NUMA, get + * the list of the remaining available CPUs + */ + if (cpumask_empty(mask)) { + cpumask_andnot(diff, &set->mask, &set->used); + cpumask_andnot(mask, diff, node_mask); + } + scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); + hfi1_cdbg(PROC, "possible CPUs for process %s", buf); + + cpu = cpumask_first(mask); + if (cpu >= nr_cpu_ids) /* empty */ + cpu = -1; + else + cpumask_set_cpu(cpu, &set->used); + spin_unlock(&dd->affinity->lock); + + free_cpumask_var(intrs); +free_mask: + free_cpumask_var(mask); +free_diff: + free_cpumask_var(diff); +done: + return cpu; +} + +void hfi1_put_proc_affinity(struct hfi1_devdata *dd, int cpu) +{ + struct cpu_mask_set *set = &dd->affinity->proc; + + if (cpu < 0) + return; + spin_lock(&dd->affinity->lock); + cpumask_clear_cpu(cpu, &set->used); + if (cpumask_empty(&set->used) && set->gen) { + set->gen--; + cpumask_copy(&set->used, &set->mask); + } + spin_unlock(&dd->affinity->lock); +} + diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/staging/rdma/hfi1/affinity.h new file mode 100644 index 000000000000..2bdac9680e8e --- /dev/null +++ b/drivers/staging/rdma/hfi1/affinity.h @@ -0,0 +1,94 @@ +/* + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _HFI1_AFFINITY_H +#define _HFI1_AFFINITY_H + +#include "hfi.h" + +enum irq_type { + IRQ_SDMA, + IRQ_RCVCTXT, + IRQ_GENERAL, + IRQ_OTHER +}; + +/* Can be used for both memory and cpu */ +enum affinity_flags { + AFF_AUTO, + AFF_NUMA_LOCAL, + AFF_DEV_LOCAL, + AFF_IRQ_LOCAL +}; + +struct hfi1_msix_entry; + +/* Initialize driver affinity data */ +int hfi1_dev_affinity_init(struct hfi1_devdata *); +/* Free driver affinity data */ +void hfi1_dev_affinity_free(struct hfi1_devdata *); +/* + * Set IRQ affinity to a CPU. The function will determine the + * CPU and set the affinity to it. + */ +int hfi1_get_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *); +/* + * Remove the IRQ's CPU affinity. This function also updates + * any internal CPU tracking data + */ +void hfi1_put_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *); +/* + * Determine a CPU affinity for a user process, if the process does not + * have an affinity set yet. + */ +int hfi1_get_proc_affinity(struct hfi1_devdata *, int); +/* Release a CPU used by a user process. 
*/ +void hfi1_put_proc_affinity(struct hfi1_devdata *, int); + +#endif /* _HFI1_AFFINITY_H */ diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 3577042423b2..6045c9154a1f 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -12349,9 +12349,8 @@ static void clean_up_interrupts(struct hfi1_devdata *dd) for (i = 0; i < dd->num_msix_entries; i++, me++) { if (me->arg == NULL) /* => no irq, no affinity */ - break; - irq_set_affinity_hint(dd->msix_entries[i].msix.vector, - NULL); + continue; + hfi1_put_irq_affinity(dd, &dd->msix_entries[i]); free_irq(me->msix.vector, me->arg); } } else { @@ -12372,8 +12371,6 @@ static void clean_up_interrupts(struct hfi1_devdata *dd) } /* clean structures */ - for (i = 0; i < dd->num_msix_entries; i++) - free_cpumask_var(dd->msix_entries[i].mask); kfree(dd->msix_entries); dd->msix_entries = NULL; dd->num_msix_entries = 0; @@ -12438,16 +12435,10 @@ static int request_intx_irq(struct hfi1_devdata *dd) static int request_msix_irqs(struct hfi1_devdata *dd) { - const struct cpumask *local_mask; - cpumask_var_t def, rcv; - bool def_ret, rcv_ret; int first_general, last_general; int first_sdma, last_sdma; int first_rx, last_rx; - int first_cpu, curr_cpu; - int rcv_cpu, sdma_cpu; - int i, ret = 0, possible; - int ht; + int i, ret = 0; /* calculate the ranges we are going to use */ first_general = 0; @@ -12455,52 +12446,6 @@ static int request_msix_irqs(struct hfi1_devdata *dd) first_rx = last_sdma = first_sdma + dd->num_sdma; last_rx = first_rx + dd->n_krcv_queues; - /* - * Interrupt affinity. - * - * non-rcv avail gets a default mask that - * starts as possible cpus with threads reset - * and each rcv avail reset. - * - * rcv avail gets node relative 1 wrapping back - * to the node relative 1 as necessary. - * - */ - local_mask = cpumask_of_pcibus(dd->pcidev->bus); - /* if first cpu is invalid, use NUMA 0 */ - if (cpumask_first(local_mask) >= nr_cpu_ids) - local_mask = topology_core_cpumask(0); - - def_ret = zalloc_cpumask_var(&def, GFP_KERNEL); - rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL); - if (!def_ret || !rcv_ret) - goto bail; - /* use local mask as default */ - cpumask_copy(def, local_mask); - possible = cpumask_weight(def); - /* disarm threads from default */ - ht = cpumask_weight( - topology_sibling_cpumask(cpumask_first(local_mask))); - for (i = possible/ht; i < possible; i++) - cpumask_clear_cpu(i, def); - /* def now has full cores on chosen node*/ - first_cpu = cpumask_first(def); - if (nr_cpu_ids >= first_cpu) - first_cpu++; - curr_cpu = first_cpu; - - /* One context is reserved as control context */ - for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) { - cpumask_clear_cpu(curr_cpu, def); - cpumask_set_cpu(curr_cpu, rcv); - curr_cpu = cpumask_next(curr_cpu, def); - if (curr_cpu >= nr_cpu_ids) - break; - } - /* def mask has non-rcv, rcv has recv mask */ - rcv_cpu = cpumask_first(rcv); - sdma_cpu = cpumask_first(def); - /* * Sanity check - the code expects all SDMA chip source * interrupts to be in the same CSR, starting at bit 0. 
Verify @@ -12526,6 +12471,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) snprintf(me->name, sizeof(me->name), DRIVER_NAME "_%d", dd->unit); err_info = "general"; + me->type = IRQ_GENERAL; } else if (first_sdma <= i && i < last_sdma) { idx = i - first_sdma; sde = &dd->per_sdma[idx]; @@ -12535,6 +12481,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) DRIVER_NAME "_%d sdma%d", dd->unit, idx); err_info = "sdma"; remap_sdma_interrupts(dd, idx, i); + me->type = IRQ_SDMA; } else if (first_rx <= i && i < last_rx) { idx = i - first_rx; rcd = dd->rcd[idx]; @@ -12555,6 +12502,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) DRIVER_NAME "_%d kctxt%d", dd->unit, idx); err_info = "receive context"; remap_intr(dd, IS_RCVAVAIL_START + idx, i); + me->type = IRQ_RCVCTXT; } else { /* not in our expected range - complain, then ignore it */ @@ -12582,52 +12530,13 @@ static int request_msix_irqs(struct hfi1_devdata *dd) */ me->arg = arg; - if (!zalloc_cpumask_var( - &dd->msix_entries[i].mask, - GFP_KERNEL)) - goto bail; - if (handler == sdma_interrupt) { - dd_dev_info(dd, "sdma engine %d cpu %d\n", - sde->this_idx, sdma_cpu); - sde->cpu = sdma_cpu; - cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask); - sdma_cpu = cpumask_next(sdma_cpu, def); - if (sdma_cpu >= nr_cpu_ids) - sdma_cpu = cpumask_first(def); - } else if (handler == receive_context_interrupt) { - dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt, - (rcd->ctxt == HFI1_CTRL_CTXT) ? - cpumask_first(def) : rcv_cpu); - if (rcd->ctxt == HFI1_CTRL_CTXT) { - /* map to first default */ - cpumask_set_cpu(cpumask_first(def), - dd->msix_entries[i].mask); - } else { - cpumask_set_cpu(rcv_cpu, - dd->msix_entries[i].mask); - rcv_cpu = cpumask_next(rcv_cpu, rcv); - if (rcv_cpu >= nr_cpu_ids) - rcv_cpu = cpumask_first(rcv); - } - } else { - /* otherwise first def */ - dd_dev_info(dd, "%s cpu %d\n", - err_info, cpumask_first(def)); - cpumask_set_cpu( - cpumask_first(def), dd->msix_entries[i].mask); - } - irq_set_affinity_hint( - dd->msix_entries[i].msix.vector, - dd->msix_entries[i].mask); + ret = hfi1_get_irq_affinity(dd, me); + if (ret) + dd_dev_err(dd, + "unable to pin IRQ %d\n", ret); } -out: - free_cpumask_var(def); - free_cpumask_var(rcv); return ret; -bail: - ret = -ENOMEM; - goto out; } /* @@ -14238,6 +14147,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, /* set up KDETH QP prefix in both RX and TX CSRs */ init_kdeth_qp(dd); + ret = hfi1_dev_affinity_init(dd); + if (ret) + goto bail_cleanup; + /* send contexts must be set up before receive contexts */ ret = init_send_contexts(dd); if (ret) diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 084581a42c98..c9172a039242 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -749,6 +749,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) /* drain user sdma queue */ hfi1_user_sdma_free_queues(fdata); + /* release the cpu */ + hfi1_put_proc_affinity(dd, fdata->rec_cpu_num); + /* * Clear any left over, unhandled events so the next process that * gets this context doesn't get confused. @@ -842,8 +845,16 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) mutex_lock(&hfi1_mutex); /* First, lets check if we need to setup a shared context? 
*/ - if (uinfo->subctxt_cnt) + if (uinfo->subctxt_cnt) { + struct hfi1_filedata *fd = fp->private_data; + ret = find_shared_ctxt(fp, uinfo); + if (ret < 0) + goto done_unlock; + if (ret) + fd->rec_cpu_num = hfi1_get_proc_affinity( + fd->uctxt->dd, fd->uctxt->numa_id); + } /* * We execute the following block if we couldn't find a @@ -853,6 +864,7 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo) i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; ret = get_user_context(fp, uinfo, i_minor - 1, alg); } +done_unlock: mutex_unlock(&hfi1_mutex); done: return ret; @@ -978,7 +990,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd, struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt; unsigned ctxt; - int ret; + int ret, numa; if (dd->flags & HFI1_FROZEN) { /* @@ -998,12 +1010,21 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd, if (ctxt == dd->num_rcv_contexts) return -EBUSY; - uctxt = hfi1_create_ctxtdata(dd->pport, ctxt); + fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1); + if (fd->rec_cpu_num != -1) + numa = cpu_to_node(fd->rec_cpu_num); + else + numa = numa_node_id(); + uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa); if (!uctxt) { dd_dev_err(dd, "Unable to allocate ctxtdata memory, failing open\n"); return -ENOMEM; } + hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)", + uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num, + uctxt->numa_id); + /* * Allocate and enable a PIO send context. */ diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 897046cd8fa5..571e7b10cd11 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -75,6 +75,7 @@ #include "mad.h" #include "qsfp.h" #include "platform.h" +#include "affinity.h" /* bumped 1 from s/w major version of TrueScale */ #define HFI1_CHIP_VERS_MAJ 3U @@ -529,10 +530,11 @@ static inline void incr_cntr32(u32 *cntr) #define MAX_NAME_SIZE 64 struct hfi1_msix_entry { + enum irq_type type; struct msix_entry msix; void *arg; char name[MAX_NAME_SIZE]; - cpumask_var_t mask; + cpumask_t mask; }; /* per-SL CCA information */ @@ -1144,6 +1146,8 @@ struct hfi1_devdata { spinlock_t aspm_lock; /* Number of verbs contexts which have disabled ASPM */ atomic_t aspm_disabled_cnt; + + struct hfi1_affinity *affinity; }; /* 8051 firmware version helper */ @@ -1197,7 +1201,7 @@ void handle_user_interrupt(struct hfi1_ctxtdata *rcd); int hfi1_create_rcvhdrq(struct hfi1_devdata *, struct hfi1_ctxtdata *); int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *); int hfi1_create_ctxts(struct hfi1_devdata *dd); -struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *, u32); +struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *, u32, int); void hfi1_init_pportdata(struct pci_dev *, struct hfi1_pportdata *, struct hfi1_devdata *, u8, u8); void hfi1_free_ctxtdata(struct hfi1_devdata *, struct hfi1_ctxtdata *); diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 17b876d77037..98b3fc145660 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -144,7 +144,7 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) struct hfi1_ctxtdata *rcd; ppd = dd->pport + (i % dd->num_pports); - rcd = hfi1_create_ctxtdata(ppd, i); + rcd = hfi1_create_ctxtdata(ppd, i, dd->node); if (!rcd) { dd_dev_err(dd, "Unable to allocate kernel receive context, failing\n"); @@ -204,7 +204,8 @@ bail: /* * Common code for user and kernel context setup. 
*/ -struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt) +struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, + int numa) { struct hfi1_devdata *dd = ppd->dd; struct hfi1_ctxtdata *rcd; @@ -227,7 +228,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt) rcd->cnt = 1; rcd->ctxt = ctxt; dd->rcd[ctxt] = rcd; - rcd->numa_id = numa_node_id(); + rcd->numa_id = numa; rcd->rcv_array_groups = dd->rcv_entries.ngroups; mutex_init(&rcd->exp_lock); @@ -982,6 +983,7 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) rcu_barrier(); /* wait for rcu callbacks to complete */ free_percpu(dd->int_counter); free_percpu(dd->rcv_limit); + hfi1_dev_affinity_free(dd); ib_dealloc_device(&dd->verbs_dev.rdi.ibdev); } @@ -1010,9 +1012,6 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) dd->pport = (struct hfi1_pportdata *)(dd + 1); INIT_LIST_HEAD(&dd->list); - dd->node = dev_to_node(&pdev->dev); - if (dd->node < 0) - dd->node = 0; idr_preload(GFP_KERNEL); spin_lock_irqsave(&hfi1_devs_lock, flags); -- cgit v1.2.3-59-g8ed1b From cc57236f5515cd343fa47e9664090b54bdb410a3 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:33:49 -0800 Subject: staging/rdma/hfi1: Allocate send ctxt on device NUMA node Allocate the user-mode send context memory on the NUMA node to which the device is attached, for better performance. Reviewed-by: Dean Luick Reviewed-by: Mike Marciniszyn Reviewed-by: Ira Weiny Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/file_ops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index c9172a039242..2de9032857d3 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -1029,7 +1029,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd, * Allocate and enable a PIO send context. */ uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, - uctxt->numa_id); + uctxt->dd->node); if (!uctxt->sc) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 377f111ee81bcef1a788a396f6d813a6b966acbb Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:33:58 -0800 Subject: staging/rdma/hfi1: Verbs Mem affinity support Change verbs memory allocations to use the device's NUMA node. This keeps memory close to the device for optimal performance.
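In outline, the node-local allocation pattern that these NUMA patches apply is sketched below. This is an illustration rather than the driver's code: example_alloc() is a hypothetical helper, while kzalloc_node(), dev_to_node() and numa_node_id() are the actual kernel APIs.

#include <linux/device.h>
#include <linux/numa.h>
#include <linux/slab.h>

/*
 * Allocate zeroed memory on the NUMA node closest to @dev, falling
 * back to the calling CPU's node when the device reports no usable
 * NUMA affinity (dev_to_node() < 0).
 */
static void *example_alloc(struct device *dev, size_t size)
{
	int node = dev_to_node(dev);

	if (node < 0)
		node = numa_node_id();

	/* same semantics as kzalloc(), but the pages land on @node */
	return kzalloc_node(size, GFP_KERNEL, node);
}

The driver resolves the node once (hfi1_dev_affinity_init() above caches it in dd->node) and reuses it, rather than re-deriving it for every allocation.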
Reviewed-by: Mike Marciniszyn Reviewed-by: Ira Weiny Signed-off-by: Mitko Haralanov Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/init.c | 3 ++- drivers/staging/rdma/hfi1/qp.c | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 98b3fc145660..629e92aa4f0a 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -134,7 +134,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) /* Control context has to be always 0 */ BUILD_BUG_ON(HFI1_CTRL_CTXT != 0); - dd->rcd = kcalloc(dd->num_rcv_contexts, sizeof(*dd->rcd), GFP_KERNEL); + dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd), + GFP_KERNEL, dd->node); if (!dd->rcd) goto nomem; diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 05a9619752ae..b78c8eadcd95 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -596,13 +596,13 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, { struct hfi1_qp_priv *priv; - priv = kzalloc(sizeof(*priv), gfp); + priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node); if (!priv) return ERR_PTR(-ENOMEM); priv->owner = qp; - priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp); + priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node); if (!priv->s_hdr) { kfree(priv); return ERR_PTR(-ENOMEM); -- cgit v1.2.3-59-g8ed1b From 89abfc8d64dd1ad32e6d96404eb0a1ea6cbb4ca4 Mon Sep 17 00:00:00 2001 From: Vennila Megavannan Date: Wed, 3 Feb 2016 14:34:07 -0800 Subject: staging/rdma/hfi1: Change send_schedule counter to a per cpu counter A patch to fix fairness issues in QP scheduling requires the n_send_schedule counter to be converted to a per-CPU counter to reduce cache misses.
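In rough outline, the per-CPU counter idiom being adopted here looks like the sketch below; it mirrors the driver's existing int_counter/z_int_counter scheme, and the example_*() helpers are hypothetical.

#include <linux/errno.h>
#include <linux/percpu.h>

/* one u64 slot per possible CPU, allocated once per device */
static int example_counter_init(struct hfi1_devdata *dd)
{
	dd->send_schedule = alloc_percpu(u64);
	return dd->send_schedule ? 0 : -ENOMEM;
}

/* hot path: bump only the local CPU's slot; no shared cacheline bounces */
static void example_counter_bump(struct hfi1_devdata *dd)
{
	this_cpu_inc(*dd->send_schedule);
}

/* read side: queries are rare, so summing every CPU's slot is fine */
static u64 example_counter_total(struct hfi1_devdata *dd)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += *per_cpu_ptr(dd->send_schedule, cpu);

	/* z_send_schedule is the baseline captured when counters reset */
	return total - dd->z_send_schedule;
}

The cost is shifted entirely to the infrequent reader: writers touch only a CPU-local slot, which is what removes the cache misses mentioned above.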
Reviewed-by: Mike Marciniszyn Signed-off-by: Vennila Megavannan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 3 ++- drivers/staging/rdma/hfi1/hfi.h | 4 +++- drivers/staging/rdma/hfi1/init.c | 9 +++++++++ drivers/staging/rdma/hfi1/ruc.c | 2 +- drivers/staging/rdma/hfi1/verbs.h | 1 - 5 files changed, 15 insertions(+), 4 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 6045c9154a1f..13b92a3d3d8b 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -1609,7 +1609,8 @@ static u64 access_sw_send_schedule(const struct cntr_entry *entry, { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; - return dd->verbs_dev.n_send_schedule; + return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl, + mode, data); } /* Software counters for the error status bits within MISC_ERR_STATUS */ diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 571e7b10cd11..112f7902c623 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -898,10 +898,11 @@ struct hfi1_devdata { /* reset value */ u64 z_int_counter; u64 z_rcv_limit; + u64 z_send_schedule; /* percpu int_counter */ u64 __percpu *int_counter; u64 __percpu *rcv_limit; - + u64 __percpu *send_schedule; /* number of receive contexts in use by the driver */ u32 num_rcv_contexts; /* number of pio send contexts in use by the driver */ @@ -1884,6 +1885,7 @@ static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd) dd->z_int_counter = get_all_cpu_total(dd->int_counter); dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit); + dd->z_send_schedule = get_all_cpu_total(dd->send_schedule); ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 629e92aa4f0a..6ddf3c8bcc2e 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -985,6 +985,7 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) free_percpu(dd->int_counter); free_percpu(dd->rcv_limit); hfi1_dev_affinity_free(dd); + free_percpu(dd->send_schedule); ib_dealloc_device(&dd->verbs_dev.rdi.ibdev); } @@ -1063,6 +1064,14 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) goto bail; } + dd->send_schedule = alloc_percpu(u64); + if (!dd->send_schedule) { + ret = -ENOMEM; + hfi1_early_err(&pdev->dev, + "Could not allocate per-cpu int_counter\n"); + goto bail; + } + if (!hfi1_cpulist_count) { u32 count = num_online_cpus(); diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 6379df53fa72..ae28b85b8475 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -875,7 +875,7 @@ void hfi1_do_send(struct rvt_qp *qp) /* allow other tasks to run */ if (unlikely(time_after(jiffies, timeout))) { cond_resched(); - ps.ppd->dd->verbs_dev.n_send_schedule++; + this_cpu_inc(*ps.ppd->dd->send_schedule); timeout = jiffies + SEND_RESCHED_TIMEOUT; } } while (make_req(qp)); diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 1571ae390042..ac84dd70c6c7 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -270,7 +270,6 @@ struct hfi1_ibdev { u64 n_piowait; u64 n_txwait; u64 n_kmem_wait; - u64 n_send_schedule; #ifdef CONFIG_DEBUG_FS /* per HFI debugfs */ -- cgit v1.2.3-59-g8ed1b From 
f1bf29634057f56507945589aa40c96c649073ee Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:34:15 -0800 Subject: staging/rdma/hfi1: Fix for generic I2C interface The original I2C interface was geared for QSFP accesses. Modify the interface to behave more like a generic I2C controller such that reads and writes can accept multi-byte offsets. Removed reads following writes and moved reset to top level. Reviewed-by: Easwar Hariharan Reviewed-by: Dean Luick Signed-off-by: Pablo Cacho Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/debugfs.c | 6 +- drivers/staging/rdma/hfi1/qsfp.c | 88 ++++++++++++++--------- drivers/staging/rdma/hfi1/qsfp.h | 4 ++ drivers/staging/rdma/hfi1/twsi.c | 134 ++++++++++++++++-------------------- 4 files changed, 122 insertions(+), 110 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index acd2269e9f14..d6dc339fb2a3 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -463,7 +463,8 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, goto _free; } - i2c_addr = (*ppos >> 16) & 0xff; + /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ + i2c_addr = (*ppos >> 16) & 0xffff; offset = *ppos & 0xffff; total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count); @@ -517,7 +518,8 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, goto _return; } - i2c_addr = (*ppos >> 16) & 0xff; + /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ + i2c_addr = (*ppos >> 16) & 0xffff; offset = *ppos & 0xffff; total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count); diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index 0d2ec972ea9f..0e1a49294d99 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -71,14 +71,6 @@ static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int ret, cnt; u8 *buff = bp; - /* Make sure TWSI bus is in sane state. */ - ret = hfi1_twsi_reset(dd, target); - if (ret) { - hfi1_dev_porterr(dd, ppd->port, - "I2C interface Reset for write failed\n"); - return -EIO; - } - cnt = 0; while (cnt < len) { int wlen = len - cnt; @@ -106,11 +98,22 @@ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, int ret; ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex); - if (!ret) { - ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len); - mutex_unlock(&dd->qsfp_i2c_mutex); + if (ret) + return ret; + + /* make sure the TWSI bus is in a sane state */ + ret = hfi1_twsi_reset(ppd->dd, target); + if (ret) { + hfi1_dev_porterr(ppd->dd, ppd->port, + "I2C write interface reset failed\n"); + ret = -EIO; + goto done; } + ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len); + +done: + mutex_unlock(&dd->qsfp_i2c_mutex); return ret; } @@ -125,16 +128,6 @@ static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int stuck = 0; u8 *buff = bp; - /* Make sure TWSI bus is in sane state. 
*/ - ret = hfi1_twsi_reset(dd, target); - if (ret) { - hfi1_dev_porterr(dd, ppd->port, - "I2C interface Reset for read failed\n"); - ret = -EIO; - stuck = 1; - goto exit; - } - cnt = 0; while (cnt < len) { int rlen = len - cnt; @@ -178,11 +171,22 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, int ret; ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex); - if (!ret) { - ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len); - mutex_unlock(&dd->qsfp_i2c_mutex); + if (ret) + return ret; + + /* make sure the TWSI bus is in a sane state */ + ret = hfi1_twsi_reset(ppd->dd, target); + if (ret) { + hfi1_dev_porterr(ppd->dd, ppd->port, + "I2C read interface reset failed\n"); + ret = -EIO; + goto done; } + ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len); + +done: + mutex_unlock(&dd->qsfp_i2c_mutex); return ret; } @@ -203,6 +207,15 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, if (ret) return ret; + /* make sure the TWSI bus is in a sane state */ + ret = hfi1_twsi_reset(ppd->dd, target); + if (ret) { + hfi1_dev_porterr(ppd->dd, ppd->port, + "QSFP write interface reset failed\n"); + mutex_unlock(&ppd->dd->qsfp_i2c_mutex); + return -EIO; + } + while (count < len) { /* * Set the qsfp page based on a zero-based addresss @@ -210,8 +223,8 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, */ page = (u8)(addr / QSFP_PAGESIZE); - ret = __i2c_write(ppd, target, QSFP_DEV, - QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1); + ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, + QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1); if (ret != 1) { hfi1_dev_porterr( ppd->dd, @@ -227,8 +240,8 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, if (((addr % QSFP_RW_BOUNDARY) + nwrite) > QSFP_RW_BOUNDARY) nwrite = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY); - ret = __i2c_write(ppd, target, QSFP_DEV, offset, bp + count, - nwrite); + ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, + offset, bp + count, nwrite); if (ret <= 0) /* stop on error or nothing written */ break; @@ -260,14 +273,23 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, if (ret) return ret; + /* make sure the TWSI bus is in a sane state */ + ret = hfi1_twsi_reset(ppd->dd, target); + if (ret) { + hfi1_dev_porterr(ppd->dd, ppd->port, + "QSFP read interface reset failed\n"); + mutex_unlock(&ppd->dd->qsfp_i2c_mutex); + return -EIO; + } + while (count < len) { /* * Set the qsfp page based on a zero-based address * and a page size of QSFP_PAGESIZE bytes. 
*/ page = (u8)(addr / QSFP_PAGESIZE); - ret = __i2c_write(ppd, target, QSFP_DEV, - QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1); + ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, + QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1); if (ret != 1) { hfi1_dev_porterr( ppd->dd, @@ -283,8 +305,10 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY) nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY); - ret = __i2c_read(ppd, target, QSFP_DEV, offset, bp + count, - nread); + /* QSFPs require a 5-10msec delay after write operations */ + mdelay(5); + ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, + offset, bp + count, nread); if (ret <= 0) /* stop on error or nothing read */ break; diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h index b1b9e4a2329f..af59a43b2d5f 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/staging/rdma/hfi1/qsfp.h @@ -70,6 +70,10 @@ /* Reads/writes cannot cross 128 byte boundaries */ #define QSFP_RW_BOUNDARY 128 +/* number of bytes in i2c offset for QSFP devices */ +#define __QSFP_OFFSET_SIZE 1 /* num address bytes */ +#define QSFP_OFFSET_SIZE (__QSFP_OFFSET_SIZE << 8) /* shifted value */ + /* Defined fields that Intel requires of qualified cables */ /* Byte 0 is Identifier, not checked */ /* Byte 1 is reserved "status MSB" */ diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/staging/rdma/hfi1/twsi.c index ea54fd2700ad..7c579b343844 100644 --- a/drivers/staging/rdma/hfi1/twsi.c +++ b/drivers/staging/rdma/hfi1/twsi.c @@ -365,17 +365,25 @@ static int twsi_wr(struct hfi1_devdata *dd, u32 target, int data, int flags) * HFI1_TWSI_NO_DEV and does the correct operation for the legacy part, * which responded to all TWSI device codes, interpreting them as * address within device. On all other devices found on board handled by - * this driver, the device is followed by a one-byte "address" which selects + * this driver, the device is followed by a N-byte "address" which selects * the "register" or "offset" within the device from which data should * be read. */ int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr, void *buffer, int len) { - int ret; u8 *bp = buffer; + int ret = 1; + int i; + int offset_size; + + /* obtain the offset size, strip it from the device address */ + offset_size = (dev >> 8) & 0xff; + dev &= 0xff; - ret = 1; + /* allow at most a 2 byte offset */ + if (offset_size > 2) + goto bail; if (dev == HFI1_TWSI_NO_DEV) { /* legacy not-really-I2C */ @@ -383,34 +391,29 @@ int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr, ret = twsi_wr(dd, target, addr, HFI1_TWSI_START); } else { /* Actual I2C */ - ret = twsi_wr(dd, target, dev | WRITE_CMD, HFI1_TWSI_START); - if (ret) { - stop_cmd(dd, target); - ret = 1; - goto bail; - } - /* - * SFF spec claims we do _not_ stop after the addr - * but simply issue a start with the "read" dev-addr. 
- * Since we are implicitly waiting for ACK here, - * we need t_buf (nominally 20uSec) before that start, - * and cannot rely on the delay built in to the STOP - */ - ret = twsi_wr(dd, target, addr, 0); - udelay(TWSI_BUF_WAIT_USEC); + if (offset_size) { + ret = twsi_wr(dd, target, + dev | WRITE_CMD, HFI1_TWSI_START); + if (ret) { + stop_cmd(dd, target); + goto bail; + } - if (ret) { - dd_dev_err(dd, - "Failed to write interface read addr %02X\n", - addr); - ret = 1; - goto bail; + for (i = 0; i < offset_size; i++) { + ret = twsi_wr(dd, target, + (addr >> (i * 8)) & 0xff, 0); + udelay(TWSI_BUF_WAIT_USEC); + if (ret) { + dd_dev_err(dd, "Failed to write byte %d of offset 0x%04X\n", + i, addr); + goto bail; + } + } } ret = twsi_wr(dd, target, dev | READ_CMD, HFI1_TWSI_START); } if (ret) { stop_cmd(dd, target); - ret = 1; goto bail; } @@ -442,76 +445,55 @@ bail: * HFI1_TWSI_NO_DEV and does the correct operation for the legacy part, * which responded to all TWSI device codes, interpreting them as * address within device. On all other devices found on board handled by - * this driver, the device is followed by a one-byte "address" which selects + * this driver, the device is followed by a N-byte "address" which selects * the "register" or "offset" within the device to which data should * be written. */ int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr, const void *buffer, int len) { - int sub_len; const u8 *bp = buffer; - int max_wait_time, i; int ret = 1; + int i; + int offset_size; - while (len > 0) { - if (dev == HFI1_TWSI_NO_DEV) { - if (twsi_wr(dd, target, (addr << 1) | WRITE_CMD, - HFI1_TWSI_START)) { - goto failed_write; - } - } else { - /* Real I2C */ - if (twsi_wr(dd, target, - dev | WRITE_CMD, HFI1_TWSI_START)) - goto failed_write; - ret = twsi_wr(dd, target, addr, 0); - if (ret) { - dd_dev_err(dd, - "Failed to write interface write addr %02X\n", - addr); - goto failed_write; - } - } + /* obtain the offset size, strip it from the device address */ + offset_size = (dev >> 8) & 0xff; + dev &= 0xff; - sub_len = min(len, 4); - addr += sub_len; - len -= sub_len; - - for (i = 0; i < sub_len; i++) - if (twsi_wr(dd, target, *bp++, 0)) - goto failed_write; + /* allow at most a 2 byte offset */ + if (offset_size > 2) + goto bail; - stop_cmd(dd, target); + if (dev == HFI1_TWSI_NO_DEV) { + if (twsi_wr(dd, target, (addr << 1) | WRITE_CMD, + HFI1_TWSI_START)) { + goto failed_write; + } + } else { + /* Real I2C */ + if (twsi_wr(dd, target, dev | WRITE_CMD, HFI1_TWSI_START)) + goto failed_write; + } - /* - * Wait for write complete by waiting for a successful - * read (the chip replies with a zero after the write - * cmd completes, and before it writes to the eeprom. - * The startcmd for the read will fail the ack until - * the writes have completed. We do this inline to avoid - * the debug prints that are in the real read routine - * if the startcmd fails. - * We also use the proper device address, so it doesn't matter - * whether we have real eeprom_dev. Legacy likes any address. 
- */ - max_wait_time = 100; - while (twsi_wr(dd, target, - dev | READ_CMD, HFI1_TWSI_START)) { - stop_cmd(dd, target); - if (!--max_wait_time) - goto failed_write; + for (i = 0; i < offset_size; i++) { + ret = twsi_wr(dd, target, (addr >> (i * 8)) & 0xff, 0); + udelay(TWSI_BUF_WAIT_USEC); + if (ret) { + dd_dev_err(dd, "Failed to write byte %d of offset 0x%04X\n", + i, addr); + goto bail; } - /* now read (and ignore) the resulting byte */ - rd_byte(dd, target, 1); } + for (i = 0; i < len; i++) + if (twsi_wr(dd, target, *bp++, 0)) + goto failed_write; + ret = 0; - goto bail; failed_write: stop_cmd(dd, target); - ret = 1; bail: return ret; -- cgit v1.2.3-59-g8ed1b From 23cd4716b7e2792f1fdc31b83feb9e9a9812c25f Mon Sep 17 00:00:00 2001 From: Vennila Megavannan Date: Wed, 3 Feb 2016 14:34:23 -0800 Subject: staging/rdma/hfi1: Allow a fair scheduling of QPs This patch fixes the fairness issues in QP scheduling - the timeout for cond_resched is changed to a ratio of qp->timeout_jiffies - workqueue_congested is used to determine if qp needs to reschedule itself Reviewed-by: Mike Marciniszyn Signed-off-by: Vennila Megavannan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/ruc.c | 52 ++++++++++++++++++++++++++++++----------- 1 file changed, 38 insertions(+), 14 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index ae28b85b8475..f09badbfa51c 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -822,29 +822,42 @@ void _hfi1_do_send(struct work_struct *work) void hfi1_do_send(struct rvt_qp *qp) { struct hfi1_pkt_state ps; + struct hfi1_qp_priv *priv = qp->priv; int (*make_req)(struct rvt_qp *qp); unsigned long flags; unsigned long timeout; + unsigned long timeout_int; + int cpu; ps.dev = to_idev(qp->ibqp.device); ps.ibp = to_iport(qp->ibqp.device, qp->port_num); ps.ppd = ppd_from_ibp(ps.ibp); - if ((qp->ibqp.qp_type == IB_QPT_RC || - qp->ibqp.qp_type == IB_QPT_UC) && - !loopback && - (qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc) - 1)) == - ps.ppd->lid) { - ruc_loopback(qp); - return; - } - - if (qp->ibqp.qp_type == IB_QPT_RC) + switch (qp->ibqp.qp_type) { + case IB_QPT_RC: + if (!loopback && ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc + ) - 1)) == + ps.ppd->lid)) { + ruc_loopback(qp); + return; + } make_req = hfi1_make_rc_req; - else if (qp->ibqp.qp_type == IB_QPT_UC) + timeout_int = (qp->timeout_jiffies); + break; + case IB_QPT_UC: + if (!loopback && ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc + ) - 1)) == + ps.ppd->lid)) { + ruc_loopback(qp); + return; + } make_req = hfi1_make_uc_req; - else + timeout_int = SEND_RESCHED_TIMEOUT; + break; + default: make_req = hfi1_make_ud_req; + timeout_int = SEND_RESCHED_TIMEOUT; + } spin_lock_irqsave(&qp->s_lock, flags); @@ -858,7 +871,9 @@ void hfi1_do_send(struct rvt_qp *qp) spin_unlock_irqrestore(&qp->s_lock, flags); - timeout = jiffies + SEND_RESCHED_TIMEOUT; + timeout = jiffies + (timeout_int) / 8; + cpu = priv->s_sde ? priv->s_sde->cpu : + cpumask_first(cpumask_of_node(ps.ppd->dd->node)); do { /* Check for a constructed packet to be sent. 
*/ if (qp->s_hdrwords != 0) { @@ -874,9 +889,18 @@ void hfi1_do_send(struct rvt_qp *qp) /* allow other tasks to run */ if (unlikely(time_after(jiffies, timeout))) { + if (workqueue_congested(cpu, ps.ppd->hfi1_wq)) { + spin_lock_irqsave(&qp->s_lock, flags); + qp->s_flags &= ~RVT_S_BUSY; + hfi1_schedule_send(qp); + spin_unlock_irqrestore(&qp->s_lock, + flags); + this_cpu_inc(*ps.ppd->dd->send_schedule); + return; + } cond_resched(); this_cpu_inc(*ps.ppd->dd->send_schedule); - timeout = jiffies + SEND_RESCHED_TIMEOUT; + timeout = jiffies + (timeout_int) / 8; } } while (make_req(qp)); }

-- cgit v1.2.3-59-g8ed1b

From e002dcc0abd318b0c5d7b2d05ba41ef4d00abe73 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Wed, 3 Feb 2016 14:34:32 -0800 Subject: staging/rdma/hfi1: Fix for module parameter rcvhdrcnt when it's 2097152

The driver crashes when loaded with parameter rcvhdrcnt=2097152. The root cause was that rcvhdrcnt was initially a 32 bit variable and its value was assigned to a 16 bit variable, truncating the upper 16 bits. This patch prevents the user from passing a value for rcvhdrcnt greater than 16352 (the maximum value for rcvhdrcnt).

Reviewed-by: Dean Luick Reviewed-by: Mitko Haralanov Signed-off-by: Sebastian Sanchez Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/init.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/staging')

diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 6ddf3c8bcc2e..eec91305516a 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -77,6 +77,7 @@ #define HFI1_MIN_USER_CTXT_BUFCNT 7 #define HFI1_MIN_HDRQ_EGRBUF_CNT 2 +#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352 #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */ #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */ @@ -1355,6 +1356,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ret = -EINVAL; goto bail; } + if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { + hfi1_early_err(&pdev->dev, + "Receive header queue count cannot be greater than %u\n", + HFI1_MAX_HDRQ_EGRBUF_CNT); + ret = -EINVAL; + goto bail; + } /* use the encoding function as a sanitization check */ if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",

-- cgit v1.2.3-59-g8ed1b

From a92ba6d628d362811c8112280826de0e8b178e67 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:34:41 -0800 Subject: staging/rdma/hfi1: Improve performance of TID cache look up

When TID caching was enabled, the way the driver found RB nodes when PSM was unprogramming TID entries was by traversing the RB tree, looking for a match on the RcvArray entry index. The performance of this algorithm was not only poor but also inconsistent, depending on how many RB nodes had to be traversed before a match was found. The lower performance was especially evident in cases where there was a cache miss with the cache full, requiring the unprogramming of several TID entries.

This commit changes how RB nodes are looked up when being freed by PSM to an index-based lookup into a flat array indexed by the RcvArray entry. This turns the entire look-up process into an O(1) algorithm.
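For illustration, here is a minimal sketch of the flat-array lookup described above. The type and helper names are simplified stand-ins for this example, not the driver's actual structures:

	struct cache_node {
		unsigned rcventry;	/* RcvArray entry backing this node */
		/* pinned-page bookkeeping elided */
	};

	struct entry_cache {
		unsigned expected_base;		 /* first expected RcvArray entry */
		struct cache_node **entry_to_rb; /* flat array, one slot per entry */
	};

	/* O(1) insert: the slot index is derived directly from the entry */
	static void cache_insert(struct entry_cache *c, struct cache_node *n)
	{
		c->entry_to_rb[n->rcventry - c->expected_base] = n;
	}

	/* O(1) lookup: index the flat array instead of walking an RB tree */
	static struct cache_node *cache_lookup(struct entry_cache *c,
					       unsigned rcventry)
	{
		struct cache_node *n = c->entry_to_rb[rcventry - c->expected_base];

		/* reject a stale slot, mirroring the driver's rcventry recheck */
		return (n && n->rcventry == rcventry) ? n : NULL;
	}

Special care needs to be taken for situations when TID caching is disabled. In those cases, there is no need to insert the RB nodes into an actual RB tree.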
Since the entire RcvArray management mechanism is managed by an index-based algorithm, the RB nodes can be saved into the flat array, making both "insertion" and "removal" faster. Reviewed-by: Arthur Kepner Reviewed-by: Dennis Dalessandro Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 6 +- drivers/staging/rdma/hfi1/user_exp_rcv.c | 149 ++++++++++++++++--------------- 2 files changed, 83 insertions(+), 72 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 112f7902c623..f3c1e6722dd4 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1171,6 +1171,7 @@ struct hfi1_filedata { int rec_cpu_num; struct mmu_notifier mn; struct rb_root tid_rb_root; + struct mmu_rb_node **entry_to_rb; spinlock_t tid_lock; /* protect tid_[limit,used] counters */ u32 tid_limit; u32 tid_used; @@ -1178,7 +1179,10 @@ struct hfi1_filedata { u32 *invalid_tids; u32 invalid_tid_idx; spinlock_t invalid_lock; /* protect the invalid_tids array */ - int (*mmu_rb_insert)(struct rb_root *, struct mmu_rb_node *); + int (*mmu_rb_insert)(struct hfi1_filedata *, struct rb_root *, + struct mmu_rb_node *); + void (*mmu_rb_remove)(struct hfi1_filedata *, struct rb_root *, + struct mmu_rb_node *); }; extern struct list_head hfi1_dev_list; diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 79612a2bd07d..36b61b5b6997 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -102,12 +102,15 @@ static int set_rcvarray_entry(struct file *, unsigned long, u32, struct tid_group *, struct page **, unsigned); static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, unsigned long); -static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *, - unsigned long); -static inline struct mmu_rb_node *mmu_rb_search_by_entry(struct rb_root *, - u32); -static int mmu_rb_insert_by_addr(struct rb_root *, struct mmu_rb_node *); -static int mmu_rb_insert_by_entry(struct rb_root *, struct mmu_rb_node *); +static struct mmu_rb_node *mmu_rb_search(struct rb_root *, unsigned long); +static int mmu_rb_insert_by_addr(struct hfi1_filedata *, struct rb_root *, + struct mmu_rb_node *); +static int mmu_rb_insert_by_entry(struct hfi1_filedata *, struct rb_root *, + struct mmu_rb_node *); +static void mmu_rb_remove_by_addr(struct hfi1_filedata *, struct rb_root *, + struct mmu_rb_node *); +static void mmu_rb_remove_by_entry(struct hfi1_filedata *, struct rb_root *, + struct mmu_rb_node *); static void mmu_notifier_mem_invalidate(struct mmu_notifier *, unsigned long, unsigned long, enum mmu_call_types); @@ -219,6 +222,12 @@ int hfi1_user_exp_rcv_init(struct file *fp) } } + fd->entry_to_rb = kcalloc(uctxt->expected_count, + sizeof(struct rb_node *), + GFP_KERNEL); + if (!fd->entry_to_rb) + return -ENOMEM; + if (!HFI1_CAP_IS_USET(TID_UNMAP)) { fd->invalid_tid_idx = 0; fd->invalid_tids = kzalloc(uctxt->expected_count * @@ -226,27 +235,30 @@ int hfi1_user_exp_rcv_init(struct file *fp) if (!fd->invalid_tids) { ret = -ENOMEM; goto done; - } else { - /* - * Register MMU notifier callbacks. If the registration - * fails, continue but turn off the TID caching for - * all user contexts. - */ - ret = mmu_notifier_register(&fd->mn, current->mm); - if (ret) { - dd_dev_info(dd, - "Failed MMU notifier registration %d\n", - ret); - HFI1_CAP_USET(TID_UNMAP); - ret = 0; - } + } + + /* + * Register MMU notifier callbacks. If the registration + * fails, continue but turn off the TID caching for + * all user contexts. 
+ */ + ret = mmu_notifier_register(&fd->mn, current->mm); + if (ret) { + dd_dev_info(dd, + "Failed MMU notifier registration %d\n", + ret); + HFI1_CAP_USET(TID_UNMAP); + ret = 0; } } - if (HFI1_CAP_IS_USET(TID_UNMAP)) + if (HFI1_CAP_IS_USET(TID_UNMAP)) { fd->mmu_rb_insert = mmu_rb_insert_by_entry; - else + fd->mmu_rb_remove = mmu_rb_remove_by_entry; + } else { fd->mmu_rb_insert = mmu_rb_insert_by_addr; + fd->mmu_rb_remove = mmu_rb_remove_by_addr; + } /* * PSM does not have a good way to separate, count, and @@ -318,6 +330,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) spin_unlock(&fd->rb_lock); hfi1_clear_tids(uctxt); } + + kfree(fd->entry_to_rb); return 0; } @@ -890,7 +904,7 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr, memcpy(node->pages, pages, sizeof(struct page *) * npages); spin_lock(&fd->rb_lock); - ret = fd->mmu_rb_insert(root, node); + ret = fd->mmu_rb_insert(fd, root, node); spin_unlock(&fd->rb_lock); if (ret) { @@ -915,8 +929,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, struct hfi1_devdata *dd = uctxt->dd; struct mmu_rb_node *node; u8 tidctrl = EXP_TID_GET(tidinfo, CTRL); - u32 tidbase = uctxt->expected_base, - tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry; + u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry; if (tididx >= uctxt->expected_count) { dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n", @@ -927,15 +940,15 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, if (tidctrl == 0x3) return -EINVAL; - rcventry = tidbase + tididx + (tidctrl - 1); + rcventry = tididx + (tidctrl - 1); spin_lock(&fd->rb_lock); - node = mmu_rb_search_by_entry(&fd->tid_rb_root, rcventry); - if (!node) { + node = fd->entry_to_rb[rcventry]; + if (!node || node->rcventry != (uctxt->expected_base + rcventry)) { spin_unlock(&fd->rb_lock); return -EBADF; } - rb_erase(&node->rbnode, &fd->tid_rb_root); + fd->mmu_rb_remove(fd, &fd->tid_rb_root, node); spin_unlock(&fd->rb_lock); if (grp) *grp = node->grp; @@ -993,10 +1006,11 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, u16 rcventry = grp->base + i; struct mmu_rb_node *node; - node = mmu_rb_search_by_entry(root, rcventry); - if (!node) + node = fd->entry_to_rb[rcventry - + uctxt->expected_base]; + if (!node || node->rcventry != rcventry) continue; - rb_erase(&node->rbnode, root); + fd->mmu_rb_remove(fd, root, node); clear_tid_node(fd, -1, node); } } @@ -1034,7 +1048,7 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, spin_lock(&fd->rb_lock); while (addr < end) { - node = mmu_rb_search_by_addr(root, addr); + node = mmu_rb_search(root, addr); if (!node) { /* @@ -1116,8 +1130,8 @@ static inline int mmu_entry_cmp(struct mmu_rb_node *node, u32 entry) return 0; } -static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *root, - unsigned long addr) +static struct mmu_rb_node *mmu_rb_search(struct rb_root *root, + unsigned long addr) { struct rb_node *node = root->rb_node; @@ -1142,48 +1156,21 @@ static struct mmu_rb_node *mmu_rb_search_by_addr(struct rb_root *root, return NULL; } -static inline struct mmu_rb_node *mmu_rb_search_by_entry(struct rb_root *root, - u32 index) -{ - struct mmu_rb_node *rbnode; - struct rb_node *node; - - if (root && !RB_EMPTY_ROOT(root)) - for (node = rb_first(root); node; node = rb_next(node)) { - rbnode = rb_entry(node, struct mmu_rb_node, rbnode); - if (rbnode->rcventry == index) - return rbnode; - } - return NULL; -} - -static int mmu_rb_insert_by_entry(struct rb_root *root, +static int 
mmu_rb_insert_by_entry(struct hfi1_filedata *fdata, + struct rb_root *root, struct mmu_rb_node *node) { - struct rb_node **new = &root->rb_node, *parent = NULL; + u32 base = fdata->uctxt->expected_base; - while (*new) { - struct mmu_rb_node *this = - container_of(*new, struct mmu_rb_node, rbnode); - int result = mmu_entry_cmp(this, node->rcventry); - - parent = *new; - if (result < 0) - new = &((*new)->rb_left); - else if (result > 0) - new = &((*new)->rb_right); - else - return 1; - } - - rb_link_node(&node->rbnode, parent, new); - rb_insert_color(&node->rbnode, root); + fdata->entry_to_rb[node->rcventry - base] = node; return 0; } -static int mmu_rb_insert_by_addr(struct rb_root *root, struct mmu_rb_node *node) +static int mmu_rb_insert_by_addr(struct hfi1_filedata *fdata, + struct rb_root *root, struct mmu_rb_node *node) { struct rb_node **new = &root->rb_node, *parent = NULL; + u32 base = fdata->uctxt->expected_base; /* Figure out where to put new node */ while (*new) { @@ -1204,5 +1191,25 @@ static int mmu_rb_insert_by_addr(struct rb_root *root, struct mmu_rb_node *node) rb_link_node(&node->rbnode, parent, new); rb_insert_color(&node->rbnode, root); + fdata->entry_to_rb[node->rcventry - base] = node; return 0; } + +static void mmu_rb_remove_by_entry(struct hfi1_filedata *fdata, + struct rb_root *root, + struct mmu_rb_node *node) +{ + u32 base = fdata->uctxt->expected_base; + + fdata->entry_to_rb[node->rcventry - base] = NULL; +} + +static void mmu_rb_remove_by_addr(struct hfi1_filedata *fdata, + struct rb_root *root, + struct mmu_rb_node *node) +{ + u32 base = fdata->uctxt->expected_base; + + fdata->entry_to_rb[node->rcventry - base] = NULL; + rb_erase(&node->rbnode, root); +} -- cgit v1.2.3-59-g8ed1b From 82ab09e131ffb0497c9631e2c53b44fbf9ad5e1c Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Wed, 3 Feb 2016 14:34:49 -0800 Subject: staging/rdma/hfi1: Reduce syslog message severity and provide speed information The syslog message causes unnecessary alarm for the single and dual port x8 cards by reporting at an error level. This patch reduces the severity to informational only and adds speed information. Reviewed-by: Dennis Dalessandro Signed-off-by: Easwar Hariharan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/pcie.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 76cf80792516..6605a6afbb1d 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -398,9 +398,7 @@ int pcie_speeds(struct hfi1_devdata *dd) /* obtain the link width and current speed */ update_lbus_info(dd); - /* check against expected pcie width and complain if "wrong" */ - if (dd->lbus_width < 16) - dd_dev_err(dd, "PCIe width %u (x16 HFI)\n", dd->lbus_width); + dd_dev_info(dd, "%s\n", dd->lbus_info); return 0; } -- cgit v1.2.3-59-g8ed1b From d24bc6481e376da3b7f226b57b39b0ae4088b8d9 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:34:58 -0800 Subject: staging/rdma/hfi1: Use device file minor to identify EPROM When writing to the EPROM, the driver will always use the "first" device. This is incorrect for multiple cards. Use the device file minor to determine the device to use. Reject the generic device file. 
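As a rough sketch of the numbering this implies (the helper below is hypothetical; only the idea of subtracting the base minor comes from the patch), relative minor 0 is the generic device file and maps to no unit, while relative minor N selects unit N - 1:

	/* Hypothetical helper: map a device file minor to a unit number.
	 * i_minor is the minor relative to the user device base minor.
	 */
	static int minor_to_unit(int i_minor)
	{
		if (i_minor <= 0)
			return -1;	/* generic device file: no unit, reject */
		return i_minor - 1;	/* e.g. relative minor 1 -> unit 0 */
	}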
Reviewed-by: Mitko Haralanov Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/eprom.c | 15 ++++++++++----- drivers/staging/rdma/hfi1/eprom.h | 2 +- drivers/staging/rdma/hfi1/file_ops.c | 2 +- 3 files changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c index fb620c97f592..8104a1121bf2 100644 --- a/drivers/staging/rdma/hfi1/eprom.c +++ b/drivers/staging/rdma/hfi1/eprom.c @@ -353,21 +353,26 @@ static inline u32 extract_rstart(u32 composite) * * Return 0 on success, -ERRNO on error */ -int handle_eprom_command(const struct hfi1_cmd *cmd) +int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) { struct hfi1_devdata *dd; u32 dev_id; u32 rlen; /* range length */ u32 rstart; /* range start */ + int i_minor; int ret = 0; /* - * The EPROM is per-device, so use unit 0 as that will always - * exist. + * Map the device file to device data using the relative minor. + * The device file minor number is the unit number + 1. 0 is + * the generic device file - reject it. */ - dd = hfi1_lookup(0); + i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE; + if (i_minor <= 0) + return -EINVAL; + dd = hfi1_lookup(i_minor - 1); if (!dd) { - pr_err("%s: cannot find unit 0!\n", __func__); + pr_err("%s: cannot find unit %d!\n", __func__, i_minor); return -EINVAL; } diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/staging/rdma/hfi1/eprom.h index 64a64276be81..5a61ba3ba810 100644 --- a/drivers/staging/rdma/hfi1/eprom.h +++ b/drivers/staging/rdma/hfi1/eprom.h @@ -52,4 +52,4 @@ struct hfi1_cmd; struct hfi1_devdata; int eprom_init(struct hfi1_devdata *dd); -int handle_eprom_command(const struct hfi1_cmd *cmd); +int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd); diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 2de9032857d3..cc681f7bc570 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -409,7 +409,7 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, case HFI1_CMD_EP_ERASE_RANGE: case HFI1_CMD_EP_READ_RANGE: case HFI1_CMD_EP_WRITE_RANGE: - ret = handle_eprom_command(&cmd); + ret = handle_eprom_command(fp, &cmd); break; } -- cgit v1.2.3-59-g8ed1b From 0f2d87d2827eb4f3c1319e69b67ba30d61cabe83 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:35:06 -0800 Subject: staging/rdma/hfi1: Improve performance of SDMA transfers Commit a0d406934a46 ("staging/rdma/hfi1: Add page lock limit check for SDMA requests") added a mechanism to delay the clean-up of user SDMA requests in order to facilitate proper locked page counting. This delayed processing was done using a kernel workqueue, which meant that a kernel thread would have to spin up and take CPU cycles to do the clean-up. This proved detrimental to performance because now there are two execution threads (the kernel workqueue and the user process) needing cycles on the same CPU. Performance-wise, it is much better to do as much of the clean-up as can be done in interrupt context (during the callback) and do the remaining work in-line during subsequent calls of the user process into the driver. 
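The shape of that split can be sketched as follows, with simplified, illustrative types (the driver's actual iovec_list/iovec_lock handling appears in the diff below). The interrupt-context callback only queues the finished vector; the expensive unpinning runs later in process context:

	/* interrupt context: cheap bookkeeping only */
	static void on_tx_complete(struct pkt_queue *pq, struct io_vec *vec)
	{
		spin_lock(&pq->iovec_lock);
		list_add_tail(&vec->list, &pq->iovec_list);
		spin_unlock(&pq->iovec_lock);
	}

	/* process context: drained on the user's next call into the driver */
	static void drain_completed_vectors(struct pkt_queue *pq)
	{
		unsigned long flags;
		struct io_vec *vec;

		while (!list_empty(&pq->iovec_list)) {
			spin_lock_irqsave(&pq->iovec_lock, flags);
			vec = list_first_entry(&pq->iovec_list,
					       struct io_vec, list);
			list_del_init(&vec->list);
			spin_unlock_irqrestore(&pq->iovec_lock, flags);
			unpin_pages(vec);	/* safe in the user's own context */
		}
	}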
The changes required to implement the above also significantly simplify the entire SDMA completion processing code and eliminate a memory corruption causing the following observed crash:

[ 2881.703362] BUG: unable to handle kernel NULL pointer dereference at (null)
[ 2881.703389] IP: [] user_sdma_send_pkts+0xcd4/0x18e0 [hfi1]
[ 2881.703422] PGD 7d4d25067 PUD 77d96d067 PMD 0
[ 2881.703427] Oops: 0000 [#1] SMP
[ 2881.703431] Modules linked in:
[ 2881.703504] CPU: 28 PID: 6668 Comm: mpi_stress Tainted: G OENX 3.12.28-4-default #1
[ 2881.703508] Hardware name: Intel Corporation S2600KP/S2600KP, BIOS SE5C610.86B.11.01.0044.090
[ 2881.703512] task: ffff88077da8e0c0 ti: ffff880856772000 task.ti: ffff880856772000
[ 2881.703515] RIP: 0010:[] [] user_sdma_send_pkts+0xcd4/0x
[ 2881.703529] RSP: 0018:ffff880856773c48 EFLAGS: 00010287
[ 2881.703531] RAX: 0000000000000000 RBX: 0000000000001000 RCX: 0000000000002000
[ 2881.703534] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000002000
[ 2881.703537] RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000000
[ 2881.703540] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
[ 2881.703543] R13: 0000000000000000 R14: ffff88071e782e68 R15: ffff8810532955c0
[ 2881.703546] FS: 00007f9c4375e700(0000) GS:ffff88107eec0000(0000) knlGS:0000000000000000
[ 2881.703549] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 2881.703551] CR2: 0000000000000000 CR3: 00000007d4cba000 CR4: 00000000003407e0
[ 2881.703554] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 2881.703556] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 2881.703558] Stack:
[ 2881.703559] ffffffff00002000 ffff881000001800 ffffffff00000000 00000000000080d0
[ 2881.703570] 0000000000000000 0000200000000000 0000000000000000 ffff88071e782db8
[ 2881.703580] ffff8807d4d08d80 ffff881053295600 0000000000000008 ffff88071e782fc8
[ 2881.703589] Call Trace:
[ 2881.703691] [] hfi1_user_sdma_process_request+0x84a/0xab0 [hfi1]
[ 2881.703777] [] hfi1_aio_write+0xd2/0x110 [hfi1]
[ 2881.703828] [] do_sync_readv_writev+0x48/0x80
[ 2881.703837] [] do_readv_writev+0xbb/0x230
[ 2881.703843] [] SyS_writev+0x48/0xc0

This commit also addresses issues related to notifying user processes of SDMA request slot availability. The slot should be cleaned up first before the user process is notified of its availability.

Reviewed-by: Arthur Kepner Reviewed-by: Dennis Dalessandro Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_sdma.c | 293 +++++++++++++++------------------- drivers/staging/rdma/hfi1/user_sdma.h | 3 +- 2 files changed, 128 insertions(+), 168 deletions(-) (limited to 'drivers/staging')

diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index d3de771a0770..2d238f331247 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -147,6 +147,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
Default: 12 /* Last packet in the request */ #define TXREQ_FLAGS_REQ_LAST_PKT BIT(0) + +/* Last packet that uses a particular io vector */ #define TXREQ_FLAGS_IOVEC_LAST_PKT BIT(0) #define SDMA_REQ_IN_USE 0 @@ -171,6 +173,7 @@ static unsigned initial_pkt_count = 8; #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ struct user_sdma_iovec { + struct list_head list; struct iovec iov; /* number of pages in this vector */ unsigned npages; @@ -213,15 +216,6 @@ struct user_sdma_request { * to 0. */ u8 omfactor; - /* - * pointer to the user's mm_struct. We are going to - * get a reference to it so it doesn't get freed - * since we might not be in process context when we - * are processing the iov's. - * Using this mm_struct, we can get vma based on the - * iov's address (find_vma()). - */ - struct mm_struct *user_mm; /* * We copy the iovs for this request (based on * info.iovcnt). These are only the data vectors @@ -239,13 +233,13 @@ struct user_sdma_request { u16 tididx; u32 sent; u64 seqnum; + u64 seqcomp; struct list_head txps; spinlock_t txcmp_lock; /* protect txcmp list */ struct list_head txcmp; unsigned long flags; /* status of the last txreq completed */ int status; - struct work_struct worker; }; /* @@ -281,20 +275,20 @@ struct user_sdma_txreq { static int user_sdma_send_pkts(struct user_sdma_request *, unsigned); static int num_user_pages(const struct iovec *); static void user_sdma_txreq_cb(struct sdma_txreq *, int, int); -static void user_sdma_delayed_completion(struct work_struct *); -static void user_sdma_free_request(struct user_sdma_request *); +static inline void pq_update(struct hfi1_user_sdma_pkt_q *); +static void user_sdma_free_request(struct user_sdma_request *, bool); static int pin_vector_pages(struct user_sdma_request *, struct user_sdma_iovec *); -static void unpin_vector_pages(struct user_sdma_request *, - struct user_sdma_iovec *); +static void unpin_vector_pages(struct user_sdma_iovec *); static int check_header_template(struct user_sdma_request *, struct hfi1_pkt_header *, u32, u32); static int set_txreq_header(struct user_sdma_request *, struct user_sdma_txreq *, u32); static int set_txreq_header_ahg(struct user_sdma_request *, struct user_sdma_txreq *, u32); -static inline void set_comp_state(struct user_sdma_request *, - enum hfi1_sdma_comp_state, int); +static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *, + struct hfi1_user_sdma_comp_q *, + u16, enum hfi1_sdma_comp_state, int); static inline u32 set_pkt_bth_psn(__be32, u8, u32); static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len); @@ -381,17 +375,19 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp) goto pq_nomem; memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size; - pq->reqs = kmalloc(memsize, GFP_KERNEL); + pq->reqs = kzalloc(memsize, GFP_KERNEL); if (!pq->reqs) goto pq_reqs_nomem; INIT_LIST_HEAD(&pq->list); + INIT_LIST_HEAD(&pq->iovec_list); pq->dd = dd; pq->ctxt = uctxt->ctxt; pq->subctxt = fd->subctxt; pq->n_max_reqs = hfi1_sdma_comp_ring_size; pq->state = SDMA_PKT_Q_INACTIVE; atomic_set(&pq->n_reqs, 0); + spin_lock_init(&pq->iovec_lock); init_waitqueue_head(&pq->wait); iowait_init(&pq->busy, 0, NULL, defer_packet_queue, @@ -447,6 +443,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_user_sdma_pkt_q *pq; + struct user_sdma_iovec *iov; unsigned long flags; hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit, @@ -462,6 +459,15 @@ int 
hfi1_user_sdma_free_queues(struct hfi1_filedata *fd) wait_event_interruptible( pq->wait, (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); + /* Unpin any left over buffers. */ + while (!list_empty(&pq->iovec_list)) { + spin_lock_irqsave(&pq->iovec_lock, flags); + iov = list_first_entry(&pq->iovec_list, + struct user_sdma_iovec, list); + list_del_init(&iov->list); + spin_unlock_irqrestore(&pq->iovec_lock, flags); + unpin_vector_pages(iov); + } kfree(pq->reqs); kmem_cache_destroy(pq->txreq_cache); kfree(pq); @@ -479,16 +485,17 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd) int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, unsigned long dim, unsigned long *count) { - int ret = 0, i = 0, sent; + int ret = 0, i = 0; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_user_sdma_pkt_q *pq = fd->pq; struct hfi1_user_sdma_comp_q *cq = fd->cq; struct hfi1_devdata *dd = pq->dd; - unsigned long idx = 0; + unsigned long idx = 0, flags; u8 pcount = initial_pkt_count; struct sdma_req_info info; struct user_sdma_request *req; + struct user_sdma_iovec *ioptr; u8 opcode, sc, vl; if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { @@ -505,9 +512,21 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, dd->unit, uctxt->ctxt, fd->subctxt, ret); return -EFAULT; } + + /* Process any completed vectors */ + while (!list_empty(&pq->iovec_list)) { + spin_lock_irqsave(&pq->iovec_lock, flags); + ioptr = list_first_entry(&pq->iovec_list, + struct user_sdma_iovec, list); + list_del_init(&ioptr->list); + spin_unlock_irqrestore(&pq->iovec_lock, flags); + unpin_vector_pages(ioptr); + } + trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt, (u16 *)&info); - if (cq->comps[info.comp_idx].status == QUEUED) { + if (cq->comps[info.comp_idx].status == QUEUED || + test_bit(SDMA_REQ_IN_USE, &pq->reqs[info.comp_idx].flags)) { hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state", dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx); @@ -534,10 +553,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, req->cq = cq; req->status = -1; INIT_LIST_HEAD(&req->txps); - INIT_LIST_HEAD(&req->txcmp); - INIT_WORK(&req->worker, user_sdma_delayed_completion); - spin_lock_init(&req->txcmp_lock); memcpy(&req->info, &info, sizeof(info)); if (req_opcode(info.ctrl) == EXPECTED) @@ -606,6 +622,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, /* Save all the IO vector structures */ while (i < req->data_iovs) { + INIT_LIST_HEAD(&req->iovs[i].list); memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec)); req->iovs[i].offset = 0; req->data_len += req->iovs[i++].iov.iov_len; @@ -671,47 +688,52 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, } } - set_comp_state(req, QUEUED, 0); + set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); + atomic_inc(&pq->n_reqs); /* Send the first N packets in the request to buy us some time */ - sent = user_sdma_send_pkts(req, pcount); - if (unlikely(sent < 0)) { - if (sent != -EBUSY) { - req->status = sent; - set_comp_state(req, ERROR, req->status); - return sent; - } else - sent = 0; + ret = user_sdma_send_pkts(req, pcount); + if (unlikely(ret < 0 && ret != -EBUSY)) { + req->status = ret; + atomic_dec(&pq->n_reqs); + goto free_req; } - atomic_inc(&pq->n_reqs); - xchg(&pq->state, SDMA_PKT_Q_ACTIVE); - if (sent < req->info.npkts) { - /* - * This is a somewhat blocking send implementation. 
- * The driver will block the caller until all packets of the - * request have been submitted to the SDMA engine. However, it - * will not wait for send completions. - */ - while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) { - ret = user_sdma_send_pkts(req, pcount); - if (ret < 0) { - if (ret != -EBUSY) { - req->status = ret; - return ret; - } - wait_event_interruptible_timeout( - pq->busy.wait_dma, - (pq->state == SDMA_PKT_Q_ACTIVE), - msecs_to_jiffies( - SDMA_IOWAIT_TIMEOUT)); + /* + * It is possible that the SDMA engine would have processed all the + * submitted packets by the time we get here. Therefore, only set + * packet queue state to ACTIVE if there are still uncompleted + * requests. + */ + if (atomic_read(&pq->n_reqs)) + xchg(&pq->state, SDMA_PKT_Q_ACTIVE); + + /* + * This is a somewhat blocking send implementation. + * The driver will block the caller until all packets of the + * request have been submitted to the SDMA engine. However, it + * will not wait for send completions. + */ + while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) { + ret = user_sdma_send_pkts(req, pcount); + if (ret < 0) { + if (ret != -EBUSY) { + req->status = ret; + set_bit(SDMA_REQ_DONE_ERROR, &req->flags); + return ret; } + wait_event_interruptible_timeout( + pq->busy.wait_dma, + (pq->state == SDMA_PKT_Q_ACTIVE), + msecs_to_jiffies( + SDMA_IOWAIT_TIMEOUT)); } } *count += idx; return 0; free_req: - user_sdma_free_request(req); + user_sdma_free_request(req, true); + set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); return ret; } @@ -937,16 +959,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) iovec->pages[pageidx], offset, len); if (ret) { - int i; - SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret); - /* Mark all assigned vectors as complete so they - * are unpinned in the callback. */ - for (i = tx->idx; i >= 0; i--) { - tx->iovecs[i].flags |= - TXREQ_FLAGS_IOVEC_LAST_PKT; - } goto free_txreq; } iov_offset += len; @@ -1043,12 +1057,6 @@ static int pin_vector_pages(struct user_sdma_request *req, return -ENOMEM; } - /* - * Get a reference to the process's mm so we can use it when - * unpinning the io vectors. - */ - req->pq->user_mm = get_task_mm(current); - pinned = hfi1_acquire_user_pages((unsigned long)iovec->iov.iov_base, npages, 0, iovec->pages); @@ -1058,34 +1066,20 @@ static int pin_vector_pages(struct user_sdma_request *req, iovec->npages = pinned; if (pinned != npages) { SDMA_DBG(req, "Failed to pin pages (%d/%u)", pinned, npages); - unpin_vector_pages(req, iovec); + unpin_vector_pages(iovec); return -EFAULT; } + /* + * Get a reference to the process's mm so we can use it when + * unpinning the io vectors. + */ return 0; } -static void unpin_vector_pages(struct user_sdma_request *req, - struct user_sdma_iovec *iovec) +static void unpin_vector_pages(struct user_sdma_iovec *iovec) { - /* - * Unpinning is done through the workqueue so use the - * process's mm if we have a reference to it. - */ - if ((current->flags & PF_KTHREAD) && req->pq->user_mm) - use_mm(req->pq->user_mm); - hfi1_release_user_pages(iovec->pages, iovec->npages, 0); - /* - * Unuse the user's mm (see above) and release the - * reference to it. 
- */ - if (req->pq->user_mm) { - if (current->flags & PF_KTHREAD) - unuse_mm(req->pq->user_mm); - mmput(req->pq->user_mm); - } - kfree(iovec->pages); iovec->pages = NULL; iovec->npages = 0; @@ -1365,18 +1359,17 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status, struct user_sdma_txreq *tx = container_of(txreq, struct user_sdma_txreq, txreq); struct user_sdma_request *req; - bool defer; + struct hfi1_user_sdma_pkt_q *pq; + struct hfi1_user_sdma_comp_q *cq; + u16 idx; int i; if (!tx->req) return; req = tx->req; - /* - * If this is the callback for the last packet of the request, - * queue up the request for clean up. - */ - defer = (tx->seqnum == req->info.npkts - 1); + pq = req->pq; + cq = req->cq; /* * If we have any io vectors associated with this txreq, @@ -1385,87 +1378,52 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status, */ for (i = tx->idx; i >= 0; i--) { if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT) { - defer = true; - break; + spin_lock(&pq->iovec_lock); + list_add_tail(&tx->iovecs[i].vec->list, + &pq->iovec_list); + spin_unlock(&pq->iovec_lock); } } - req->status = status; if (status != SDMA_TXREQ_S_OK) { SDMA_DBG(req, "SDMA completion with error %d", status); set_bit(SDMA_REQ_HAS_ERROR, &req->flags); - defer = true; } - /* - * Defer the clean up of the iovectors and the request until later - * so it can be done outside of interrupt context. - */ - if (defer) { - spin_lock(&req->txcmp_lock); - list_add_tail(&tx->list, &req->txcmp); - spin_unlock(&req->txcmp_lock); - schedule_work(&req->worker); + req->seqcomp = tx->seqnum; + kmem_cache_free(pq->txreq_cache, tx); + tx = NULL; + + idx = req->info.comp_idx; + if (req->status == -1 && status == SDMA_TXREQ_S_OK) { + if (req->seqcomp == req->info.npkts - 1) { + req->status = 0; + user_sdma_free_request(req, false); + pq_update(pq); + set_comp_state(pq, cq, idx, COMPLETE, 0); + } } else { - kmem_cache_free(req->pq->txreq_cache, tx); + if (status != SDMA_TXREQ_S_OK) + req->status = status; + if (req->seqcomp == ACCESS_ONCE(req->seqnum) && + test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) { + user_sdma_free_request(req, false); + pq_update(pq); + set_comp_state(pq, cq, idx, ERROR, req->status); + } } } -static void user_sdma_delayed_completion(struct work_struct *work) +static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) { - struct user_sdma_request *req = - container_of(work, struct user_sdma_request, worker); - struct hfi1_user_sdma_pkt_q *pq = req->pq; - struct user_sdma_txreq *tx = NULL; - unsigned long flags; - u64 seqnum; - int i; - - while (1) { - spin_lock_irqsave(&req->txcmp_lock, flags); - if (!list_empty(&req->txcmp)) { - tx = list_first_entry(&req->txcmp, - struct user_sdma_txreq, list); - list_del(&tx->list); - } - spin_unlock_irqrestore(&req->txcmp_lock, flags); - if (!tx) - break; - - for (i = tx->idx; i >= 0; i--) - if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT) - unpin_vector_pages(req, tx->iovecs[i].vec); - - seqnum = tx->seqnum; - kmem_cache_free(pq->txreq_cache, tx); - tx = NULL; - - if (req->status != SDMA_TXREQ_S_OK) { - if (seqnum == ACCESS_ONCE(req->seqnum) && - test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) { - atomic_dec(&pq->n_reqs); - set_comp_state(req, ERROR, req->status); - user_sdma_free_request(req); - break; - } - } else { - if (seqnum == req->info.npkts - 1) { - atomic_dec(&pq->n_reqs); - set_comp_state(req, COMPLETE, 0); - user_sdma_free_request(req); - break; - } - } - } - - if (!atomic_read(&pq->n_reqs)) { + if 
(atomic_dec_and_test(&pq->n_reqs)) { xchg(&pq->state, SDMA_PKT_Q_INACTIVE); wake_up(&pq->wait); } }

-static void user_sdma_free_request(struct user_sdma_request *req) +static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) { if (!list_empty(&req->txps)) { struct sdma_txreq *t, *p; @@ -1478,26 +1436,27 @@ static void user_sdma_free_request(struct user_sdma_request *req) kmem_cache_free(req->pq->txreq_cache, tx); } } - if (req->data_iovs) { + if (req->data_iovs && unpin) { int i; for (i = 0; i < req->data_iovs; i++) if (req->iovs[i].npages && req->iovs[i].pages) - unpin_vector_pages(req, &req->iovs[i]); + unpin_vector_pages(&req->iovs[i]); } kfree(req->tids); clear_bit(SDMA_REQ_IN_USE, &req->flags); }

-static inline void set_comp_state(struct user_sdma_request *req, - enum hfi1_sdma_comp_state state, - int ret) +static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, + struct hfi1_user_sdma_comp_q *cq, + u16 idx, enum hfi1_sdma_comp_state state, + int ret) { - SDMA_DBG(req, "Setting completion status %u %d", state, ret); - req->cq->comps[req->info.comp_idx].status = state; + hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d", + pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret); + cq->comps[idx].status = state; if (state == ERROR) - req->cq->comps[req->info.comp_idx].errcode = -ret; - trace_hfi1_sdma_user_completion(req->pq->dd, req->pq->ctxt, - req->pq->subctxt, req->info.comp_idx, - state, ret); + cq->comps[idx].errcode = -ret; + trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt, + idx, state, ret); }

diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h index 0afa28508a8a..317f0e8cffb6 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.h +++ b/drivers/staging/rdma/hfi1/user_sdma.h @@ -69,7 +69,8 @@ struct hfi1_user_sdma_pkt_q { struct iowait busy; unsigned state; wait_queue_head_t wait; - struct mm_struct *user_mm; + struct list_head iovec_list; + spinlock_t iovec_lock; /* protect iovec_list */ }; struct hfi1_user_sdma_comp_q {

-- cgit v1.2.3-59-g8ed1b

From ecd42f8df2b9a0a77f2638c7780cda96de2b489b Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:35:14 -0800 Subject: staging/rdma/hfi1: correctly check for post-interrupt packets

At the end of the packet processing interrupt and thread handler, the RcvAvail interrupt is finally cleared down. There is a window between the last packet check (via DMA to memory) and interrupt clear-down. The code to recheck for a packet once the RcvAvail interrupt is enabled must ultimately use a CSR read of RcvHdrTail rather than depend on DMA'ed memory. This change adds a CSR read of RcvHdrTail if the memory check does not show a packet present. The memory check is retained as a quick test before doing the more expensive, but always correct, CSR read.

In the ASIC, the CSR read used to force the RcvAvail clear-down write to complete may bypass queued DMA writes to memory. The only correct way to decide if a packet has arrived without an interrupt to push DMA to memory ahead of itself is to read the tail directly after RcvAvail has been cleared down. It is not sufficient to just read the tail and skip pushing the clear-down. Both must be done. The tail read will not push the clear-down write because it is in a different area of the chip. At this point, it is OK to have packet data still being DMA'ed to memory. This is the end of packet processing for previous packets.
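The two-stage check described above can be modeled with a simplified sketch (names are illustrative, not the driver's; the real routine appears in the chip.c diff below). A hit in the memory check is definitive, but a miss is not, because queued DMA writes may not have reached memory yet:

	static int packet_present(struct rcv_ctx *rcd)
	{
		/* quick, possibly stale, check of DMA'ed memory */
		if (rcd->head != rcd->mem_tail)
			return 1;

		/* expensive CSR read of the tail: always correct */
		return rcd->head != read_tail_csr(rcd);
	}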
If the driver detects a new packet has arrived before interrupts were re-enabled, it will force a new interrupt and the interrupt will push the packet DMAs to memory, where the driver will then react to the interrupt and do normal packet processing.

Reviewed-by: Mike Marciniszyn Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'drivers/staging')

diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 13b92a3d3d8b..a67483e2ee96 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -8022,9 +8022,9 @@ static irqreturn_t sdma_interrupt(int irq, void *data) } /* - * Clear the receive interrupt, forcing the write and making sure - * we have data from the chip, pushing everything in front of it - * back to the host. + * Clear the receive interrupt. Use a read of the interrupt clear CSR + * to ensure that the write completed. This does NOT guarantee that + * queued DMA writes to memory from the chip are pushed. */ static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd) { @@ -8043,15 +8043,33 @@ void force_recv_intr(struct hfi1_ctxtdata *rcd) write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); } -/* return non-zero if a packet is present */ +/* + * Return non-zero if a packet is present. + * + * This routine is called when rechecking for packets after the RcvAvail + * interrupt has been cleared down. First, do a quick check of memory for + * a packet present. If not found, use an expensive CSR read of the context + * tail to determine the actual tail. The CSR read is necessary because there + * is no method to push pending DMAs to memory other than an interrupt and we + * are trying to determine if we need to force an interrupt. + */ static inline int check_packet_present(struct hfi1_ctxtdata *rcd) { + u32 tail; + int present; + if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) - return (rcd->seq_cnt == + present = (rcd->seq_cnt == rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)))); + else /* is RDMA rtail */ + present = (rcd->head != get_rcvhdrtail(rcd)); + + if (present) + return 1; - /* else is RDMA rtail */ - return (rcd->head != get_rcvhdrtail(rcd)); + /* fall back to a CSR read, correct independent of DMA_RTAIL */ + tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); + return rcd->head != tail; } /*

-- cgit v1.2.3-59-g8ed1b

From c7cbf2fabbe6e7cbf4f82b6f79bc8e499761c4d2 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:35:23 -0800 Subject: staging/rdma/hfi1: Properly determine error status of SDMA slots

To ensure correct operation between the driver and PSM with respect to managing the SDMA request ring, it is important that the status for a particular request slot is set at the correct time. Otherwise, PSM can get out of sync with the driver, which could lead to hangs or errors on new requests. Properly determining when to set the error status of an SDMA slot depends on knowing exactly when the last txreq for that request has been completed. This in turn requires that the driver knows exactly how many requests have been generated and how many of those requests have been successfully submitted to the SDMA queue.
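The resulting bookkeeping can be sketched like this (a simplified model; the field names seqnum, seqsubmitted, and seqcomp follow the diff below, while the helpers are illustrative):

	/* seqnum: txreqs generated so far
	 * seqsubmitted: txreqs actually accepted onto the SDMA ring
	 * seqcomp: last txreq whose completion callback has run
	 */
	static void submit_more(struct sdma_req *r)
	{
		int ret = send_txlist(r);	/* count submitted, or -errno */

		if (ret > 0)
			r->seqsubmitted += ret;
	}

	static void on_txreq_complete(struct sdma_req *r, u64 seq, int status)
	{
		r->seqcomp = seq;
		if (status)
			r->status = status;
		/* only the callback of the last *submitted* txreq may retire
		 * the slot and publish its error status
		 */
		if (r->status && r->seqcomp == r->seqsubmitted - 1)
			set_slot_error(r);
	}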
The previous implementation of the mid-layer SDMA API did not provide a way for the caller of sdma_send_txlist() to know how many of the txreqs in the input list have actually been submitted without traversing the list and counting. Since sdma_send_txlist() already traverses the list in order to process it, requiring such traversal in the caller is completely unnecessary. Therefore, it is much easier to enhance sdma_send_txlist() to return the number of successfully submitted txreqs. This, in turn, allows the caller to accurately determine the progress of the SDMA request and, therefore, correctly set the error status at the right time. Reviewed-by: Mike Marciniszyn Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/sdma.c | 6 +++--- drivers/staging/rdma/hfi1/user_sdma.c | 15 +++++++++++---- 2 files changed, 14 insertions(+), 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index ddaaaacaf038..579d82109932 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -2144,8 +2144,8 @@ nodesc: * side locking. * * Return: - * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring - * (wait == NULL) + * > 0 - Success (value is number of sdma_txreq's submitted), + * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state */ int sdma_send_txlist(struct sdma_engine *sde, @@ -2185,7 +2185,7 @@ update_tail: if (tail != INVALID_TAIL) sdma_update_tail(sde, tail); spin_unlock_irqrestore(&sde->tail_lock, flags); - return ret; + return ret == 0 ? count : ret; unlock_noconn: spin_lock(&sde->flushlist_lock); list_for_each_entry_safe(tx, tx_next, tx_list, list) { diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index 2d238f331247..0c32eaf25afc 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -234,6 +234,7 @@ struct user_sdma_request { u32 sent; u64 seqnum; u64 seqcomp; + u64 seqsubmitted; struct list_head txps; spinlock_t txcmp_lock; /* protect txcmp list */ struct list_head txcmp; @@ -1001,18 +1002,19 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) TXREQ_FLAGS_IOVEC_LAST_PKT; } + list_add_tail(&tx->txreq.list, &req->txps); /* * It is important to increment this here as it is used to * generate the BTH.PSN and, therefore, can't be bulk-updated * outside of the loop. 
*/ tx->seqnum = req->seqnum++; - list_add_tail(&tx->txreq.list, &req->txps); npkts++; } dosend: ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps); - if (list_empty(&req->txps)) + if (list_empty(&req->txps)) { + req->seqsubmitted = req->seqnum; if (req->seqnum == req->info.npkts) { set_bit(SDMA_REQ_SEND_DONE, &req->flags); /* @@ -1024,6 +1026,10 @@ dosend: if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) sdma_ahg_free(req->sde, req->ahg_idx); } + } else if (ret > 0) { + req->seqsubmitted += ret; + ret = 0; + } return ret; free_txreq: @@ -1406,8 +1412,9 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status, } else { if (status != SDMA_TXREQ_S_OK) req->status = status; - if (req->seqcomp == ACCESS_ONCE(req->seqnum) && - test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) { + if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && + (test_bit(SDMA_REQ_SEND_DONE, &req->flags) || + test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) { user_sdma_free_request(req, false); pq_update(pq); set_comp_state(pq, cq, idx, ERROR, req->status); -- cgit v1.2.3-59-g8ed1b From f45c8dc8543783701fbad39a995e7a074a233b9d Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:35:31 -0800 Subject: staging/rdma/hfi1: Report physical state changes per device instead of globally Make physical state change reporting be per-device, not global to reduce excessive reports of "physical state changed" Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 6 +++--- drivers/staging/rdma/hfi1/hfi.h | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index a67483e2ee96..5b8fb021e0ae 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -12201,18 +12201,17 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd) { - static u32 remembered_state = 0xff; u32 pstate; u32 ib_pstate; pstate = read_physical_state(ppd->dd); ib_pstate = chip_to_opa_pstate(ppd->dd, pstate); - if (remembered_state != ib_pstate) { + if (ppd->last_pstate != ib_pstate) { dd_dev_info(ppd->dd, "%s: physical state changed to %s (0x%x), phy 0x%x\n", __func__, opa_pstate_name(ib_pstate), ib_pstate, pstate); - remembered_state = ib_pstate; + ppd->last_pstate = ib_pstate; } return ib_pstate; } @@ -14019,6 +14018,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, /* start in offline */ ppd->host_link_state = HLS_DN_OFFLINE; init_vl_arb_caches(ppd); + ppd->last_pstate = 0xff; /* invalid value */ } dd->link_default = HLS_DN_POLL; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index f3c1e6722dd4..da429915e5db 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -663,6 +663,7 @@ struct hfi1_pportdata { u8 link_enabled; /* link enabled? */ u8 linkinit_reason; u8 local_tx_rate; /* rate given to 8051 firmware */ + u8 last_pstate; /* info only */ /* placeholders for IB MAD packet settings */ u8 overrun_threshold; -- cgit v1.2.3-59-g8ed1b From 53f449e4bf04ac5dce6385a1546ab6108666def2 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:35:40 -0800 Subject: staging/rdma/hfi1: Fix fabric serdes reset by re-downloading firmware A host fabric serdes reset is required to go back to polling. 
However, access to the fabric serdes may have been invalidated by the sibling HFI when it downloads its fabric serdes firmware. Work around this by re-downloading and re-validating the serdes firmware at reset time on Bx hardware.

Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/firmware.c | 57 ++++++++++++++++++++++++++++-------- 1 file changed, 44 insertions(+), 13 deletions(-) (limited to 'drivers/staging')

diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 4ba524b82edd..0b23e3eaf574 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -233,6 +233,8 @@ static const u8 all_pcie_serdes_broadcast = 0xe0; /* forwards */ static void dispose_one_firmware(struct firmware_details *fdet); +static int load_fabric_serdes_firmware(struct hfi1_devdata *dd, + struct firmware_details *fdet); /* * Read a single 64-bit value from 8051 data memory. */ @@ -1092,27 +1094,56 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags) } /* - * Reset all of the fabric serdes for our HFI. + * Reset all of the fabric serdes for this HFI in preparation to take the + * link to Polling. + * + * To do a reset, we need to write to the serdes registers. Unfortunately, + * the fabric serdes download to the other HFI on the ASIC will have turned + * off the firmware validation on this HFI. This means we can't write to the + * registers to reset the serdes. Work around this by performing a complete + * re-download and validation of the fabric serdes firmware. This, as a + * by-product, will reset the serdes. NOTE: the re-download requires that + * the 8051 be in the Offline state. I.e. not actively trying to use the + * serdes. This routine is called at the point where the link is Offline and + * is getting ready to go to Polling. */ void fabric_serdes_reset(struct hfi1_devdata *dd) { - u8 ra; - - if (dd->icode != ICODE_RTL_SILICON) /* only for RTL */ + if (!fw_fabric_serdes_load) return; - ra = fabric_serdes_broadcast[dd->hfi1_id]; + if (is_ax(dd)) { + /* A0 serdes do not work with a re-download */ + u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; + + acquire_hw_mutex(dd); + set_sbus_fast_mode(dd); + /* place SerDes in reset and disable SPICO */ + sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011); + /* wait 100 refclk cycles @ 156.25MHz => 640ns */ + udelay(1); + /* remove SerDes reset */ + sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010); + /* turn SPICO enable on */ + sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002); + clear_sbus_fast_mode(dd); + release_hw_mutex(dd); + return; + } acquire_hw_mutex(dd); set_sbus_fast_mode(dd); - /* place SerDes in reset and disable SPICO */ - sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011); - /* wait 100 refclk cycles @ 156.25MHz => 640ns */ - udelay(1); - /* remove SerDes reset */ - sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010); - /* turn SPICO enable on */ - sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002); + + turn_off_spicos(dd, SPICO_FABRIC); + /* + * No need for firmware retry - what to download has already been + * decided. + * No need to pay attention to the load return - the only failure + * is a validation failure, which has already been checked by the + * initial download.
+ */ + (void)load_fabric_serdes_firmware(dd, &fw_fabric); + clear_sbus_fast_mode(dd); release_hw_mutex(dd); } -- cgit v1.2.3-59-g8ed1b From 7b0b01aa8f48cd237322cbffa05662a9c6b156f8 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:35:49 -0800 Subject: staging/rdma/hfi1: Split last 8 bytes of copy to user buffer Copy the last 8 bytes of user mode RC WRITE_ONLY and WRITE_LAST opcodes separately from the rest of the data. It is a de-facto standard for some MPI implementations to use a poll on the last few bytes of a verbs message to indicate that the message has been received rather than follow the required function method. The driver uses the kernel memcpy routine, which becomes "rep movsb" on modern machines. This copy, while very fast, does not guarantee in-order copy completion and the result is an occasional perceived corrupted packet. Avoid the issue by splitting the last 8 bytes to copy from the verbs opcodes where it matters and performing an in-order byte copy. Reviewed-by: Mike Marciniszyn Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 17 +++++++++++------ drivers/staging/rdma/hfi1/ruc.c | 8 ++++++-- drivers/staging/rdma/hfi1/uc.c | 10 +++++----- drivers/staging/rdma/hfi1/ud.c | 9 +++++---- drivers/staging/rdma/hfi1/verbs.c | 31 +++++++++++++++++++++++++++++-- drivers/staging/rdma/hfi1/verbs.h | 2 +- 6 files changed, 57 insertions(+), 20 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 50559fd14a70..371edc3dd4f6 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -1539,7 +1539,7 @@ read_middle: qp->s_rdma_read_len -= pmtu; update_last_psn(qp, psn); spin_unlock_irqrestore(&qp->s_lock, flags); - hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0); + hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0); goto bail; case OP(RDMA_READ_RESPONSE_ONLY): @@ -1583,7 +1583,7 @@ read_last: if (unlikely(tlen != qp->s_rdma_read_len)) goto ack_len_err; aeth = be32_to_cpu(ohdr->u.aeth); - hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0); + hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0); WARN_ON(qp->s_rdma_read_sge.num_sge); (void) do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST), 0, rcd); @@ -1977,6 +1977,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) unsigned long flags; u32 bth1; int ret, is_fecn = 0; + int copy_last = 0; bth0 = be32_to_cpu(ohdr->bth[0]); if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0)) @@ -2081,7 +2082,7 @@ send_middle: qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) goto nack_inv; - hfi1_copy_sge(&qp->r_sge, data, pmtu, 1); + hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0); break; case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): @@ -2109,8 +2110,10 @@ send_last_imm: wc.ex.imm_data = ohdr->u.imm_data; wc.wc_flags = IB_WC_WITH_IMM; goto send_last; - case OP(SEND_LAST): case OP(RDMA_WRITE_LAST): + copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user; + /* fall through */ + case OP(SEND_LAST): no_immediate_data: wc.wc_flags = 0; wc.ex.imm_data = 0; @@ -2126,7 +2129,7 @@ send_last: wc.byte_len = tlen + qp->r_rcv_len; if (unlikely(wc.byte_len > qp->r_len)) goto nack_inv; - hfi1_copy_sge(&qp->r_sge, data, tlen, 1); + hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last); rvt_put_ss(&qp->r_sge); qp->r_msn++; if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) @@ -2163,8 +2166,10 @@ send_last: (bth0 & IB_BTH_SOLICITED) != 0); break; - case 
OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_ONLY): + copy_last = 1; + /* fall through */ + case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto nack_inv; diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index f09badbfa51c..6aeea6c4b236 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -370,6 +370,7 @@ static void ruc_loopback(struct rvt_qp *sqp) enum ib_wc_status send_status; int release; int ret; + int copy_last = 0; rcu_read_lock(); @@ -459,10 +460,13 @@ again: goto op_err; if (!ret) goto rnr_nak; - /* FALLTHROUGH */ + /* skip copy_last set and qp_access_flags recheck */ + goto do_write; case IB_WR_RDMA_WRITE: + copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user; if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto inv_err; +do_write: if (wqe->length == 0) if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length, wqe->rdma_wr.remote_addr, @@ -526,7 +530,7 @@ again: if (len > sge->sge_length) len = sge->sge_length; WARN_ON_ONCE(len == 0); - hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release); + hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 1e50d303c024..0aa604b7557b 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -418,7 +418,7 @@ send_first: qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) goto rewind; - hfi1_copy_sge(&qp->r_sge, data, pmtu, 0); + hfi1_copy_sge(&qp->r_sge, data, pmtu, 0, 0); break; case OP(SEND_LAST_WITH_IMMEDIATE): @@ -443,7 +443,7 @@ send_last: if (unlikely(wc.byte_len > qp->r_len)) goto rewind; wc.opcode = IB_WC_RECV; - hfi1_copy_sge(&qp->r_sge, data, tlen, 0); + hfi1_copy_sge(&qp->r_sge, data, tlen, 0, 0); rvt_put_ss(&qp->s_rdma_read_sge); last_imm: wc.wr_id = qp->r_wr_id; @@ -518,7 +518,7 @@ rdma_first: qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) goto drop; - hfi1_copy_sge(&qp->r_sge, data, pmtu, 1); + hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0); break; case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): @@ -547,7 +547,7 @@ rdma_last_imm: } wc.byte_len = qp->r_len; wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; - hfi1_copy_sge(&qp->r_sge, data, tlen, 1); + hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0); rvt_put_ss(&qp->r_sge); goto last_imm; @@ -563,7 +563,7 @@ rdma_last: tlen -= (hdrsize + pad + 4); if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; - hfi1_copy_sge(&qp->r_sge, data, tlen, 1); + hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0); rvt_put_ss(&qp->r_sge); break; diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index 2eae16769688..fdf6e3bee8f1 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -187,7 +187,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) if (ah_attr->ah_flags & IB_AH_GRH) { hfi1_copy_sge(&qp->r_sge, &ah_attr->grh, - sizeof(struct ib_grh), 1); + sizeof(struct ib_grh), 1, 0); wc.wc_flags |= IB_WC_GRH; } else hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); @@ -203,7 +203,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) if (len > sge->sge_length) len = sge->sge_length; WARN_ON_ONCE(len == 0); - hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1); + hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1, 0); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; @@ -836,11 
+836,12 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) } if (has_grh) { hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh, - sizeof(struct ib_grh), 1); + sizeof(struct ib_grh), 1, 0); wc.wc_flags |= IB_WC_GRH; } else hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); - hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); + hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), + 1, 0); rvt_put_ss(&qp->r_sge); if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) return; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index d617324e3c48..8f351bc157df 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -242,14 +242,28 @@ __be64 ib_hfi1_sys_image_guid; * @ss: the SGE state * @data: the data to copy * @length: the length of the data + * @copy_last: do a separate copy of the last 8 bytes */ void hfi1_copy_sge( struct rvt_sge_state *ss, void *data, u32 length, - int release) + int release, + int copy_last) { struct rvt_sge *sge = &ss->sge; + int in_last = 0; + int i; + + if (copy_last) { + if (length > 8) { + length -= 8; + } else { + copy_last = 0; + in_last = 1; + } + } +again: while (length) { u32 len = sge->length; @@ -258,7 +272,13 @@ void hfi1_copy_sge( if (len > sge->sge_length) len = sge->sge_length; WARN_ON_ONCE(len == 0); - memcpy(sge->vaddr, data, len); + if (in_last) { + /* enforce byte transer ordering */ + for (i = 0; i < len; i++) + ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i]; + } else { + memcpy(sge->vaddr, data, len); + } sge->vaddr += len; sge->length -= len; sge->sge_length -= len; @@ -281,6 +301,13 @@ void hfi1_copy_sge( data += len; length -= len; } + + if (copy_last) { + copy_last = 0; + in_last = 1; + length = 8; + goto again; + } } /** diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index ac84dd70c6c7..afb2d7fd6ae6 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -398,7 +398,7 @@ void hfi1_put_txreq(struct verbs_txreq *tx); int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps); void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, - int release); + int release, int copy_last); void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release); -- cgit v1.2.3-59-g8ed1b From 91ab4ed334d0ea2f6c720ecb6204c3de350aaa08 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Wed, 3 Feb 2016 14:35:57 -0800 Subject: staging/rdma/hfi1: Implement LED beaconing for maintenance This patch implements LED beaconing for maintenance. A MAD packet with the LEDInfo attribute set to 1 will enable LED beaconing with a duty cycle of 2s on and 1.5s off. A MAD packet with the LEDInfo attribute set to 0 will disable beaconing and return the LED to normal operation. Reviewed-by: Dennis Dalessandro Signed-off-by: Easwar Hariharan Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/driver.c | 81 ++++++++++++++++++-------------------- drivers/staging/rdma/hfi1/hfi.h | 29 ++++++++------ drivers/staging/rdma/hfi1/init.c | 6 ++- drivers/staging/rdma/hfi1/mad.c | 9 +++-- 4 files changed, 64 insertions(+), 61 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 59ce85f8d155..5d012feaa4d4 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. 
+ * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -1172,63 +1172,64 @@ int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc) return 0; } -/* - * Following deal with the "obviously simple" task of overriding the state - * of the LEDs, which normally indicate link physical and logical status. - * The complications arise in dealing with different hardware mappings - * and the board-dependent routine being called from interrupts. - * and then there's the requirement to _flash_ them. - */ -#define LED_OVER_FREQ_SHIFT 8 -#define LED_OVER_FREQ_MASK (0xFF<dd; + + if (atomic_read(&ppd->led_override_timer_active)) { + del_timer_sync(&ppd->led_override_timer); + atomic_set(&ppd->led_override_timer_active, 0); + } + + /* Shut off LEDs after we are sure timer is not running */ + setextled(dd, 0); +} static void run_led_override(unsigned long opaque) { struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque; struct hfi1_devdata *dd = ppd->dd; - int timeoff; - int ph_idx; + unsigned long timeout; + int phase_idx; if (!(dd->flags & HFI1_INITTED)) return; - ph_idx = ppd->led_override_phase++ & 1; - ppd->led_override = ppd->led_override_vals[ph_idx]; - timeoff = ppd->led_override_timeoff; + phase_idx = ppd->led_override_phase & 1; + setextled(dd, phase_idx); + + timeout = ppd->led_override_vals[phase_idx]; + /* Set up for next phase */ + ppd->led_override_phase = !ppd->led_override_phase; /* * don't re-fire the timer if user asked for it to be off; we let * it fire one more time after they turn it off to simplify */ if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) - mod_timer(&ppd->led_override_timer, jiffies + timeoff); + mod_timer(&ppd->led_override_timer, jiffies + timeout); + else + /* Hand control of the LED to the DC for normal operation */ + write_csr(dd, DCC_CFG_LED_CNTRL, 0); } -void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val) +/* + * To have the LED blink in a particular pattern, provide timeon and timeoff + * in milliseconds. To turn off custom blinking and return to normal operation, + * provide timeon = timeoff = 0. + */ +void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, + unsigned int timeoff) { struct hfi1_devdata *dd = ppd->dd; - int timeoff, freq; if (!(dd->flags & HFI1_INITTED)) return; - /* First check if we are blinking. If not, use 1HZ polling */ - timeoff = HZ; - freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT; - - if (freq) { - /* For blink, set each phase from one nybble of val */ - ppd->led_override_vals[0] = val & 0xF; - ppd->led_override_vals[1] = (val >> 4) & 0xF; - timeoff = (HZ << 4)/freq; - } else { - /* Non-blink set both phases the same. */ - ppd->led_override_vals[0] = val & 0xF; - ppd->led_override_vals[1] = val & 0xF; - } - ppd->led_override_timeoff = timeoff; + /* Convert to jiffies for direct use in timer */ + ppd->led_override_vals[0] = msecs_to_jiffies(timeoff); + ppd->led_override_vals[1] = msecs_to_jiffies(timeon); + ppd->led_override_phase = 1; /* Arbitrarily start from LED on phase */ /* * If the timer has not already been started, do so. 
Use a "quick" @@ -1293,14 +1294,8 @@ int hfi1_reset_device(int unit) for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; - if (atomic_read(&ppd->led_override_timer_active)) { - /* Need to stop LED timer, _then_ shut off LEDs */ - del_timer_sync(&ppd->led_override_timer); - atomic_set(&ppd->led_override_timer_active, 0); - } - /* Shut off LEDs after we are sure timer is not running */ - ppd->led_override = LED_OVER_BOTH_OFF; + shutdown_led_override(ppd); } if (dd->flags & HFI1_HAS_SEND_DMA) sdma_exit(dd); diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index da429915e5db..18508c9423a9 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -7,7 +7,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -669,14 +669,17 @@ struct hfi1_pportdata { u8 overrun_threshold; u8 phy_error_threshold; - /* used to override LED behavior */ - u8 led_override; /* Substituted for normal value, if non-zero */ - u16 led_override_timeoff; /* delta to next timer event */ - u8 led_override_vals[2]; /* Alternates per blink-frame */ - u8 led_override_phase; /* Just counts, LSB picks from vals[] */ + /* Used to override LED behavior for things like maintenance beaconing*/ + /* + * Alternates per phase of blink + * [0] holds LED off duration, [1] holds LED on duration + */ + unsigned long led_override_vals[2]; + u8 led_override_phase; /* LSB picks from vals[] */ atomic_t led_override_timer_active; /* Used to flash LEDs in override mode */ struct timer_list led_override_timer; + u32 sm_trap_qp; u32 sa_qp; @@ -1599,14 +1602,14 @@ void hfi1_free_devdata(struct hfi1_devdata *); void cc_state_reclaim(struct rcu_head *rcu); struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra); +void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, + unsigned int timeoff); /* - * Set LED override, only the two LSBs have "public" meaning, but - * any non-zero value substitutes them for the Link and LinkTrain - * LED states. + * Only to be used for driver unload or device reset where we cannot allow + * the timer to fire even the one extra time, else use hfi1_set_led_override + * with timeon = timeoff = 0 */ -#define HFI1_LED_PHYS 1 /* Physical (linktraining) GREEN LED */ -#define HFI1_LED_LOG 2 /* Logical (link) YELLOW LED */ -void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val); +void shutdown_led_override(struct hfi1_pportdata *ppd); #define HFI1_CREDIT_RETURN_RATE (100) diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index eec91305516a..fe5e1e57307b 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. 
+ * Copyright(c) 2015, 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -908,6 +908,8 @@ static void shutdown_device(struct hfi1_devdata *dd) /* disable the send device */ pio_send_control(dd, PSC_GLOBAL_DISABLE); + shutdown_led_override(ppd); + /* * Clear SerdesEnable. * We can't count on interrupts since we are stopping. diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 5146f5df7a10..6976f93bd36f 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -3449,7 +3449,10 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, return reply((struct ib_mad_hdr *)smp); } - setextled(dd, on); + if (on) + hfi1_set_led_override(dd->pport, 2000, 1500); + else + hfi1_set_led_override(dd->pport, 0, 0); return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len); } -- cgit v1.2.3-59-g8ed1b From 0096765be01926e7efcc22032032347448743de5 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:36:06 -0800 Subject: staging/rdma/hfi1: Remove PCIe AER diagnostic message There are several reasons why PCIE AER cannot be enabled. Do not report the failure to enable as an error. Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/pcie.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 6605a6afbb1d..3d0c2e21d3e1 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -132,13 +132,7 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) } pci_set_master(pdev); - ret = pci_enable_pcie_error_reporting(pdev); - if (ret) { - hfi1_early_err(&pdev->dev, - "Unable to enable pcie error reporting: %d\n", - ret); - ret = 0; - } + (void)pci_enable_pcie_error_reporting(pdev); goto done; bail: -- cgit v1.2.3-59-g8ed1b From cfe3e656d8cd5ff03b8f0ce24f920f306313b013 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:36:14 -0800 Subject: staging/rdma/hfi1: Correct TWSI reset Change the TWSI reset function so it will stop the reset once the lines are in an expected state. 
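In short, the reworked reset gives up immediately if a slave device is holding SCL low (nothing can be done in that case) and otherwise clocks SCL until the slave releases SDA. A condensed sketch of that sequence, using the driver's scl_in/sda_in/scl_out helpers as they appear in the twsi.c diff below (this is an illustration only, not the patch itself):

	/*
	 * Illustrative sketch of the new recovery flow; the
	 * hfi1_twsi_reset() hunk below is the authoritative code.
	 */
	static int twsi_reset_sketch(struct hfi1_devdata *dd, u32 target)
	{
		int clock_cycles_left = 9;

		/* A slave holding SCL low cannot be recovered by clocking */
		if (!scl_in(dd, target, 0))
			return -EIO;

		/* Clock up to 9 cycles until the slave releases SDA */
		while (clock_cycles_left--) {
			if (sda_in(dd, target, 0))
				return 0;	/* bus is free */
			scl_out(dd, target, 0);
			scl_out(dd, target, 1);
		}

		return -EIO;	/* SDA never released; the bus is wedged */
	}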
Reviewed-by: Easwar Hariharan Reviewed-by: Dean Luick Signed-off-by: Pablo Cacho Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qsfp.c | 10 +++---- drivers/staging/rdma/hfi1/twsi.c | 64 ++++++++++++++++++---------------------- drivers/staging/rdma/hfi1/twsi.h | 7 +++-- 3 files changed, 36 insertions(+), 45 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index 0e1a49294d99..c9d1e64ef681 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -106,7 +106,6 @@ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, if (ret) { hfi1_dev_porterr(ppd->dd, ppd->port, "I2C write interface reset failed\n"); - ret = -EIO; goto done; } @@ -179,7 +178,6 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, if (ret) { hfi1_dev_porterr(ppd->dd, ppd->port, "I2C read interface reset failed\n"); - ret = -EIO; goto done; } @@ -213,7 +211,7 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, hfi1_dev_porterr(ppd->dd, ppd->port, "QSFP write interface reset failed\n"); mutex_unlock(&ppd->dd->qsfp_i2c_mutex); - return -EIO; + return ret; } while (count < len) { @@ -279,7 +277,7 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, hfi1_dev_porterr(ppd->dd, ppd->port, "QSFP read interface reset failed\n"); mutex_unlock(&ppd->dd->qsfp_i2c_mutex); - return -EIO; + return ret; } while (count < len) { diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/staging/rdma/hfi1/twsi.c index 7c579b343844..d7dfdd231669 100644 --- a/drivers/staging/rdma/hfi1/twsi.c +++ b/drivers/staging/rdma/hfi1/twsi.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -136,6 +136,19 @@ static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit) i2c_wait_for_writes(dd, target); } +static u8 scl_in(struct hfi1_devdata *dd, u32 target, int wait) +{ + u32 read_val, mask; + + mask = QSFP_HFI0_I2CCLK; + /* SCL is meant to be bare-drain, so never set "OUT", just DIR */ + hfi1_gpio_mod(dd, target, 0, 0, mask); + read_val = hfi1_gpio_mod(dd, target, 0, 0, 0); + if (wait) + i2c_wait_for_writes(dd, target); + return (read_val & mask) >> GPIO_SCL_NUM; +} + static void sda_out(struct hfi1_devdata *dd, u32 target, u8 bit) { u32 mask; @@ -274,13 +287,12 @@ static void stop_cmd(struct hfi1_devdata *dd, u32 target) /** * hfi1_twsi_reset - reset I2C communication * @dd: the hfi1_ib device + * returns 0 if ok, -EIO on error */ - int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target) { int clock_cycles_left = 9; - int was_high = 0; - u32 pins, mask; + u32 mask; /* Both SCL and SDA should be high. If not, there * is something wrong. @@ -294,43 +306,23 @@ int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target) */ hfi1_gpio_mod(dd, target, 0, 0, mask); - /* - * Clock nine times to get all listeners into a sane state. - * If SDA does not go high at any point, we are wedged. - * One vendor recommends then issuing START followed by STOP. - * we cannot use our "normal" functions to do that, because - * if SCL drops between them, another vendor's part will - * wedge, dropping SDA and keeping it low forever, at the end of - * the next transaction (even if it was not the device addressed). - * So our START and STOP take place with SCL held high. + /* Check if SCL is low, if it is low then we have a slave device + * misbehaving and there is not much we can do. + */ + if (!scl_in(dd, target, 0)) + return -EIO; + + /* Check if SDA is low, if it is low then we have to clock SDA + * up to 9 times for the device to release the bus */ while (clock_cycles_left--) { + if (sda_in(dd, target, 0)) + return 0; scl_out(dd, target, 0); scl_out(dd, target, 1); - /* Note if SDA is high, but keep clocking to sync slave */ - was_high |= sda_in(dd, target, 0); - } - - if (was_high) { - /* - * We saw a high, which we hope means the slave is sync'd. - * Issue START, STOP, pause for T_BUF. - */ - - pins = hfi1_gpio_mod(dd, target, 0, 0, 0); - if ((pins & mask) != mask) - dd_dev_err(dd, "GPIO pins not at rest: %d\n", - pins & mask); - /* Drop SDA to issue START */ - udelay(1); /* Guarantee .6 uSec setup */ - sda_out(dd, target, 0); - udelay(1); /* Guarantee .6 uSec hold */ - /* At this point, SCL is high, SDA low. Raise SDA for STOP */ - sda_out(dd, target, 1); - udelay(TWSI_BUF_WAIT_USEC); } - return !was_high; + return -EIO; } #define HFI1_TWSI_START 0x100 diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/staging/rdma/hfi1/twsi.h index 5907e029613d..6cb30e59b00f 100644 --- a/drivers/staging/rdma/hfi1/twsi.h +++ b/drivers/staging/rdma/hfi1/twsi.h @@ -7,7 +7,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -54,8 +54,9 @@ struct hfi1_devdata; -/* Bit position of SDA pin in ASIC_QSFP* registers */ +/* Bit position of SDA/SCL pins in ASIC_QSFP* registers */ #define GPIO_SDA_NUM 1 +#define GPIO_SCL_NUM 0 /* these functions must be called with qsfp_lock held */ int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target); -- cgit v1.2.3-59-g8ed1b From 2ef907b80d1cc289a4352287bbb9fc5a19eed212 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:36:22 -0800 Subject: staging/rdma/hfi1: Fix snoop packet length calculation The LRH has a 12 bit packet length field, not 11 bit. This caused a snoop packet length miscalculation leading to a crash when sending a large ping over IPoIB while running opapacketcapture. Reviewed-by: Mike Marciniszyn Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/diag.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index d9889d430698..fafb3d7f8367 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -85,10 +85,9 @@ static u8 snoop_flags; /* * Extract packet length from LRH header. - * Why & 0x7FF? Because len is only 11 bits in case it wasn't 0'd we throw the - * bogus bits away. 
This is in Dwords so multiply by 4 to get size in bytes + * This is in Dwords so multiply by 4 to get size in bytes */ -#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0x7FF)) << 2) +#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2) enum hfi1_filter_status { HFI1_FILTER_HIT, -- cgit v1.2.3-59-g8ed1b From d05de3413da29d635ccaff528af6e37dd932b393 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Wed, 3 Feb 2016 14:36:31 -0800 Subject: staging/rdma/hfi1: Clean up init_cntrs() Clean up init_cntrs() by removing unnecessary memsets and debug statements Suggested-by: Dan Carpenter Reviewed-by: Dennis Dalessandro Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 5b8fb021e0ae..8c06e3b0185a 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -11813,17 +11813,14 @@ static int init_cntrs(struct hfi1_devdata *dd) sz = 0; for (i = 0; i < DEV_CNTR_LAST; i++) { - hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name); if (dev_cntrs[i].flags & CNTR_DISABLED) { hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name); continue; } if (dev_cntrs[i].flags & CNTR_VL) { - hfi1_dbg_early("\tProcessing VL cntr\n"); dev_cntrs[i].offset = dd->ndevcntrs; for (j = 0; j < C_VL_COUNT; j++) { - memset(name, '\0', C_MAX_NAME); snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, vl_from_idx(j)); @@ -11832,16 +11829,11 @@ static int init_cntrs(struct hfi1_devdata *dd) if (dev_cntrs[i].flags & CNTR_32BIT) sz += bit_type_32_sz; sz++; - hfi1_dbg_early("\t\t%s\n", name); dd->ndevcntrs++; } } else if (dev_cntrs[i].flags & CNTR_SDMA) { - hfi1_dbg_early( - "\tProcessing per SDE counters chip enginers %u\n", - dd->chip_sdma_engines); dev_cntrs[i].offset = dd->ndevcntrs; for (j = 0; j < dd->chip_sdma_engines; j++) { - memset(name, '\0', C_MAX_NAME); snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, j); sz += strlen(name); @@ -11849,7 +11841,6 @@ static int init_cntrs(struct hfi1_devdata *dd) if (dev_cntrs[i].flags & CNTR_32BIT) sz += bit_type_32_sz; sz++; - hfi1_dbg_early("\t\t%s\n", name); dd->ndevcntrs++; } } else { @@ -11860,7 +11851,6 @@ static int init_cntrs(struct hfi1_devdata *dd) sz += bit_type_32_sz; dev_cntrs[i].offset = dd->ndevcntrs; dd->ndevcntrs++; - hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name); } } @@ -11886,7 +11876,6 @@ static int init_cntrs(struct hfi1_devdata *dd) /* Nothing */ } else if (dev_cntrs[i].flags & CNTR_VL) { for (j = 0; j < C_VL_COUNT; j++) { - memset(name, '\0', C_MAX_NAME); snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, vl_from_idx(j)); @@ -11903,7 +11892,6 @@ static int init_cntrs(struct hfi1_devdata *dd) } } else if (dev_cntrs[i].flags & CNTR_SDMA) { for (j = 0; j < dd->chip_sdma_engines; j++) { - memset(name, '\0', C_MAX_NAME); snprintf(name, C_MAX_NAME, "%s%d", dev_cntrs[i].name, j); memcpy(p, name, strlen(name)); @@ -11950,17 +11938,14 @@ static int init_cntrs(struct hfi1_devdata *dd) sz = 0; dd->nportcntrs = 0; for (i = 0; i < PORT_CNTR_LAST; i++) { - hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name); if (port_cntrs[i].flags & CNTR_DISABLED) { hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name); continue; } if (port_cntrs[i].flags & CNTR_VL) { - hfi1_dbg_early("\tProcessing VL cntr\n"); port_cntrs[i].offset = dd->nportcntrs; for (j = 0; j < C_VL_COUNT; j++) { - 
memset(name, '\0', C_MAX_NAME); snprintf(name, C_MAX_NAME, "%s%d", port_cntrs[i].name, vl_from_idx(j)); @@ -11969,7 +11954,6 @@ static int init_cntrs(struct hfi1_devdata *dd) if (port_cntrs[i].flags & CNTR_32BIT) sz += bit_type_32_sz; sz++; - hfi1_dbg_early("\t\t%s\n", name); dd->nportcntrs++; } } else { @@ -11980,7 +11964,6 @@ static int init_cntrs(struct hfi1_devdata *dd) sz += bit_type_32_sz; port_cntrs[i].offset = dd->nportcntrs; dd->nportcntrs++; - hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name); } } @@ -11997,7 +11980,6 @@ static int init_cntrs(struct hfi1_devdata *dd) if (port_cntrs[i].flags & CNTR_VL) { for (j = 0; j < C_VL_COUNT; j++) { - memset(name, '\0', C_MAX_NAME); snprintf(name, C_MAX_NAME, "%s%d", port_cntrs[i].name, vl_from_idx(j)); -- cgit v1.2.3-59-g8ed1b From 251314635ad5043e9438a18b2de17ddf86309641 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Wed, 3 Feb 2016 14:36:40 -0800 Subject: staging/rdma/hfi1: Support query gid in rdmavt Query gid is in rdmavt, but still relies on the driver to maintain the guid table. Add the necessary driver call back and remove the existing verb handler. Reviewed-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 32 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 20 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 8f351bc157df..a85fd81505c7 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1345,28 +1345,20 @@ static int modify_port(struct ib_device *ibdev, u8 port, return ret; } -static int query_gid(struct ib_device *ibdev, u8 port, - int index, union ib_gid *gid) +static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp, + int guid_index, __be64 *guid) { - struct hfi1_devdata *dd = dd_from_ibdev(ibdev); - int ret = 0; + struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp); + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - if (!port || port > dd->num_pports) - ret = -EINVAL; - else { - struct hfi1_ibport *ibp = to_iport(ibdev, port); - struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - - gid->global.subnet_prefix = ibp->rvp.gid_prefix; - if (index == 0) - gid->global.interface_id = cpu_to_be64(ppd->guid); - else if (index < HFI1_GUIDS_PER_PORT) - gid->global.interface_id = ibp->guids[index - 1]; - else - ret = -EINVAL; - } + if (guid_index == 0) + *guid = cpu_to_be64(ppd->guid); + else if (guid_index < HFI1_GUIDS_PER_PORT) + *guid = ibp->guids[guid_index - 1]; + else + return -EINVAL; - return ret; + return 0; } /* @@ -1538,7 +1530,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->modify_device = modify_device; ibdev->query_port = query_port; ibdev->modify_port = modify_port; - ibdev->query_gid = query_gid; /* keep process mad in the driver */ ibdev->process_mad = hfi1_process_mad; @@ -1555,6 +1546,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev; dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; + dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be; /* * Fill in rvt info device attributes. 
*/ -- cgit v1.2.3-59-g8ed1b From 45b59eefcca95a3dc75b68e063390f7a1aedd90b Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Wed, 3 Feb 2016 14:36:49 -0800 Subject: staging/rdma/hfi1: Remove modify_port and port_immutable functions Delete code from query_port which has been moved into rvt_query_port Create a call back function to shut down a port which may be called from rvt_modify_port Signed-off-by: Harish Chegondi Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 5 +++ drivers/staging/rdma/hfi1/mad.c | 5 ++- drivers/staging/rdma/hfi1/verbs.c | 71 ++++++++++----------------------------- drivers/staging/rdma/hfi1/verbs.h | 2 +- 4 files changed, 28 insertions(+), 55 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 18508c9423a9..363e6ef3dafd 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1536,6 +1536,11 @@ static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp) return container_of(ibp, struct hfi1_pportdata, ibport_data); } +static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi) +{ + return container_of(rdi, struct hfi1_ibdev, rdi); +} + static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 6976f93bd36f..3df1c8eeb4f0 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -234,9 +234,12 @@ static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad, /* * Send a Port Capability Mask Changed trap (ch. 14.3.11). */ -void hfi1_cap_mask_chg(struct hfi1_ibport *ibp) +void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num) { struct opa_mad_notice_attr data; + struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); + struct hfi1_devdata *dd = dd_from_dev(verbs_dev); + struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data; u32 lid = ppd_from_ibp(ibp)->lid; memset(&data, 0, sizeof(data)); diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index a85fd81505c7..0ee6b1debd05 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1220,33 +1220,24 @@ static inline u16 opa_width_to_ib(u16 in) } } -static int query_port(struct ib_device *ibdev, u8 port, +static int query_port(struct rvt_dev_info *rdi, u8 port_num, struct ib_port_attr *props) { - struct hfi1_devdata *dd = dd_from_ibdev(ibdev); - struct hfi1_ibport *ibp = to_iport(ibdev, port); - struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); + struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); + struct hfi1_devdata *dd = dd_from_dev(verbs_dev); + struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; u16 lid = ppd->lid; - memset(props, 0, sizeof(*props)); props->lid = lid ? 
lid : 0; props->lmc = ppd->lmc; - props->sm_lid = ibp->rvp.sm_lid; - props->sm_sl = ibp->rvp.sm_sl; /* OPA logical states match IB logical states */ props->state = driver_lstate(ppd); props->phys_state = hfi1_ibphys_portstate(ppd); - props->port_cap_flags = ibp->rvp.port_cap_flags; props->gid_tbl_len = HFI1_GUIDS_PER_PORT; - props->max_msg_sz = 0x80000000; - props->pkey_tbl_len = hfi1_get_npkeys(dd); - props->bad_pkey_cntr = ibp->rvp.pkey_violations; - props->qkey_viol_cntr = ibp->rvp.qkey_violations; props->active_width = (u8)opa_width_to_ib(ppd->link_width_active); /* see rate_show() in ib core/sysfs.c */ props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active); props->max_vl_num = ppd->vls_supported; - props->init_type_reply = 0; /* Once we are a "first class" citizen and have added the OPA MTUs to * the core we can advertise the larger MTU enum to the ULPs, for now @@ -1260,27 +1251,6 @@ static int query_port(struct ib_device *ibdev, u8 port, 4096 : hfi1_max_mtu), IB_MTU_4096); props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : mtu_to_enum(ppd->ibmtu, IB_MTU_2048); - props->subnet_timeout = ibp->rvp.subnet_timeout; - - return 0; -} - -static int port_immutable(struct ib_device *ibdev, u8 port_num, - struct ib_port_immutable *immutable) -{ - struct ib_port_attr attr; - int err; - - err = query_port(ibdev, port_num, &attr); - if (err) - return err; - - memset(immutable, 0, sizeof(*immutable)); - - immutable->pkey_tbl_len = attr.pkey_tbl_len; - immutable->gid_tbl_len = attr.gid_tbl_len; - immutable->core_cap_flags = RDMA_CORE_PORT_INTEL_OPA; - immutable->max_mad_size = OPA_MGMT_MAD_SIZE; return 0; } @@ -1324,24 +1294,16 @@ bail: return ret; } -static int modify_port(struct ib_device *ibdev, u8 port, - int port_modify_mask, struct ib_port_modify *props) +static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num) { - struct hfi1_ibport *ibp = to_iport(ibdev, port); - struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - int ret = 0; + struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi); + struct hfi1_devdata *dd = dd_from_dev(verbs_dev); + struct hfi1_pportdata *ppd = &dd->pport[port_num - 1]; + int ret; - ibp->rvp.port_cap_flags |= props->set_port_cap_mask; - ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask; - if (props->set_port_cap_mask || props->clr_port_cap_mask) - hfi1_cap_mask_chg(ibp); - if (port_modify_mask & IB_PORT_SHUTDOWN) { - set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0, - OPA_LINKDOWN_REASON_UNKNOWN); - ret = set_link_state(ppd, HLS_DN_DOWNDEF); - } - if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) - ibp->rvp.qkey_violations = 0; + set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0, + OPA_LINKDOWN_REASON_UNKNOWN); + ret = set_link_state(ppd, HLS_DN_DOWNDEF); return ret; } @@ -1528,12 +1490,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ibdev->phys_port_cnt = dd->num_pports; ibdev->dma_device = &dd->pcidev->dev; ibdev->modify_device = modify_device; - ibdev->query_port = query_port; - ibdev->modify_port = modify_port; /* keep process mad in the driver */ ibdev->process_mad = hfi1_process_mad; - ibdev->get_port_immutable = port_immutable; strncpy(ibdev->node_desc, init_utsname()->nodename, sizeof(ibdev->node_desc)); @@ -1547,6 +1506,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah; dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah; dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be; + dd->verbs_dev.rdi.driver_f.query_port_state = query_port; + 
dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port; + dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg; /* * Fill in rvt info device attributes. */ @@ -1564,6 +1526,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK; dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT; dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK; + dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA; + dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE; + dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc; dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free; dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index afb2d7fd6ae6..a157e6458ab0 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -327,7 +327,7 @@ static inline int hfi1_send_ok(struct rvt_qp *qp) */ void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl, u32 qp1, u32 qp2, u16 lid1, u16 lid2); -void hfi1_cap_mask_chg(struct hfi1_ibport *ibp); +void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num); void hfi1_sys_guid_chg(struct hfi1_ibport *ibp); void hfi1_node_desc_chg(struct hfi1_ibport *ibp); int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, -- cgit v1.2.3-59-g8ed1b From e1bf0d5ecdc49cd4e2014da0d60efa74f5714fba Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Wed, 3 Feb 2016 14:36:58 -0800 Subject: staging/rdma/hfi1, IB/core: Fix LinkDownReason define for consistency LinkDownReason LocalMediaNotInstalled lacked an underscore and was inconsistent with other defines in the same family. This patch fixes this. Reviewed-by: Ira Weiny Signed-off-by: Easwar Hariharan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 4 ++-- drivers/staging/rdma/hfi1/platform.c | 2 +- include/rdma/opa_port_info.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 8c06e3b0185a..f31cc238d6db 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -5950,12 +5950,12 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) if ((ppd->offline_disabled_reason > HFI1_ODR_MASK( - OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED)) || + OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) || (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))) ppd->offline_disabled_reason = HFI1_ODR_MASK( - OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED); + OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED); if (ppd->host_link_state == HLS_DN_POLL) { /* diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c index c3df1d892754..506a82766b33 100644 --- a/drivers/staging/rdma/hfi1/platform.c +++ b/drivers/staging/rdma/hfi1/platform.c @@ -816,7 +816,7 @@ void tune_serdes(struct hfi1_pportdata *ppd) } else ppd->offline_disabled_reason = HFI1_ODR_MASK( - OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED); + OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED); break; default: dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__); diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index a0fa975cd1c1..2b95c2c336eb 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h @@ -97,7 +97,7 @@ #define OPA_LINKDOWN_REASON_WIDTH_POLICY 41 /* 42-48 reserved */ #define 
OPA_LINKDOWN_REASON_DISCONNECTED 49 -#define OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED 50 +#define OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED 50 #define OPA_LINKDOWN_REASON_NOT_INSTALLED 51 #define OPA_LINKDOWN_REASON_CHASSIS_CONFIG 52 /* 53 reserved */ -- cgit v1.2.3-59-g8ed1b From 0840aea98cdf9024aff7f69e1167c4648665d48b Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:37:06 -0800 Subject: staging/rdma/hfi1: Improve performance of user SDMA To facilitate locked page counting, the user SDMA routines would maintain a list of io vectors, which was appended to in the completion callback; the associated pages were then unpinned during the next call into the kernel. Since the size of this list was unbounded, doing this was bad for performance because the driver ended up spending too much time freeing the io vectors. This commit changes how the io vector freeing is done by moving the actual page unpinning into the callback and maintaining a count of unpinned pages. This count can then be used during the next call into the kernel to update the mm->pinned_vm variable (since that requires process context and the ability to sleep). Reviewed-by: Mike Marciniszyn Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_sdma.c | 55 ++++++++++++----------------------- drivers/staging/rdma/hfi1/user_sdma.h | 7 ++--- 2 files changed, 22 insertions(+), 40 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index 0c32eaf25afc..55c7e6a4eb1a 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -236,8 +236,6 @@ struct user_sdma_request { u64 seqcomp; u64 seqsubmitted; struct list_head txps; - spinlock_t txcmp_lock; /* protect txcmp list */ - struct list_head txcmp; unsigned long flags; /* status of the last txreq completed */ int status; @@ -381,14 +379,12 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp) goto pq_reqs_nomem; INIT_LIST_HEAD(&pq->list); - INIT_LIST_HEAD(&pq->iovec_list); pq->dd = dd; pq->ctxt = uctxt->ctxt; pq->subctxt = fd->subctxt; pq->n_max_reqs = hfi1_sdma_comp_ring_size; pq->state = SDMA_PKT_Q_INACTIVE; atomic_set(&pq->n_reqs, 0); - spin_lock_init(&pq->iovec_lock); init_waitqueue_head(&pq->wait); iowait_init(&pq->busy, 0, NULL, defer_packet_queue, @@ -444,7 +440,6 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_user_sdma_pkt_q *pq; - struct user_sdma_iovec *iov; unsigned long flags; hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit, @@ -460,15 +455,6 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd) wait_event_interruptible( pq->wait, (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); - /* Unpin any left over buffers.
*/ - while (!list_empty(&pq->iovec_list)) { - spin_lock_irqsave(&pq->iovec_lock, flags); - iov = list_first_entry(&pq->iovec_list, - struct user_sdma_iovec, list); - list_del_init(&iov->list); - spin_unlock_irqrestore(&pq->iovec_lock, flags); - unpin_vector_pages(iov); - } kfree(pq->reqs); kmem_cache_destroy(pq->txreq_cache); kfree(pq); @@ -492,11 +478,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, struct hfi1_user_sdma_pkt_q *pq = fd->pq; struct hfi1_user_sdma_comp_q *cq = fd->cq; struct hfi1_devdata *dd = pq->dd; - unsigned long idx = 0, flags; + unsigned long idx = 0, unpinned; u8 pcount = initial_pkt_count; struct sdma_req_info info; struct user_sdma_request *req; - struct user_sdma_iovec *ioptr; u8 opcode, sc, vl; if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { @@ -515,13 +500,11 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, } /* Process any completed vectors */ - while (!list_empty(&pq->iovec_list)) { - spin_lock_irqsave(&pq->iovec_lock, flags); - ioptr = list_first_entry(&pq->iovec_list, - struct user_sdma_iovec, list); - list_del_init(&ioptr->list); - spin_unlock_irqrestore(&pq->iovec_lock, flags); - unpin_vector_pages(ioptr); + unpinned = xchg(&pq->unpinned, 0); + if (unpinned) { + down_write(¤t->mm->mmap_sem); + current->mm->pinned_vm -= unpinned; + up_write(¤t->mm->mmap_sem); } trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt, @@ -1075,10 +1058,6 @@ static int pin_vector_pages(struct user_sdma_request *req, unpin_vector_pages(iovec); return -EFAULT; } - /* - * Get a reference to the process's mm so we can use it when - * unpinning the io vectors. - */ return 0; } @@ -1368,7 +1347,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status, struct hfi1_user_sdma_pkt_q *pq; struct hfi1_user_sdma_comp_q *cq; u16 idx; - int i; + int i, j; if (!tx->req) return; @@ -1379,15 +1358,19 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status, /* * If we have any io vectors associated with this txreq, - * check whether they need to be 'freed'. We can't free them - * here because the unpin function needs to be able to sleep. + * check whether they need to be 'freed'. */ for (i = tx->idx; i >= 0; i--) { if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT) { - spin_lock(&pq->iovec_lock); - list_add_tail(&tx->iovecs[i].vec->list, - &pq->iovec_list); - spin_unlock(&pq->iovec_lock); + struct user_sdma_iovec *vec = + tx->iovecs[i].vec; + + for (j = 0; j < vec->npages; j++) + put_page(vec->pages[j]); + xadd(&pq->unpinned, vec->npages); + kfree(vec->pages); + vec->pages = NULL; + vec->npages = 0; } } diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h index 317f0e8cffb6..7ef31a6b6dbe 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.h +++ b/drivers/staging/rdma/hfi1/user_sdma.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015, 2016 Intel Corporation. 
* Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -69,8 +69,7 @@ struct hfi1_user_sdma_pkt_q { struct iowait busy; unsigned state; wait_queue_head_t wait; - struct list_head iovec_list; - spinlock_t iovec_lock; /* protect iovec_list */ + unsigned long unpinned; }; struct hfi1_user_sdma_comp_q { -- cgit v1.2.3-59-g8ed1b From 3fafebb6f6c7084c899924b51c0716a778915c3b Mon Sep 17 00:00:00 2001 From: Sadanand Warrier Date: Wed, 3 Feb 2016 14:37:15 -0800 Subject: staging/rdma/hfi1: Add credits for VL0 to VL7 in snoop mode Add a new option to the snoop ioctl which allows credits to be allocated across all VLs. Previously only VL0 and VL15 had credits allocated. The new option used in the ioctl HFI1_SNOOP_IOCSET_OPTS allows credits to be allocated so that VL15 will have at least 8.5KB credits and the other VLs will have the rest of the credits divided equally across themselves. The total number of credits is stored in the upper 16 bits of the integer passed, and the cumulative value should ensure that VL15 has at least 8.5KB and each data VL a minimum of 2KB + 128 bytes. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Sadanand Warrier Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 3 +- drivers/staging/rdma/hfi1/diag.c | 64 ++++++++++++++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/hfi.h | 1 + 3 files changed, 66 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index f31cc238d6db..77b07c3a85a7 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -10711,8 +10711,7 @@ static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, * raise = if the new limit is higher than the current value (may be changed * earlier in the algorithm), set the new limit to the new value */ -static int set_buffer_control(struct hfi1_devdata *dd, - struct buffer_control *new_bc) +int set_buffer_control(struct hfi1_devdata *dd, struct buffer_control *new_bc) { u64 changing_mask, ld_mask, stat_mask; int change_count; diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index fafb3d7f8367..bfce812c71ff 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -80,6 +80,7 @@ /* Snoop option mask */ #define SNOOP_DROP_SEND BIT(0) #define SNOOP_USE_METADATA BIT(1) +#define SNOOP_SET_VL0TOVL15 BIT(2) static u8 snoop_flags; @@ -965,6 +966,65 @@ static ssize_t hfi1_snoop_read(struct file *fp, char __user *data, return ret; } +/** + * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others + * @ppd : ptr to hfi1 port data + * @value : options from user space + * + * Assumes the rest of the CM credit registers are zero from a + * previous global or credit reset. + * Leave shared count at zero for both global and all vls. + * In snoop mode ideally we don't use shared credits + * Reserve 8.5k for VL15 + * If total credits less than 8.5kbytes return error. + * Divide the rest of the credits across VL0 to VL7 and if + * each of these levels has less than 34 credits (at least 2048 + 128 bytes) + * return with an error. + * The credit registers will be reset to zero on link negotiation or link up + * so this function should be activated from user space only if the port has + * gone past link negotiation and link up.
+ * + * Return -- 0 if successful else error condition + * + */ +static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd, + int value) +{ +#define OPA_MIN_PER_VL_CREDITS 34 /* 2048 + 128 bytes */ + struct buffer_control t; + int i; + struct hfi1_devdata *dd = ppd->dd; + u16 total_credits = (value >> 16) & 0xffff; + u16 vl15_credits = dd->vl15_init / 2; + u16 per_vl_credits; + __be16 be_per_vl_credits; + + if (!(ppd->host_link_state & HLS_UP)) + goto err_exit; + if (total_credits < vl15_credits) + goto err_exit; + + per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL; + + if (per_vl_credits < OPA_MIN_PER_VL_CREDITS) + goto err_exit; + + memset(&t, 0, sizeof(t)); + be_per_vl_credits = cpu_to_be16(per_vl_credits); + + for (i = 0; i < TXE_NUM_DATA_VL; i++) + t.vl[i].dedicated = be_per_vl_credits; + + t.vl[15].dedicated = cpu_to_be16(vl15_credits); + return set_buffer_control(ppd->dd, &t); + +err_exit: + snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d", + ppd->host_link_state, total_credits, vl15_credits); + + return -EINVAL; +} + static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { struct hfi1_devdata *dd; @@ -1191,6 +1251,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) snoop_flags |= SNOOP_DROP_SEND; if (value & SNOOP_USE_METADATA) snoop_flags |= SNOOP_USE_METADATA; + if (value & (SNOOP_SET_VL0TOVL15)) { + ppd = &dd->pport[0]; /* first port will do */ + ret = hfi1_assign_snoop_link_credits(ppd, value); + } break; default: return -ENOTTY; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 363e6ef3dafd..023c50460e13 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1515,6 +1515,7 @@ int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, const void *from, size_t count); +int set_buffer_control(struct hfi1_devdata *dd, struct buffer_control *bc); static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) { -- cgit v1.2.3-59-g8ed1b From e154f12716ffbbd7bab52b48b8e78142a22a59c0 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:37:24 -0800 Subject: staging/rdma/hfi1: Make EPROM check per device Add a variable eprom_available to each device, replacing the global of the same name. This is to allow multiple HFI devices with different EPROM availability to operate correctly on the the same system. Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/eprom.c | 16 +++++----------- drivers/staging/rdma/hfi1/hfi.h | 1 + 2 files changed, 6 insertions(+), 11 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c index 8104a1121bf2..29958aa4e4fd 100644 --- a/drivers/staging/rdma/hfi1/eprom.c +++ b/drivers/staging/rdma/hfi1/eprom.c @@ -106,10 +106,8 @@ /* * Use the EP mutex to guard against other callers from within the driver. - * Also covers usage of eprom_available. */ static DEFINE_MUTEX(eprom_mutex); -static int eprom_available; /* default: not available */ /* * Turn on external enable line that allows writing on the flash. 
@@ -376,15 +374,13 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) return -EINVAL; } + /* some devices do not have an EPROM */ + if (!dd->eprom_available) + return -EOPNOTSUPP; + /* lock against other callers touching the ASIC block */ mutex_lock(&eprom_mutex); - /* some platforms do not have an EPROM */ - if (!eprom_available) { - ret = -ENOSYS; - goto done_asic; - } - /* lock against the other HFI on another OS */ ret = acquire_hw_mutex(dd); if (ret) { @@ -458,8 +454,6 @@ int eprom_init(struct hfi1_devdata *dd) /* lock against other callers */ mutex_lock(&eprom_mutex); - if (eprom_available) /* already initialized */ - goto done_asic; /* * Lock against the other HFI on another OS - the mutex above @@ -487,7 +481,7 @@ int eprom_init(struct hfi1_devdata *dd) /* wake the device with command "release powerdown NoID" */ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID); - eprom_available = 1; + dd->eprom_available = true; release_hw_mutex(dd); done_asic: mutex_unlock(&eprom_mutex); diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 023c50460e13..585485bb6e77 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1145,6 +1145,7 @@ struct hfi1_devdata { __le64 *rcvhdrtail_dummy_kvaddr; dma_addr_t rcvhdrtail_dummy_physaddr; + bool eprom_available; /* true if EPROM is available for this device */ bool aspm_supported; /* Does HW support ASPM */ bool aspm_enabled; /* ASPM state: enabled/disabled */ /* Serialize ASPM enable/disable between multiple verbs contexts */ -- cgit v1.2.3-59-g8ed1b From 7580fc31dfbfcacab2a3243342d4b5de4b494cbf Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Wed, 3 Feb 2016 14:37:32 -0800 Subject: staging/rdma/hfi1: Remove unused variable nsbr Remove unused nsbr count from PCIe Gen3 code Reviewed-by: Stuart Summers Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/pcie.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 3d0c2e21d3e1..5642d859fc7c 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -955,7 +955,6 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd) uint default_pset; u16 target_vector, target_speed; u16 lnkctl2, vendor; - u8 nsbr = 1; u8 div; const u8 (*eq)[3]; int return_error = 0; @@ -988,12 +987,6 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd) return 0; } - /* - * A0 needs an additional SBR - */ - if (is_ax(dd)) - nsbr++; - /* * Do the Gen3 transition. Steps are those of the PCIe Gen3 * recipe. -- cgit v1.2.3-59-g8ed1b From a402d6ab409e0e943150a803b94dee76c9de5c27 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Wed, 3 Feb 2016 14:37:41 -0800 Subject: staging/rdma/hfi1: Fix bug that could block the process on context exit A race was discovered in the user SDMA code, which could result in a process being stuck in the kernel call indefinitely in certain error conditions. If, during the processing of a user SDMA request, there was an error *and* all outstanding SDMA descriptors had been completed by the time that error case was handled in the calling function, the state of the packet queue would not get correctly updated, resulting in the process subsequently getting stuck, thinking that there are more descriptors to be completed. To handle this scenario, the driver now checks the submitted packet count vs. the completed count.
If all submitted packets have also been completed, the driver can safely free the request and signal user level. Otherwise, this will be handled by the completion callback. Reviewed-by: Dennis Dalessandro Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_sdma.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index 55c7e6a4eb1a..ac903099843e 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -678,7 +678,6 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, ret = user_sdma_send_pkts(req, pcount); if (unlikely(ret < 0 && ret != -EBUSY)) { req->status = ret; - atomic_dec(&pq->n_reqs); goto free_req; } @@ -703,6 +702,9 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, if (ret != -EBUSY) { req->status = ret; set_bit(SDMA_REQ_DONE_ERROR, &req->flags); + if (ACCESS_ONCE(req->seqcomp) == + req->seqsubmitted - 1) + goto free_req; return ret; } wait_event_interruptible_timeout( @@ -717,6 +719,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, return 0; free_req: user_sdma_free_request(req, true); + pq_update(pq); set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); return ret; } -- cgit v1.2.3-59-g8ed1b From eb2e557c3663bb43a49f223b49e5101bbfc1d546 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Wed, 3 Feb 2016 14:37:50 -0800 Subject: staging/rdma/hfi1: Change for data type of port number This commit changes the data type for port_num in pma_get_opa_porterrors() from unsigned long to u8. Reviewed-by: Ira Weiny Signed-off-by: Sebastian Sanchez Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mad.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 3df1c8eeb4f0..97bdcb75854c 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -2687,7 +2687,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, { size_t response_data_size; struct _port_ectrs *rsp; - unsigned long port_num; + u8 port_num; struct opa_port_error_counters64_msg *req; struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 num_ports; @@ -2728,7 +2728,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask)); - if ((u8)port_num != port) { + if (port_num != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } @@ -2739,7 +2739,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, ppd = ppd_from_ibp(ibp); memset(rsp, 0, sizeof(*rsp)); - rsp->port_number = (u8)port_num; + rsp->port_number = port_num; rsp->port_rcv_constraint_errors = cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR, @@ -2807,7 +2807,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u64 port_mask; u32 num_ports; - unsigned long port_num; + u8 port_num; u8 num_pslm; u64 reg; @@ -2840,7 +2840,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask)); - if ((u8)port_num != port) { + if (port_num != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } @@ -3048,7 
+3048,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u64 port_mask; u32 num_ports; - unsigned long port_num; + u8 port_num; u8 num_pslm; u32 error_info_select; @@ -3073,7 +3073,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask)); - if ((u8)port_num != port) { + if (port_num != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } -- cgit v1.2.3-59-g8ed1b From 5950e9b184ae47c6e4ec9cfb0dc698194d524f80 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Wed, 3 Feb 2016 14:37:59 -0800 Subject: staging/rdma/hfi1: Replacement of goto's for break/returns It replaces gotos with break and return statements in process_perf_opa(). Reviewed-by: Ira Weiny Signed-off-by: Sebastian Sanchez Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mad.c | 56 ++++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 26 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 97bdcb75854c..a31557911f54 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -3828,7 +3828,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, if (smp->class_version != OPA_SMI_CLASS_VERSION) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_mad_hdr *)smp); - goto bail; + return ret; } ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey, smp->route.dr.dr_slid, smp->route.dr.return_path, @@ -3854,7 +3854,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, smp->route.dr.return_path, smp->hop_cnt); ret = IB_MAD_RESULT_FAILURE; - goto bail; + return ret; } *resp_len = opa_get_smp_header_size(smp); @@ -3866,23 +3866,25 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, clear_opa_smp_data(smp); ret = subn_get_opa_sma(attr_id, smp, am, data, ibdev, port, resp_len); - goto bail; + break; case OPA_ATTRIB_ID_AGGREGATE: ret = subn_get_opa_aggregate(smp, ibdev, port, resp_len); - goto bail; + break; } + break; case IB_MGMT_METHOD_SET: switch (attr_id) { default: ret = subn_set_opa_sma(attr_id, smp, am, data, ibdev, port, resp_len); - goto bail; + break; case OPA_ATTRIB_ID_AGGREGATE: ret = subn_set_opa_aggregate(smp, ibdev, port, resp_len); - goto bail; + break; } + break; case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_REPORT: case IB_MGMT_METHOD_REPORT_RESP: @@ -3893,13 +3895,13 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, * Just tell the caller to process it normally.
*/ ret = IB_MAD_RESULT_SUCCESS; - goto bail; + break; default: smp->status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_mad_hdr *)smp); + break; } -bail: return ret; } @@ -3915,7 +3917,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, if (smp->class_version != 1) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_mad_hdr *)smp); - goto bail; + return ret; } ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, @@ -3942,7 +3944,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, (__force __be32)smp->dr_slid, smp->return_path, smp->hop_cnt); ret = IB_MAD_RESULT_FAILURE; - goto bail; + return ret; } switch (smp->method) { @@ -3950,15 +3952,15 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, switch (smp->attr_id) { case IB_SMP_ATTR_NODE_INFO: ret = subn_get_nodeinfo(smp, ibdev, port); - goto bail; + break; default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)smp); - goto bail; + break; } + break; } -bail: return ret; } @@ -3983,44 +3985,46 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port, switch (pmp->mad_hdr.attr_id) { case IB_PMA_CLASS_PORT_INFO: ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len); - goto bail; + break; case OPA_PM_ATTRIB_ID_PORT_STATUS: ret = pma_get_opa_portstatus(pmp, ibdev, port, resp_len); - goto bail; + break; case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS: ret = pma_get_opa_datacounters(pmp, ibdev, port, resp_len); - goto bail; + break; case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS: ret = pma_get_opa_porterrors(pmp, ibdev, port, resp_len); - goto bail; + break; case OPA_PM_ATTRIB_ID_ERROR_INFO: ret = pma_get_opa_errorinfo(pmp, ibdev, port, resp_len); - goto bail; + break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)pmp); - goto bail; + break; } + break; case IB_MGMT_METHOD_SET: switch (pmp->mad_hdr.attr_id) { case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS: ret = pma_set_opa_portstatus(pmp, ibdev, port, resp_len); - goto bail; + break; case OPA_PM_ATTRIB_ID_ERROR_INFO: ret = pma_set_opa_errorinfo(pmp, ibdev, port, resp_len); - goto bail; + break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_mad_hdr *)pmp); - goto bail; + break; } + break; case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_GET_RESP: @@ -4030,14 +4034,14 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port, * Just tell the caller to process it normally. */ ret = IB_MAD_RESULT_SUCCESS; - goto bail; + break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_mad_hdr *)pmp); + break; } -bail: return ret; } @@ -4102,12 +4106,12 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port, case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_LID_ROUTED: ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); - goto bail; + break; default: ret = IB_MAD_RESULT_SUCCESS; + break; } -bail: return ret; } -- cgit v1.2.3-59-g8ed1b From b8d114ebb6fb6dfb61a6f7bd5b2bef529015b0f0 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Wed, 3 Feb 2016 14:38:07 -0800 Subject: staging/rdma/hfi1: Adding support for hfi counters via sysfs It enables access to counters in /sys/class/infiniband/hfi1_0/ports/1/counters by providing infrastructure when PMA queries occur. Counters symbol_error and VL15_dropped are not supported in OPA, therefore, 0 will always be returned. 
In addition, two common routines (pma_get_opa_port_dctrs, pma_get_opa_port_ectrs) were created to query counters to avoid code duplication. Reviewed-by: Mike Marciniszyn Signed-off-by: Sebastian Sanchez Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mad.c | 306 +++++++++++++++++++++++++++++++++------- 1 file changed, 252 insertions(+), 54 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index a31557911f54..2fcc9f3290d7 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -2524,6 +2524,27 @@ static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, } } +static void pma_get_opa_port_dctrs(struct ib_device *ibdev, + struct _port_dctrs *rsp) +{ + struct hfi1_devdata *dd = dd_from_ibdev(ibdev); + + rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, + CNTR_INVALID_VL)); + rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, + CNTR_INVALID_VL)); + rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS, + CNTR_INVALID_VL)); + rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS, + CNTR_INVALID_VL)); + rsp->port_multicast_xmit_pkts = + cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS, + CNTR_INVALID_VL)); + rsp->port_multicast_rcv_pkts = + cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS, + CNTR_INVALID_VL)); +} + static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, struct ib_device *ibdev, u8 port, u32 *resp_len) { @@ -2592,34 +2613,14 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, */ hfi1_read_link_quality(dd, &lq); rsp->link_quality_indicator = cpu_to_be32((u32)lq); + pma_get_opa_port_dctrs(ibdev, rsp); - /* rsp->sw_port_congestion is 0 for HFIs */ - /* rsp->port_xmit_time_cong is 0 for HFIs */ - /* rsp->port_xmit_wasted_bw ??? */ - /* rsp->port_xmit_wait_data ??? 
*/ - /* rsp->port_mark_fecn is 0 for HFIs */ - - rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, - CNTR_INVALID_VL)); - rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, - CNTR_INVALID_VL)); - rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS, - CNTR_INVALID_VL)); - rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS, - CNTR_INVALID_VL)); - rsp->port_multicast_xmit_pkts = - cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS, - CNTR_INVALID_VL)); - rsp->port_multicast_rcv_pkts = - cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS, - CNTR_INVALID_VL)); rsp->port_xmit_wait = cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL)); rsp->port_rcv_fecn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL)); rsp->port_rcv_becn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL)); - rsp->port_error_counter_summary = cpu_to_be64(get_error_counter_summary(ibdev, port, res_lli, res_ler)); @@ -2682,6 +2683,81 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } +static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *) + pmp->data; + struct _port_dctrs rsp; + + if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) { + pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; + goto bail; + } + + memset(&rsp, 0, sizeof(rsp)); + pma_get_opa_port_dctrs(ibdev, &rsp); + + p->port_xmit_data = rsp.port_xmit_data; + p->port_rcv_data = rsp.port_rcv_data; + p->port_xmit_packets = rsp.port_xmit_pkts; + p->port_rcv_packets = rsp.port_rcv_pkts; + p->port_unicast_xmit_packets = 0; + p->port_unicast_rcv_packets = 0; + p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts; + p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts; + +bail: + return reply((struct ib_mad_hdr *)pmp); +} + +static void pma_get_opa_port_ectrs(struct ib_device *ibdev, + struct _port_ectrs *rsp, u8 port) +{ + u64 tmp, tmp2; + struct hfi1_devdata *dd = dd_from_ibdev(ibdev); + struct hfi1_ibport *ibp = to_iport(ibdev, port); + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); + + tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL); + tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, + CNTR_INVALID_VL); + if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) { + /* overflow/wrapped */ + rsp->link_error_recovery = cpu_to_be32(~0); + } else { + rsp->link_error_recovery = cpu_to_be32(tmp2); + } + + rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN, + CNTR_INVALID_VL)); + rsp->port_rcv_errors = + cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL)); + rsp->port_rcv_remote_physical_errors = + cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR, + CNTR_INVALID_VL)); + rsp->port_rcv_switch_relay_errors = 0; + rsp->port_xmit_discards = + cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD, + CNTR_INVALID_VL)); + rsp->port_xmit_constraint_errors = + cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, + CNTR_INVALID_VL)); + rsp->port_rcv_constraint_errors = + cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR, + CNTR_INVALID_VL)); + tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL); + tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL); + if (tmp2 < tmp) { + /* overflow/wrapped */ + rsp->local_link_integrity_errors = cpu_to_be64(~0); + } else { + rsp->local_link_integrity_errors = cpu_to_be64(tmp2); + } + rsp->excessive_buffer_overruns = + 
cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL)); +} + static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, struct ib_device *ibdev, u8 port, u32 *resp_len) { @@ -2697,7 +2773,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, struct hfi1_pportdata *ppd; struct _vls_ectrs *vlinfo; unsigned long vl; - u64 port_mask, tmp, tmp2; + u64 port_mask, tmp; u32 vl_select_mask; int vfi; @@ -2741,44 +2817,16 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, memset(rsp, 0, sizeof(*rsp)); rsp->port_number = port_num; - rsp->port_rcv_constraint_errors = - cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR, - CNTR_INVALID_VL)); - /* port_rcv_switch_relay_errors is 0 for HFIs */ - rsp->port_xmit_discards = - cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD, - CNTR_INVALID_VL)); + pma_get_opa_port_ectrs(ibdev, rsp, port_num); + rsp->port_rcv_remote_physical_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR, - CNTR_INVALID_VL)); - tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL); - tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL); - if (tmp2 < tmp) { - /* overflow/wrapped */ - rsp->local_link_integrity_errors = cpu_to_be64(~0); - } else { - rsp->local_link_integrity_errors = cpu_to_be64(tmp2); - } - tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL); - tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, - CNTR_INVALID_VL); - if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) { - /* overflow/wrapped */ - rsp->link_error_recovery = cpu_to_be32(~0); - } else { - rsp->link_error_recovery = cpu_to_be32(tmp2); - } - rsp->port_xmit_constraint_errors = - cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, - CNTR_INVALID_VL)); - rsp->excessive_buffer_overruns = - cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL)); + CNTR_INVALID_VL)); rsp->fm_config_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL)); - rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN, - CNTR_INVALID_VL)); tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL); + rsp->uncorrectable_errors = tmp < 0x100 ? 
(tmp & 0xff) : 0xff; vlinfo = (struct _vls_ectrs *)&(rsp->vls[0]); @@ -2798,6 +2846,91 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } +static int pma_get_ib_portcounters(struct ib_pma_mad *pmp, + struct ib_device *ibdev, u8 port) +{ + struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) + pmp->data; + struct _port_ectrs rsp; + u64 temp_link_overrun_errors; + u64 temp_64; + u32 temp_32; + + memset(&rsp, 0, sizeof(rsp)); + pma_get_opa_port_ectrs(ibdev, &rsp, port); + + if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) { + pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; + goto bail; + } + + p->symbol_error_counter = 0; /* N/A for OPA */ + + temp_32 = be32_to_cpu(rsp.link_error_recovery); + if (temp_32 > 0xFFUL) + p->link_error_recovery_counter = 0xFF; + else + p->link_error_recovery_counter = (u8)temp_32; + + temp_32 = be32_to_cpu(rsp.link_downed); + if (temp_32 > 0xFFUL) + p->link_downed_counter = 0xFF; + else + p->link_downed_counter = (u8)temp_32; + + temp_64 = be64_to_cpu(rsp.port_rcv_errors); + if (temp_64 > 0xFFFFUL) + p->port_rcv_errors = cpu_to_be16(0xFFFF); + else + p->port_rcv_errors = cpu_to_be16((u16)temp_64); + + temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors); + if (temp_64 > 0xFFFFUL) + p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); + else + p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64); + + temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors); + p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64); + + temp_64 = be64_to_cpu(rsp.port_xmit_discards); + if (temp_64 > 0xFFFFUL) + p->port_xmit_discards = cpu_to_be16(0xFFFF); + else + p->port_xmit_discards = cpu_to_be16((u16)temp_64); + + temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors); + if (temp_64 > 0xFFUL) + p->port_xmit_constraint_errors = 0xFF; + else + p->port_xmit_constraint_errors = (u8)temp_64; + + temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors); + if (temp_64 > 0xFFUL) + p->port_rcv_constraint_errors = 0xFFUL; + else + p->port_rcv_constraint_errors = (u8)temp_64; + + /* LocalLink: 7:4, BufferOverrun: 3:0 */ + temp_64 = be64_to_cpu(rsp.local_link_integrity_errors); + if (temp_64 > 0xFUL) + temp_64 = 0xFUL; + + temp_link_overrun_errors = temp_64 << 4; + + temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns); + if (temp_64 > 0xFUL) + temp_64 = 0xFUL; + temp_link_overrun_errors |= temp_64; + + p->link_overrun_errors = (u8)temp_link_overrun_errors; + + p->vl15_dropped = 0; /* N/A for OPA */ + +bail: + return reply((struct ib_mad_hdr *)pmp); +} + static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, struct ib_device *ibdev, u8 port, u32 *resp_len) { @@ -3964,6 +4097,68 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, return ret; } +static int process_perf(struct ib_device *ibdev, u8 port, + const struct ib_mad *in_mad, + struct ib_mad *out_mad) +{ + struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad; + struct ib_class_port_info *cpi = (struct ib_class_port_info *) + &pmp->data; + int ret = IB_MAD_RESULT_FAILURE; + + *out_mad = *in_mad; + if (pmp->mad_hdr.class_version != 1) { + pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; + ret = reply((struct ib_mad_hdr *)pmp); + return ret; + } + + switch (pmp->mad_hdr.method) { + case IB_MGMT_METHOD_GET: + switch (pmp->mad_hdr.attr_id) { + case IB_PMA_PORT_COUNTERS: + ret = pma_get_ib_portcounters(pmp, ibdev, port); + break; + case IB_PMA_PORT_COUNTERS_EXT: + ret = pma_get_ib_portcounters_ext(pmp, ibdev, port); + break; + case IB_PMA_CLASS_PORT_INFO: 
+ cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; + ret = reply((struct ib_mad_hdr *)pmp); + break; + default: + pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply((struct ib_mad_hdr *)pmp); + break; + } + break; + + case IB_MGMT_METHOD_SET: + if (pmp->mad_hdr.attr_id) { + pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; + ret = reply((struct ib_mad_hdr *)pmp); + } + break; + + case IB_MGMT_METHOD_TRAP: + case IB_MGMT_METHOD_GET_RESP: + /* + * The ib_mad module will call us to process responses + * before checking for other consumers. + * Just tell the caller to process it normally. + */ + ret = IB_MAD_RESULT_SUCCESS; + break; + + default: + pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; + ret = reply((struct ib_mad_hdr *)pmp); + break; + } + + return ret; +} + static int process_perf_opa(struct ib_device *ibdev, u8 port, const struct opa_mad *in_mad, struct opa_mad *out_mad, u32 *resp_len) @@ -4107,6 +4302,9 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port, case IB_MGMT_CLASS_SUBN_LID_ROUTED: ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); break; + case IB_MGMT_CLASS_PERF_MGMT: + ret = process_perf(ibdev, port, in_mad, out_mad); + break; default: ret = IB_MAD_RESULT_SUCCESS; break; -- cgit v1.2.3-59-g8ed1b From cd93a9e8c5a58e451c834e48f1278383fbfa1072 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Wed, 3 Feb 2016 14:38:16 -0800 Subject: staging/rdma/hfi1: Removing unused struct hfi1_verbs_counters It removes the unused struct hfi1_verbs_counters from verbs.h. Reviewed-by: Mike Marciniszyn Signed-off-by: Sebastian Sanchez Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.h | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index a157e6458ab0..335e3a8583e7 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -279,22 +279,6 @@ struct hfi1_ibdev { #endif }; -struct hfi1_verbs_counters { - u64 symbol_error_counter; - u64 link_error_recovery_counter; - u64 link_downed_counter; - u64 port_rcv_errors; - u64 port_rcv_remphys_errors; - u64 port_xmit_discards; - u64 port_xmit_data; - u64 port_rcv_data; - u64 port_xmit_packets; - u64 port_rcv_packets; - u32 local_link_integrity_errors; - u32 excessive_buffer_overrun_errors; - u32 vl15_dropped; -}; - static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev) { struct rvt_dev_info *rdi; -- cgit v1.2.3-59-g8ed1b From 9171bfdd363304713a5a82ae03da6ec55a0cae39 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 4 Feb 2016 10:59:01 -0800 Subject: staging/rdma/hfi1: centralize timer routines into rc Centralize disparate timer maintenance. This allows for central control and changes to the RC timer handling including future optimizations.
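The shape of the change, condensed from the hunks that follow (a sketch only; the helpers live in rc.c and carry the full kernel-doc comments):

	/* before: every call site armed the retry timer by hand */
	qp->s_flags |= RVT_S_TIMER;
	qp->s_timer.function = rc_timeout;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies;
	add_timer(&qp->s_timer);

	/* after: a single helper owns the arming policy */
	hfi1_add_retry_timer(qp);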
Reviewed-by: Jubin John Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 107 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 371edc3dd4f6..350faaa45db4 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -62,6 +62,113 @@ static void rc_timeout(unsigned long arg); +/** + * hfi1_add_retry_timer - add/start a retry timer + * @qp - the QP + * + * add a retry timer on the QP + */ +static inline void hfi1_add_retry_timer(struct rvt_qp *qp) +{ + qp->s_flags |= RVT_S_TIMER; + qp->s_timer.function = rc_timeout; + /* 4.096 usec. * (1 << qp->timeout) */ + qp->s_timer.expires = jiffies + qp->timeout_jiffies; + add_timer(&qp->s_timer); +} + +/** + * hfi1_add_rnr_timer - add/start an rnr timer + * @qp - the QP + * @to - timeout in usecs + * + * add an rnr timer on the QP + */ +static inline void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) +{ + qp->s_flags |= RVT_S_WAIT_RNR; + qp->s_timer.function = hfi1_rc_rnr_retry; + qp->s_timer.expires = jiffies + usecs_to_jiffies(to); + add_timer(&qp->s_timer); +} + +/** + * hfi1_mod_retry_timer - mod a retry timer + * @qp - the QP + * + * Modify a potentially already running retry + * timer + */ +static inline void hfi1_mod_retry_timer(struct rvt_qp *qp) +{ + qp->s_flags |= RVT_S_TIMER; + qp->s_timer.function = rc_timeout; + /* 4.096 usec. * (1 << qp->timeout) */ + mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); +} + +/** + * hfi1_stop_retry_timer - stop a retry timer + * @qp - the QP + * + * stop a retry timer and return if the timer + * had been pending. + */ +static inline int hfi1_stop_retry_timer(struct rvt_qp *qp) +{ + int rval = 0; + + /* Remove QP from retry */ + if (qp->s_flags & RVT_S_TIMER) { + qp->s_flags &= ~RVT_S_TIMER; + rval = del_timer(&qp->s_timer); + } + return rval; +} + +/** + * hfi1_stop_rc_timers - stop all timers + * @qp - the QP + * + * stop any pending timers + */ +static inline void hfi1_stop_rc_timers(struct rvt_qp *qp) +{ + /* Remove QP from all timers */ + if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); + del_timer(&qp->s_timer); + } +} + +/** + * hfi1_stop_rnr_timer - stop an rnr timer + * @qp - the QP + * + * stop an rnr timer and return if the timer + * had been pending. + */ +static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp) +{ + int rval = 0; + + /* Remove QP from rnr timer */ + if (qp->s_flags & RVT_S_WAIT_RNR) { + qp->s_flags &= ~RVT_S_WAIT_RNR; + rval = del_timer(&qp->s_timer); + } + return rval; +} + +/** + * hfi1_del_timers_sync - wait for any timeout routines to exit + * @qp - the QP + */ +static inline void hfi1_del_timers_sync(struct rvt_qp *qp) +{ + del_timer_sync(&qp->s_timer); +} + static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 psn, u32 pmtu) { -- cgit v1.2.3-59-g8ed1b From e6f8c2b31f107f39e7301a02b5a6808d79c9f1f0 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 4 Feb 2016 10:59:09 -0800 Subject: staging/rdma/hfi1: use new timer routines Use the new timer routines. 
Reviewed-by: Jubin John Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 39 ++++++++++----------------------------- 1 file changed, 10 insertions(+), 29 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 350faaa45db4..5c32182dbf17 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -183,15 +183,6 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, return wqe->length - len; } -static void start_timer(struct rvt_qp *qp) -{ - qp->s_flags |= RVT_S_TIMER; - qp->s_timer.function = rc_timeout; - /* 4.096 usec. * (1 << qp->timeout) */ - qp->s_timer.expires = jiffies + qp->timeout_jiffies; - add_timer(&qp->s_timer); -} - /** * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read) * @dev: the device for this QP @@ -1054,11 +1045,8 @@ void hfi1_rc_rnr_retry(unsigned long arg) unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); - if (qp->s_flags & RVT_S_WAIT_RNR) { - qp->s_flags &= ~RVT_S_WAIT_RNR; - del_timer(&qp->s_timer); - hfi1_schedule_send(qp); - } + hfi1_stop_rnr_timer(qp); + hfi1_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); } @@ -1128,7 +1116,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) && (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) - start_timer(qp); + hfi1_add_retry_timer(qp); while (qp->s_last != qp->s_acked) { wqe = rvt_get_swqe_ptr(qp, qp->s_last); @@ -1276,12 +1264,10 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, int ret = 0; u32 ack_psn; int diff; + unsigned long to; /* Remove QP from retry timer */ - if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { - qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); - del_timer(&qp->s_timer); - } + hfi1_stop_rc_timers(qp); /* * Note that NAKs implicitly ACK outstanding SEND and RDMA write @@ -1378,7 +1364,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, * We are expecting more ACKs so * reset the re-transmit timer. */ - start_timer(qp); + hfi1_add_retry_timer(qp); /* * We can stop re-sending the earlier packets and * continue with the next packet the receiver wants. @@ -1421,12 +1407,10 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, reset_psn(qp, psn); qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK); - qp->s_flags |= RVT_S_WAIT_RNR; - qp->s_timer.function = hfi1_rc_rnr_retry; - qp->s_timer.expires = jiffies + usecs_to_jiffies( + to = ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) & - HFI1_AETH_CREDIT_MASK]); - add_timer(&qp->s_timer); + HFI1_AETH_CREDIT_MASK]; + hfi1_add_rnr_timer(qp, to); goto bail; case 3: /* NAK */ @@ -1496,10 +1480,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn, struct rvt_swqe *wqe; /* Remove QP from retry timer */ - if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { - qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); - del_timer(&qp->s_timer); - } + hfi1_stop_rc_timers(qp); wqe = rvt_get_swqe_ptr(qp, qp->s_acked); -- cgit v1.2.3-59-g8ed1b From 633d27399514e7726633c9029e3947f0526d2565 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 4 Feb 2016 10:59:18 -0800 Subject: staging/rdma/hfi1: use mod_timer when appropriate Use new timer API to optimize maintenance of timers during ACK processing. When we are still expecting ACKs, mod the timer to avoid a heavyweight delete/add. 
Otherwise, ensure do_rc_ack() maintains the timer as it had. Reviewed-by: Jubin John Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 5c32182dbf17..700d84942afe 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -1266,9 +1266,6 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, int diff; unsigned long to; - /* Remove QP from retry timer */ - hfi1_stop_rc_timers(qp); - /* * Note that NAKs implicitly ACK outstanding SEND and RDMA write * requests and implicitly NAK RDMA read and atomic requests issued @@ -1296,7 +1293,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, opcode == OP(RDMA_READ_RESPONSE_ONLY) && diff == 0) { ret = 1; - goto bail; + goto bail_stop; } /* * If this request is a RDMA read or atomic, and the ACK is @@ -1327,7 +1324,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, * No need to process the ACK/NAK since we are * restarting an earlier request. */ - goto bail; + goto bail_stop; } if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { @@ -1362,18 +1359,22 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, if (qp->s_acked != qp->s_tail) { /* * We are expecting more ACKs so - * reset the re-transmit timer. + * mod the retry timer. */ - hfi1_add_retry_timer(qp); + hfi1_mod_retry_timer(qp); /* * We can stop re-sending the earlier packets and * continue with the next packet the receiver wants. */ if (cmp_psn(qp->s_psn, psn) <= 0) reset_psn(qp, psn + 1); - } else if (cmp_psn(qp->s_psn, psn) <= 0) { - qp->s_state = OP(SEND_LAST); - qp->s_psn = psn + 1; + } else { + /* No more acks - kill all timers */ + hfi1_stop_rc_timers(qp); + if (cmp_psn(qp->s_psn, psn) <= 0) { + qp->s_state = OP(SEND_LAST); + qp->s_psn = psn + 1; + } } if (qp->s_flags & RVT_S_WAIT_ACK) { qp->s_flags &= ~RVT_S_WAIT_ACK; @@ -1383,15 +1384,14 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, qp->s_rnr_retry = qp->s_rnr_retry_cnt; qp->s_retry = qp->s_retry_cnt; update_last_psn(qp, psn); - ret = 1; - goto bail; + return 1; case 1: /* RNR NAK */ ibp->rvp.n_rnr_naks++; if (qp->s_acked == qp->s_tail) - goto bail; + goto bail_stop; if (qp->s_flags & RVT_S_WAIT_RNR) - goto bail; + goto bail_stop; if (qp->s_rnr_retry == 0) { status = IB_WC_RNR_RETRY_EXC_ERR; goto class_b; @@ -1407,15 +1407,16 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, reset_psn(qp, psn); qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK); + hfi1_stop_rc_timers(qp); to = ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK]; hfi1_add_rnr_timer(qp, to); - goto bail; + return 0; case 3: /* NAK */ if (qp->s_acked == qp->s_tail) - goto bail; + goto bail_stop; /* The last valid PSN is the previous PSN. */ update_last_psn(qp, psn - 1); switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) & @@ -1458,15 +1459,16 @@ class_b: } qp->s_retry = qp->s_retry_cnt; qp->s_rnr_retry = qp->s_rnr_retry_cnt; - goto bail; + goto bail_stop; default: /* 2: reserved */ reserved: /* Ignore reserved NAK codes.
*/ - goto bail; + goto bail_stop; } - -bail: + return ret; +bail_stop: + hfi1_stop_rc_timers(qp); return ret; } -- cgit v1.2.3-59-g8ed1b From 3c9d149bdc797a7dfb48bcf327f9ceeb6060a512 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 4 Feb 2016 10:59:27 -0800 Subject: staging/rdma/hfi1: add unique rnr timer Add a new rnr timer to hfi1. This allows for future optimizations having the retry and rnr timers separate. Reviewed-by: Jubin John Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 2 +- drivers/staging/rdma/hfi1/rc.c | 2 +- drivers/staging/rdma/hfi1/verbs.h | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index b78c8eadcd95..a5f0e2e41eb1 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -607,7 +607,7 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, kfree(priv); return ERR_PTR(-ENOMEM); } - + setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp); return priv; } diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 700d84942afe..1ff19aa41ef4 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -164,7 +164,7 @@ static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp) * hfi1_del_timers_sync - wait for any timeout routines to exit * @qp - the QP */ -static inline void hfi1_del_timers_sync(struct rvt_qp *qp) +void hfi1_del_timers_sync(struct rvt_qp *qp) { del_timer_sync(&qp->s_timer); } diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 335e3a8583e7..6294fa81c20b 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -210,6 +210,7 @@ struct hfi1_qp_priv { u8 s_sc; /* SC[0..4] for next packet */ u8 r_adefered; /* number of acks defered */ struct iowait s_iowait; + struct timer_list s_rnr_timer; struct rvt_qp *owner; }; @@ -403,6 +404,7 @@ u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid); void hfi1_rc_rnr_retry(unsigned long arg); +void hfi1_del_timers_sync(struct rvt_qp *qp); void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr); -- cgit v1.2.3-59-g8ed1b From 08279d5c9424afd710c90d0b6df95612d2bb5a3f Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 4 Feb 2016 10:59:36 -0800 Subject: staging/rdma/hfi1: use new RNR timer Use the new RNR timer for hfi1. For qib, this timer doesn't exist, so exploit driver callbacks to use the new timer as appropriate. 
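A condensed sketch of the split, assuming the s_rnr_timer field added to struct hfi1_qp_priv by the previous patch (drawn from the qp.c and rc.c hunks below, not a drop-in replacement for them):

	/* QP private allocation: the RNR path gets its own timer */
	setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);

	/* teardown must now quiesce both timers */
	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);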
Reviewed-by: Jubin John Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qib/qib_qp.c | 1 + drivers/infiniband/sw/rdmavt/qp.c | 1 - drivers/staging/rdma/hfi1/qp.c | 3 +++ drivers/staging/rdma/hfi1/rc.c | 22 +++++++++++++--------- drivers/staging/rdma/hfi1/verbs.h | 2 ++ 5 files changed, 19 insertions(+), 10 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 45bed5f2bba4..787116f59395 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -412,6 +412,7 @@ void stop_send_queue(struct rvt_qp *qp) struct qib_qp_priv *priv = qp->priv; cancel_work_sync(&priv->s_work); + del_timer_sync(&qp->s_timer); } void quiesce_qp(struct rvt_qp *qp) diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 322de64164f7..439213c37537 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -405,7 +405,6 @@ void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, /* Stop the send queue and the retry timer */ rdi->driver_f.stop_send_queue(qp); - del_timer_sync(&qp->s_timer); /* Wait for things to stop */ rdi->driver_f.quiesce_qp(qp); diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index a5f0e2e41eb1..b96d5ee397de 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -608,6 +608,7 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, return ERR_PTR(-ENOMEM); } setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp); + qp->s_timer.function = hfi1_rc_timeout; return priv; } @@ -647,6 +648,7 @@ unsigned free_all_qps(struct rvt_dev_info *rdi) void flush_qp_waiters(struct rvt_qp *qp) { flush_iowait(qp); + hfi1_stop_rc_timers(qp); } void stop_send_queue(struct rvt_qp *qp) @@ -654,6 +656,7 @@ void stop_send_queue(struct rvt_qp *qp) struct hfi1_qp_priv *priv = qp->priv; cancel_work_sync(&priv->s_iowait.iowork); + hfi1_del_timers_sync(qp); } void quiesce_qp(struct rvt_qp *qp) diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 1ff19aa41ef4..2c46491746bb 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -60,8 +60,6 @@ /* cut down ridiculously long IB macro names */ #define OP(x) IB_OPCODE_RC_##x -static void rc_timeout(unsigned long arg); - /** * hfi1_add_retry_timer - add/start a retry timer * @qp - the QP @@ -71,7 +69,6 @@ static void rc_timeout(unsigned long arg); static inline void hfi1_add_retry_timer(struct rvt_qp *qp) { qp->s_flags |= RVT_S_TIMER; - qp->s_timer.function = rc_timeout; /* 4.096 usec. * (1 << qp->timeout) */ qp->s_timer.expires = jiffies + qp->timeout_jiffies; add_timer(&qp->s_timer); @@ -86,10 +83,11 @@ static inline void hfi1_add_retry_timer(struct rvt_qp *qp) */ static inline void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) { + struct hfi1_qp_priv *priv = qp->priv; + qp->s_flags |= RVT_S_WAIT_RNR; - qp->s_timer.function = hfi1_rc_rnr_retry; qp->s_timer.expires = jiffies + usecs_to_jiffies(to); - add_timer(&qp->s_timer); + add_timer(&priv->s_rnr_timer); } /** @@ -102,7 +100,6 @@ static inline void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) static inline void hfi1_mod_retry_timer(struct rvt_qp *qp) { qp->s_flags |= RVT_S_TIMER; - qp->s_timer.function = rc_timeout; /* 4.096 usec. 
* (1 << qp->timeout) */ mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); } @@ -132,12 +129,15 @@ static inline int hfi1_stop_retry_timer(struct rvt_qp *qp) * * stop any pending timers */ -static inline void hfi1_stop_rc_timers(struct rvt_qp *qp) +void hfi1_stop_rc_timers(struct rvt_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; + /* Remove QP from all timers */ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); + del_timer(&priv->s_rnr_timer); } } @@ -151,11 +151,12 @@ static inline void hfi1_stop_rc_timers(struct rvt_qp *qp) static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp) { int rval = 0; + struct hfi1_qp_priv *priv = qp->priv; /* Remove QP from rnr timer */ if (qp->s_flags & RVT_S_WAIT_RNR) { qp->s_flags &= ~RVT_S_WAIT_RNR; - rval = del_timer(&qp->s_timer); + rval = del_timer(&priv->s_rnr_timer); } return rval; } @@ -166,7 +167,10 @@ static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp) */ void hfi1_del_timers_sync(struct rvt_qp *qp) { + struct hfi1_qp_priv *priv = qp->priv; + del_timer_sync(&qp->s_timer); + del_timer_sync(&priv->s_rnr_timer); } static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, @@ -1015,7 +1019,7 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait) /* * This is called from s_timer for missing responses. */ -static void rc_timeout(unsigned long arg) +void hfi1_rc_timeout(unsigned long arg) { struct rvt_qp *qp = (struct rvt_qp *)arg; struct hfi1_ibport *ibp; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 6294fa81c20b..26eda8a3e55e 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -404,7 +404,9 @@ u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid); void hfi1_rc_rnr_retry(unsigned long arg); +void hfi1_rc_timeout(unsigned long arg); void hfi1_del_timers_sync(struct rvt_qp *qp); +void hfi1_stop_rc_timers(struct rvt_qp *qp); void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr); -- cgit v1.2.3-59-g8ed1b From 0940e0f68e59fca500cbad6f5f58bddefdb6dc53 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 4 Feb 2016 11:03:02 -0800 Subject: staging/rdma/hfi1: remove duplicate timeout print The qp->timeout field is duplicated in the seqfile print. Remove it. Reviewed-by: Jubin John Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index b96d5ee397de..9901ef0ef79c 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -547,7 +547,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) sde = qp_to_sdma_engine(qp, priv->s_sc); wqe = rvt_get_swqe_ptr(qp, qp->s_last); seq_printf(s, - "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n", + "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u SDE %p,%u\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, @@ -572,7 +572,6 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) qp->remote_ah_attr.sl, qp->pmtu, qp->s_retry_cnt, - qp->timeout, qp->s_rnr_retry_cnt, sde, sde ? 
sde->this_idx : 0); -- cgit v1.2.3-59-g8ed1b From 20658661bc0712c51ad9798914f5eb3e28cb8121 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 4 Feb 2016 11:03:11 -0800 Subject: staging/rdma/hfi1: add s_retry to diagnostics This is needed to debug ULP issues with getting retry attributes correctly specified. Reviewed-by: Jubin John Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 9901ef0ef79c..ec9ee726267b 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -547,7 +547,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) sde = qp_to_sdma_engine(qp, priv->s_sc); wqe = rvt_get_swqe_ptr(qp, qp->s_last); seq_printf(s, - "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u SDE %p,%u\n", + "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u %u SDE %p,%u\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, @@ -571,6 +571,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) qp->remote_ah_attr.dlid, qp->remote_ah_attr.sl, qp->pmtu, + qp->s_retry, qp->s_retry_cnt, qp->s_rnr_retry_cnt, sde, -- cgit v1.2.3-59-g8ed1b From 6c2ab0b857d1b674c5f710d2cbf06a0f3ac52313 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 4 Feb 2016 11:03:19 -0800 Subject: staging/rdma/hfi1: Insure last cursor is updated prior to complete This patch is a prerequisite for adding a separate lock for post send. The timing of updating s_last needs to be before returning any send completion to avoid a race between a poll cq seeing a completion and the post send checking for a full queue. 
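The ordering pattern, condensed from the rc.c and ruc.c hunks below (a sketch; the completion-entry setup is omitted):

	u32 s_last = qp->s_last;

	if (++s_last >= qp->s_size)
		s_last = 0;
	/* retire the slot before the completion becomes visible */
	qp->s_last = s_last;
	/* pairs with the read in post_send() */
	barrier();
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);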
Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 20 ++++++++++++++++---- drivers/staging/rdma/hfi1/ruc.c | 12 +++++++----- 2 files changed, 23 insertions(+), 9 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 2c46491746bb..e54e0b4bb5e5 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -1123,10 +1123,18 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) hfi1_add_retry_timer(qp); while (qp->s_last != qp->s_acked) { + u32 s_last; + wqe = rvt_get_swqe_ptr(qp, qp->s_last); if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 && cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; + s_last = qp->s_last; + if (++s_last >= qp->s_size) + s_last = 0; + qp->s_last = s_last; + /* see post_send() */ + barrier(); for (i = 0; i < wqe->wr.num_sge; i++) { struct rvt_sge *sge = &wqe->sg_list[i]; @@ -1143,8 +1151,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr) wc.qp = &qp->ibqp; rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0); } - if (++qp->s_last >= qp->s_size) - qp->s_last = 0; } /* * If we were waiting for sends to complete before re-sending, @@ -1184,11 +1190,19 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, */ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { + u32 s_last; + for (i = 0; i < wqe->wr.num_sge; i++) { struct rvt_sge *sge = &wqe->sg_list[i]; rvt_put_mr(sge->mr); } + s_last = qp->s_last; + if (++s_last >= qp->s_size) + s_last = 0; + qp->s_last = s_last; + /* see post_send() */ + barrier(); /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { @@ -1200,8 +1214,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, wc.qp = &qp->ibqp; rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0); } - if (++qp->s_last >= qp->s_size) - qp->s_last = 0; } else { struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 6aeea6c4b236..66449acac76d 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -921,6 +921,13 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) return; + last = qp->s_last; + old_last = last; + if (++last >= qp->s_size) + last = 0; + qp->s_last = last; + /* See post_send() */ + barrier(); for (i = 0; i < wqe->wr.num_sge; i++) { struct rvt_sge *sge = &wqe->sg_list[i]; @@ -948,11 +955,6 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, status != IB_WC_SUCCESS); } - last = qp->s_last; - old_last = last; - if (++last >= qp->s_size) - last = 0; - qp->s_last = last; if (qp->s_acked == old_last) qp->s_acked = last; if (qp->s_cur == old_last) -- cgit v1.2.3-59-g8ed1b From 045277cf1548ab04a114bf560a01e38cf33b91b4 Mon Sep 17 00:00:00 2001 From: Hari Prasath Gujulan Elango Date: Thu, 4 Feb 2016 11:03:45 -0800 Subject: IB/qib,staging/rdma/hfi1: use setup_timer api Replace the two-step pattern of initializing a timer and then assigning its callback function with the single setup_timer() API.
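The conversion is mechanical, as the verbs.c hunk below shows:

	/* before: three statements to initialize the timer */
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long)dev;

	/* after: one call performing the same initialization */
	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);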
Signed-off-by: Hari Prasath Gujulan Elango Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qib/qib_verbs.c | 4 +--- drivers/staging/rdma/hfi1/verbs.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 41583629b146..2abe31d4e350 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1602,9 +1602,7 @@ int qib_register_ib_device(struct qib_devdata *dd) init_ibport(ppd + i); /* Only need to initialize non-zero fields. */ - init_timer(&dev->mem_timer); - dev->mem_timer.function = mem_timer; - dev->mem_timer.data = (unsigned long) dev; + setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev); qpt_mask = dd->qpn_mask; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 0ee6b1debd05..35f6d92a6249 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1455,9 +1455,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) /* Only need to initialize non-zero fields. */ - init_timer(&dev->mem_timer); - dev->mem_timer.function = mem_timer; - dev->mem_timer.data = (unsigned long) dev; + setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev); seqlock_init(&dev->iowait_lock); INIT_LIST_HEAD(&dev->txwait); -- cgit v1.2.3-59-g8ed1b From c3838b396b425b4242bfe627bfabefc4c1af56f2 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Tue, 9 Feb 2016 14:29:13 -0800 Subject: staging/rdma/hfi1: Fetch platform configuration data from EFI variable The platform configuration data has been moved into the EFI variable store where it is populated by the HFI1 option ROM. This patch pulls the configuration data from the new location, retaining a fallback to request_firmware. 
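The fetch-with-fallback flow, condensed from the platform.c hunk below (a sketch; the dd_dev_info() reporting is trimmed):

	ret = read_hfi1_efi_var(dd, "configuration", &size,
				(void **)&temp_platform_config);
	if (ret) {
		/* no EFI variable: fall back to request_firmware() */
		platform_config_load = 1;
		return;
	}
	dd->platform_config.data = temp_platform_config;
	dd->platform_config.size = size;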
Reviewed-by: Dean Luick Signed-off-by: Easwar Hariharan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 3 ++ drivers/staging/rdma/hfi1/chip.h | 2 ++ drivers/staging/rdma/hfi1/firmware.c | 69 ++++++++++++++++++++++++++++-------- drivers/staging/rdma/hfi1/hfi.h | 1 + drivers/staging/rdma/hfi1/init.c | 1 + drivers/staging/rdma/hfi1/platform.c | 58 ++++++++++++++++++++++++++---- drivers/staging/rdma/hfi1/platform.h | 9 +++++ 7 files changed, 121 insertions(+), 22 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 77b07c3a85a7..4750e3c2db3e 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -14096,6 +14096,9 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, if (ret) goto bail_cleanup; + /* Needs to be called before hfi1_firmware_init */ + get_platform_config(dd); + /* read in firmware */ ret = hfi1_firmware_init(dd); if (ret) diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 0e95f0b7f2bb..3b041dc771cd 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -617,6 +617,8 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64, int, u32, u32); #define NUM_PCIE_SERDES 16 /* number of PCIe serdes on the SBus */ extern const u8 pcie_serdes_broadcast[]; extern const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES]; +extern uint platform_config_load; + /* SBus commands */ #define RESET_SBUS_RECEIVER 0x20 #define WRITE_SBUS_RECEIVER 0x21 diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 0b23e3eaf574..d2ec6c5f18ac 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -77,7 +77,13 @@ static uint fw_8051_load = 1; static uint fw_fabric_serdes_load = 1; static uint fw_pcie_serdes_load = 1; static uint fw_sbus_load = 1; -static uint platform_config_load = 1; + +/* + * Access required in platform.c + * Maintains state of whether the platform config was fetched via the + * fallback option + */ +uint platform_config_load; /* Firmware file names get set in hfi1_firmware_init() based on the above */ static char *fw_8051_name; @@ -677,10 +683,15 @@ static int obtain_firmware(struct hfi1_devdata *dd) } /* not in FW_TRY state */ - if (fw_state == FW_FINAL) + if (fw_state == FW_FINAL) { + if (platform_config) { + dd->platform_config.data = platform_config->data; + dd->platform_config.size = platform_config->size; + } goto done; /* already acquired */ - else if (fw_state == FW_ERR) + } else if (fw_state == FW_ERR) { goto done; /* already tried and failed */ + } /* fw_state is FW_EMPTY */ /* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */ @@ -690,8 +701,14 @@ static int obtain_firmware(struct hfi1_devdata *dd) platform_config = NULL; err = request_firmware(&platform_config, platform_config_name, &dd->pcidev->dev); - if (err) + if (err) { platform_config = NULL; + fw_state = FW_ERR; + fw_err = -ENOENT; + goto done; + } + dd->platform_config.data = platform_config->data; + dd->platform_config.size = platform_config->size; } done: @@ -1457,14 +1474,14 @@ int parse_platform_config(struct hfi1_devdata *dd) { struct platform_config_cache *pcfgcache = &dd->pcfg_cache; u32 *ptr = NULL; - u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0; + u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0; u32 record_idx = 0, table_type = 0, table_length_dwords = 0; - if (platform_config == NULL) { + if 
(!dd->platform_config.data) { dd_dev_info(dd, "%s: Missing config file\n", __func__); goto bail; } - ptr = (u32 *)platform_config->data; + ptr = (u32 *)dd->platform_config.data; magic_num = *ptr; ptr++; @@ -1473,12 +1490,31 @@ int parse_platform_config(struct hfi1_devdata *dd) goto bail; } - while (ptr < (u32 *)(platform_config->data + platform_config->size)) { + /* Field is file size in DWORDs */ + file_length = (*ptr) * 4; + ptr++; + + if (file_length > dd->platform_config.size) { + dd_dev_info(dd, "%s:File claims to be larger than read size\n", + __func__); + goto bail; + } else if (file_length < dd->platform_config.size) { + dd_dev_info(dd, "%s:File claims to be smaller than read size\n", + __func__); + } + /* exactly equal, perfection */ + + /* + * In both cases where we proceed, using the self-reported file length + * is the safer option + */ + while (ptr < (u32 *)(dd->platform_config.data + file_length)) { header1 = *ptr; header2 = *(ptr + 1); if (header1 != ~header2) { dd_dev_info(dd, "%s: Failed validation at offset %ld\n", - __func__, (ptr - (u32 *)platform_config->data)); + __func__, (ptr - + (u32 *)dd->platform_config.data)); goto bail; } @@ -1520,7 +1556,7 @@ int parse_platform_config(struct hfi1_devdata *dd) dd_dev_info(dd, "%s: Unknown data table %d, offset %ld\n", __func__, table_type, - (ptr - (u32 *)platform_config->data)); + (ptr - (u32 *)dd->platform_config.data)); goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table = ptr; @@ -1541,9 +1577,10 @@ int parse_platform_config(struct hfi1_devdata *dd) break; default: dd_dev_info(dd, - "%s: Unknown metadata table %d, offset %ld\n", - __func__, table_type, - (ptr - (u32 *)platform_config->data)); + "%s: Unknown meta table %d, offset %ld\n", + __func__, table_type, + (ptr - + (u32 *)dd->platform_config.data)); goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table_metadata = @@ -1559,7 +1596,9 @@ int parse_platform_config(struct hfi1_devdata *dd) ptr += table_length_dwords; if (crc != *ptr) { dd_dev_info(dd, "%s: Failed CRC check at offset %ld\n", - __func__, (ptr - (u32 *)platform_config->data)); + __func__, (ptr - + (u32 *) + dd->platform_config.data)); goto bail; } /* Jump the CRC DWORD */ @@ -1675,7 +1714,7 @@ int get_platform_config_field(struct hfi1_devdata *dd, } break; case PLATFORM_CONFIG_PORT_TABLE: - /* Port table is 4 DWORDS in META_VERSION 0 */ + /* Port table is 4 DWORDS */ src_ptr = dd->hfi1_id ? 
pcfgcache->config_tables[table_type].table + 4 : pcfgcache->config_tables[table_type].table; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 585485bb6e77..702723b3ff90 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1028,6 +1028,7 @@ struct hfi1_devdata { u16 irev; /* implementation revision */ u16 dc8051_ver; /* 8051 firmware version */ + struct platform_config platform_config; struct platform_config_cache pcfg_cache; /* control high-level access to qsfp */ struct mutex qsfp_i2c_mutex; diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index fe5e1e57307b..112cb6c09857 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -983,6 +983,7 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) idr_remove(&hfi1_unit_table, dd->unit); list_del(&dd->list); spin_unlock_irqrestore(&hfi1_devs_lock, flags); + free_platform_config(dd); hfi1_dbg_ibdev_exit(&dd->verbs_dev); rcu_barrier(); /* wait for rcu callbacks to complete */ free_percpu(dd->int_counter); diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c index 506a82766b33..0309c5238823 100644 --- a/drivers/staging/rdma/hfi1/platform.c +++ b/drivers/staging/rdma/hfi1/platform.c @@ -47,7 +47,48 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ + #include "hfi.h" +#include "efivar.h" + +void get_platform_config(struct hfi1_devdata *dd) +{ + int ret = 0; + unsigned long size = 0; + u8 *temp_platform_config = NULL; + + ret = read_hfi1_efi_var(dd, "configuration", &size, + (void **)&temp_platform_config); + if (ret) { + dd_dev_info(dd, + "%s: Failed to get platform config from UEFI, falling back to request firmware\n", + __func__); + /* fall back to request firmware */ + platform_config_load = 1; + goto bail; + } + + dd->platform_config.data = temp_platform_config; + dd->platform_config.size = size; + +bail: + /* exit */; +} + +void free_platform_config(struct hfi1_devdata *dd) +{ + if (!platform_config_load) { + /* + * was loaded from EFI, release memory + * allocated by read_efi_var + */ + kfree(dd->platform_config.data); + } + /* + * else do nothing, dispose_firmware will release + * struct firmware platform_config on driver exit + */ +} int set_qsfp_tx(struct hfi1_pportdata *ppd, int on) { @@ -739,8 +780,7 @@ void tune_serdes(struct hfi1_pportdata *ppd) /* Skip the tuning for testing (loopback != none) and simulations */ if (loopback != LOOPBACK_NONE || - ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR || - !dd->pcfg_cache.cache_valid) { + ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { ppd->driver_link_ready = 1; return; } @@ -805,6 +845,12 @@ void tune_serdes(struct hfi1_pportdata *ppd) &rx_preset_index, &tuning_method, &total_atten); + + /* + * We may have modified the QSFP memory, so + * update the cache to reflect the changes + */ + refresh_qsfp_cache(ppd, &ppd->qsfp_info); if (ret) goto bail; } else { @@ -820,7 +866,7 @@ void tune_serdes(struct hfi1_pportdata *ppd) break; default: dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__); - break; + goto bail; } if (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) @@ -828,10 +874,8 @@ void tune_serdes(struct hfi1_pportdata *ppd) total_atten, ppd->qsfp_info.limiting_active); - if (ppd->port_type == PORT_TYPE_QSFP) - refresh_qsfp_cache(ppd, &ppd->qsfp_info); - - ppd->driver_link_ready = 1; + if (!ret) + ppd->driver_link_ready = 1; return; bail: diff --git 
a/drivers/staging/rdma/hfi1/platform.h b/drivers/staging/rdma/hfi1/platform.h index 5b53d71ddf96..cc280cca9b9c 100644 --- a/drivers/staging/rdma/hfi1/platform.h +++ b/drivers/staging/rdma/hfi1/platform.h @@ -150,6 +150,11 @@ enum platform_config_variable_settings_table_fields { VARIABLE_SETTINGS_TABLE_MAX }; +struct platform_config { + size_t size; + const u8 *data; +}; + struct platform_config_data { u32 *table; u32 *table_metadata; @@ -293,6 +298,10 @@ enum link_tuning_encoding { OPA_UNKNOWN_TUNING }; +/* platform.c */ +void get_platform_config(struct hfi1_devdata *dd); +void free_platform_config(struct hfi1_devdata *dd); int set_qsfp_tx(struct hfi1_pportdata *ppd, int on); void tune_serdes(struct hfi1_pportdata *ppd); + #endif /*__PLATFORM_H*/ -- cgit v1.2.3-59-g8ed1b From 97167e8134150eb5104e19fd7208e3ac3525f48b Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Tue, 9 Feb 2016 14:29:22 -0800 Subject: staging/rdma/hfi1: Tune for unknown channel if configuration file is absent Currently, the driver fails to tune the SerDes and therefore prevents link up if the configuration file is missing or fails parsing or validation. This patch adds a fallback option so that the 8051 is asked to tune for an unknown channel and possibly get the link up if tuning succeeds. It also adds a user-friendly message to update the configuration file if it is out-of-date. Reviewed-by: Mike Marciniszyn Reviewed-by: Dean Luick Signed-off-by: Easwar Hariharan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.h | 4 ++ drivers/staging/rdma/hfi1/firmware.c | 49 ++++++++++++++++-- drivers/staging/rdma/hfi1/platform.c | 97 +++++++++++++++++++----------------- drivers/staging/rdma/hfi1/qsfp.c | 1 - 4 files changed, 99 insertions(+), 52 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 3b041dc771cd..b86c220161e5 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -390,6 +390,10 @@ #define LINK_QUALITY_INFO 0x14 #define REMOTE_DEVICE_ID 0x15 +/* 8051 lane specific register field IDs */ +#define TX_EQ_SETTINGS 0x00 +#define CHANNEL_LOSS_SETTINGS 0x05 + /* Lane ID for general configuration registers */ #define GENERAL_CONFIG 4 diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index d2ec6c5f18ac..35084b754b7c 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -703,8 +703,6 @@ static int obtain_firmware(struct hfi1_devdata *dd) &dd->pcidev->dev); if (err) { platform_config = NULL; - fw_state = FW_ERR; - fw_err = -ENOENT; goto done; } dd->platform_config.data = platform_config->data; @@ -1470,12 +1468,51 @@ int hfi1_firmware_init(struct hfi1_devdata *dd) return obtain_firmware(dd); } +/* + * This function is a helper function for parse_platform_config(...) and + * does not check for validity of the platform configuration cache + * (because we know it is invalid as we are building up the cache). 
+ * As such, this should not be called from anywhere other than + * parse_platform_config + */ +static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table) +{ + u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask; + struct platform_config_cache *pcfgcache = &dd->pcfg_cache; + + if (!system_table) + return -EINVAL; + + meta_ver_meta = + *(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata + + SYSTEM_TABLE_META_VERSION); + + mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1); + ver_start = meta_ver_meta & mask; + + meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT; + + mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1); + ver_len = meta_ver_meta & mask; + + ver_start /= 8; + meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1); + + if (meta_ver < 5) { + dd_dev_info( + dd, "%s:Please update platform config\n", __func__); + return -EINVAL; + } + return 0; +} + int parse_platform_config(struct hfi1_devdata *dd) { struct platform_config_cache *pcfgcache = &dd->pcfg_cache; u32 *ptr = NULL; u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0; u32 record_idx = 0, table_type = 0, table_length_dwords = 0; + int ret = -EINVAL; /* assume failure */ if (!dd->platform_config.data) { dd_dev_info(dd, "%s: Missing config file\n", __func__); @@ -1499,7 +1536,8 @@ int parse_platform_config(struct hfi1_devdata *dd) __func__); goto bail; } else if (file_length < dd->platform_config.size) { - dd_dev_info(dd, "%s:File claims to be smaller than read size\n", + dd_dev_info(dd, + "%s:File claims to be smaller than read size, continuing\n", __func__); } /* exactly equal, perfection */ @@ -1537,6 +1575,9 @@ int parse_platform_config(struct hfi1_devdata *dd) case PLATFORM_CONFIG_SYSTEM_TABLE: pcfgcache->config_tables[table_type].num_table = 1; + ret = check_meta_version(dd, ptr); + if (ret) + goto bail; break; case PLATFORM_CONFIG_PORT_TABLE: pcfgcache->config_tables[table_type].num_table = @@ -1609,7 +1650,7 @@ int parse_platform_config(struct hfi1_devdata *dd) return 0; bail: memset(pcfgcache, 0, sizeof(struct platform_config_cache)); - return -EINVAL; + return ret; } static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table, diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c index 0309c5238823..2f07becb0b96 100644 --- a/drivers/staging/rdma/hfi1/platform.c +++ b/drivers/staging/rdma/hfi1/platform.c @@ -498,14 +498,14 @@ static void apply_rx_amplitude_settings( #define OPA_INVALID_INDEX 0xFFF -static void apply_tx_lanes(struct hfi1_pportdata *ppd, u32 config_data, - const char *message) +static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id, + u32 config_data, const char *message) { u8 i; int ret = HCMD_SUCCESS; for (i = 0; i < 4; i++) { - ret = load_8051_config(ppd->dd, 0, i, config_data); + ret = load_8051_config(ppd->dd, field_id, i, config_data); if (ret != HCMD_SUCCESS) { dd_dev_err( ppd->dd, @@ -524,6 +524,7 @@ static void apply_tunings( u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0; u8 *cache = ppd->qsfp_info.cache; + /* Enable external device config if channel is limiting active */ read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS, GENERAL_CONFIG, &config_data); config_data |= limiting_active; @@ -536,6 +537,7 @@ static void apply_tunings( __func__); config_data = 0; /* re-init */ + /* Pass tuning method to 8051 */ read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG, &config_data); config_data |= tuning_method; @@ -545,47 +547,39 @@ 
static void apply_tunings( dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n", __func__); - external_device_config = - ((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) | - ((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) | - ((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) | - (cache[QSFP_EQ_INFO_OFFS] & 0x4); - - config_data = 0; /* re-init */ - read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, - &config_data); - config_data |= (external_device_config << 24); - ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, GENERAL_CONFIG, - config_data); - if (ret != HCMD_SUCCESS) - dd_dev_err( - ppd->dd, - "%s: Failed to set external device config parameters\n", - __func__); - - config_data = 0; /* re-init */ - read_8051_config(ppd->dd, TX_SETTINGS, GENERAL_CONFIG, &config_data); - if ((ppd->link_speed_supported & OPA_LINK_SPEED_25G) && - (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)) - config_data |= 0x02; - if ((ppd->link_speed_supported & OPA_LINK_SPEED_12_5G) && - (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)) - config_data |= 0x01; - ret = load_8051_config(ppd->dd, TX_SETTINGS, GENERAL_CONFIG, - config_data); - if (ret != HCMD_SUCCESS) - dd_dev_err( - ppd->dd, - "%s: Failed to set external device config parameters\n", - __func__); - - config_data = (total_atten << 8) | (total_atten); - - apply_tx_lanes(ppd, config_data, "Setting channel loss"); + /* Set same channel loss for both TX and RX */ + config_data = 0 | (total_atten << 16) | (total_atten << 24); + apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data, + "Setting channel loss"); + + /* Inform 8051 of cable capabilities */ + if (ppd->qsfp_info.cache_valid) { + external_device_config = + ((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) | + ((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) | + ((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) | + (cache[QSFP_EQ_INFO_OFFS] & 0x4); + ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, + GENERAL_CONFIG, &config_data); + /* Clear, then set the external device config field */ + config_data &= ~(0xFF << 24); + config_data |= (external_device_config << 24); + ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS, + GENERAL_CONFIG, config_data); + if (ret != HCMD_SUCCESS) + dd_dev_info(ppd->dd, + "%s: Failed set ext device config params\n", + __func__); + } - if (tx_preset_index == OPA_INVALID_INDEX) + if (tx_preset_index == OPA_INVALID_INDEX) { + if (ppd->port_type == PORT_TYPE_QSFP && limiting_active) + dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n", + __func__); return; + } + /* Following for limiting active channels only */ get_platform_config_field( ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index, TX_PRESET_TABLE_PRECUR, &tx_preset, 4); @@ -603,7 +597,8 @@ static void apply_tunings( config_data = precur | (attn << 8) | (postcur << 16); - apply_tx_lanes(ppd, config_data, "Applying TX settings"); + apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data, + "Applying TX settings"); } static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, @@ -766,7 +761,7 @@ void tune_serdes(struct hfi1_pportdata *ppd) u32 total_atten = 0; u32 remote_atten = 0, platform_atten = 0; u32 rx_preset_index, tx_preset_index; - u8 tuning_method = 0; + u8 tuning_method = 0, limiting_active = 0; struct hfi1_devdata *dd = ppd->dd; rx_preset_index = OPA_INVALID_INDEX; @@ -789,7 +784,7 @@ void tune_serdes(struct hfi1_pportdata *ppd) PORT_TABLE_PORT_TYPE, &ppd->port_type, 4); if (ret) - goto bail; + ppd->port_type = PORT_TYPE_UNKNOWN; switch (ppd->port_type) { case PORT_TYPE_DISCONNECTED: @@ -853,6 +848,9 @@ void 
tune_serdes(struct hfi1_pportdata *ppd) refresh_qsfp_cache(ppd, &ppd->qsfp_info); if (ret) goto bail; + + limiting_active = + ppd->qsfp_info.limiting_active; } else { dd_dev_err(dd, "%s: Reading QSFP memory failed\n", @@ -866,13 +864,18 @@ void tune_serdes(struct hfi1_pportdata *ppd) break; default: dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__); - goto bail; + ppd->port_type = PORT_TYPE_UNKNOWN; + tuning_method = OPA_UNKNOWN_TUNING; + total_atten = 0; + limiting_active = 0; + tx_preset_index = OPA_INVALID_INDEX; + break; } + if (ppd->offline_disabled_reason == HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) apply_tunings(ppd, tx_preset_index, tuning_method, - total_atten, - ppd->qsfp_info.limiting_active); + total_atten, limiting_active); if (!ret) ppd->driver_link_ready = 1; diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index c9d1e64ef681..42e5be494fca 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -344,7 +344,6 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) ppd->qsfp_info.cache_valid = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); - dd_dev_info(ppd->dd, "%s called\n", __func__); if (!qsfp_mod_present(ppd)) { ret = -ENODEV; goto bail; -- cgit v1.2.3-59-g8ed1b From 34cee28f0bb067f4210271c4d7c4febe34bad2d3 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Tue, 9 Feb 2016 14:29:31 -0800 Subject: staging/rdma/hfi1: actually use new RNR timer API in loopback path The patch series which added a new API for the RNR timer did not include an updated call in the loopback path. RC/UC RNR loopback would be broken without this. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 2 +- drivers/staging/rdma/hfi1/ruc.c | 8 +++----- drivers/staging/rdma/hfi1/verbs.h | 1 + 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index e54e0b4bb5e5..ba2a2ccac6f2 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -81,7 +81,7 @@ static inline void hfi1_add_retry_timer(struct rvt_qp *qp) * * add an rnr timer on the QP */ -static inline void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) +void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) { struct hfi1_qp_priv *priv = qp->priv; diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 66449acac76d..a7add3c5d0f2 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -371,6 +371,7 @@ static void ruc_loopback(struct rvt_qp *sqp) int release; int ret; int copy_last = 0; + u32 to; rcu_read_lock(); @@ -600,11 +601,8 @@ rnr_nak: spin_lock_irqsave(&sqp->s_lock, flags); if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK)) goto clr_busy; - sqp->s_flags |= RVT_S_WAIT_RNR; - sqp->s_timer.function = hfi1_rc_rnr_retry; - sqp->s_timer.expires = jiffies + - usecs_to_jiffies(ib_hfi1_rnr_table[qp->r_min_rnr_timer]); - add_timer(&sqp->s_timer); + to = ib_hfi1_rnr_table[qp->r_min_rnr_timer]; + hfi1_add_rnr_timer(sqp, to); goto clr_busy; op_err: diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 26eda8a3e55e..adb63bb6fae2 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -404,6 +404,7 @@ u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, 
u16 dlid); void hfi1_rc_rnr_retry(unsigned long arg); +void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to); void hfi1_rc_timeout(unsigned long arg); void hfi1_del_timers_sync(struct rvt_qp *qp); void hfi1_stop_rc_timers(struct rvt_qp *qp); -- cgit v1.2.3-59-g8ed1b From 773d0451685b96f157ccd617a5e9cd9d3fa7d986 Mon Sep 17 00:00:00 2001 From: Sebastian Sanchez Date: Tue, 9 Feb 2016 14:29:40 -0800 Subject: staging/rdma/hfi1: Turning off LED without checking if stepping is Ax It prevents the LED from staying on when the QSFP module is not present. Reviewed-by: Easwar Hariharan Signed-off-by: Sebastian Sanchez Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 4 ++-- drivers/staging/rdma/hfi1/pcie.c | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 4750e3c2db3e..129461770186 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13418,8 +13418,8 @@ static void init_chip(struct hfi1_devdata *dd) write_csr(dd, CCE_DC_CTRL, 0); /* Set the LED off */ - if (is_ax(dd)) - setextled(dd, 0); + setextled(dd, 0); + /* * Clear the QSFP reset. * An FLR enforces a 0 on all out pins. The driver does not touch diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 5642d859fc7c..04f2d8a37f36 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -1261,8 +1261,7 @@ retry: write_csr(dd, CCE_DC_CTRL, 0); /* Set the LED off */ - if (is_ax(dd)) - setextled(dd, 0); + setextled(dd, 0); /* check for any per-lane errors */ pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, ®32); -- cgit v1.2.3-59-g8ed1b From bfee5e32e701b98634b380a9eef8b5820feb7488 Mon Sep 17 00:00:00 2001 From: Vennila Megavannan Date: Tue, 9 Feb 2016 14:29:49 -0800 Subject: IB/rdmavt, staging/rdma/hfi1: use qps to dynamically scale timeout value A busy_jiffies variable is maintained and updated when rc qps are created and deleted. busy_jiffies is a scaled value of the number of rc qps in the device. busy_jiffies is incremented every rc qp scaling interval. busy_jiffies is added to the rc timeout in add_retry_timer and mod_retry_timer. The rc qp scaling interval is selected based on extensive performance evaluation of targeted workloads. Reviewed-by: Dennis Dalessandro Reviewed-by: Mike Marciniszyn Signed-off-by: Vennila Megavannan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rdmavt/qp.c | 17 +++++++++++++++++ drivers/staging/rdma/hfi1/rc.c | 12 ++++++++++-- include/rdma/rdma_vt.h | 4 +++- include/rdma/rdmavt_qp.h | 2 ++ 4 files changed, 32 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 439213c37537..7dc837c6554b 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -685,6 +685,19 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, } rdi->n_qps_allocated++; + /* + * Maintain a busy_jiffies variable that will be added to the timeout + * period in mod_retry_timer and add_retry_timer. This busy jiffies + * is scaled by the number of rc qps created for the device to reduce + * the number of timeouts occurring when there is a large number of + * qps. busy_jiffies is incremented every rc qp scaling interval. + * The scaling interval is selected based on extensive performance + * evaluation of targeted workloads. 
+ */ + if (init_attr->qp_type == IB_QPT_RC) { + rdi->n_rc_qps++; + rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL; + } spin_unlock(&rdi->n_qps_lock); if (qp->ip) { @@ -1223,6 +1236,10 @@ int rvt_destroy_qp(struct ib_qp *ibqp) spin_lock(&rdi->n_qps_lock); rdi->n_qps_allocated--; + if (qp->ibqp.qp_type == IB_QPT_RC) { + rdi->n_rc_qps--; + rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL; + } spin_unlock(&rdi->n_qps_lock); if (qp->ip) diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index ba2a2ccac6f2..a4a44d33d857 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -68,9 +68,13 @@ */ static inline void hfi1_add_retry_timer(struct rvt_qp *qp) { + struct ib_qp *ibqp = &qp->ibqp; + struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); + qp->s_flags |= RVT_S_TIMER; /* 4.096 usec. * (1 << qp->timeout) */ - qp->s_timer.expires = jiffies + qp->timeout_jiffies; + qp->s_timer.expires = jiffies + qp->timeout_jiffies + + rdi->busy_jiffies; add_timer(&qp->s_timer); } @@ -99,9 +103,13 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) */ static inline void hfi1_mod_retry_timer(struct rvt_qp *qp) { + struct ib_qp *ibqp = &qp->ibqp; + struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); + qp->s_flags |= RVT_S_TIMER; /* 4.096 usec. * (1 << qp->timeout) */ - mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); + mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies + + rdi->busy_jiffies); } /** diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 4242fea9cf4e..5ccf683b28f1 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -318,7 +318,9 @@ struct rvt_dev_info { /* QP */ struct rvt_qp_ibdev *qp_dev; u32 n_qps_allocated; /* number of QPs allocated for device */ - spinlock_t n_qps_lock; /* keep track of number of qps */ + u32 n_rc_qps; /* number of RC QPs allocated for device */ + u32 busy_jiffies; /* timeout scaling based on RC QP count */ + spinlock_t n_qps_lock; /* protect qps, rc qps and busy jiffy counts */ /* memory maps */ struct list_head pending_mmaps; diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index aed13e13591c..b3ea74579316 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -225,6 +225,8 @@ struct rvt_ack_entry { }; }; +#define RC_QP_SCALING_INTERVAL 5 + /* * Variables prefixed with s_ are for the requester (sender). * Variables prefixed with r_ are for the responder (receiver). -- cgit v1.2.3-59-g8ed1b From 46a80d62e6e0ccfc9d8a05c5b773405b84a4afd7 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:10:04 -0800 Subject: IB/qib, staging/rdma/hfi1: add s_hlock for use in post send This patch adds an additional lock to reduce contention on the s_lock. This lock is used in post_send() so that the post_send is not serialized with the send engine and other send related processing. To do this the s_next_psn is now maintained on post_send() while post_send() related fields are moved to a new cache line. There is an s_avail maintained for the post_send() to mitigate trading cache lines with the send engine. The lock is released/acquired around releasing the just built packet to the egress mechanism. 
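To make the locking split concrete, the following is a hand-written userspace analogue (illustrative only; none of these symbols come from this series, and pthread spinlocks stand in for the kernel's spinlock_t). The producer takes only the s_hlock analogue and works from a cached free-slot count, so it touches the consumer's side only when that count runs out:

#include <pthread.h>
#include <stdint.h>

#define SQ_SIZE 256                 /* ring slots; head == last means empty */

struct wqe {
    uint32_t psn;                   /* first PSN of the request */
    uint32_t lpsn;                  /* last PSN of the request */
    uint32_t length;
};

struct sq {
    /* producer (post-send) side, guarded by hlock */
    pthread_spinlock_t hlock;
    uint32_t head;                  /* new entries added here */
    uint32_t next_psn;              /* PSN assigned at post time */
    uint32_t avail;                 /* cached count of free slots */
    /* consumer (send engine) side, guarded by lock */
    pthread_spinlock_t lock;
    uint32_t last;                  /* last completed entry */
    struct wqe ring[SQ_SIZE];
};

/*
 * Recompute free slots from the consumer's 'last'. Called only when the
 * cached count hits zero, so posting normally avoids pulling the
 * consumer's cache line at all.
 */
static uint32_t sq_get_avail(struct sq *sq)
{
    uint32_t last = __atomic_load_n(&sq->last, __ATOMIC_ACQUIRE);

    if (sq->head >= last)
        return SQ_SIZE - (sq->head - last) - 1;
    return last - sq->head - 1;
}

/* Post one request: takes only hlock, never the send engine's lock. */
static int sq_post(struct sq *sq, uint32_t len, uint8_t log_pmtu)
{
    struct wqe *w;

    pthread_spin_lock(&sq->hlock);
    if (!sq->avail)
        sq->avail = sq_get_avail(sq);
    if (!sq->avail) {
        pthread_spin_unlock(&sq->hlock);
        return -1;                  /* ring full */
    }
    w = &sq->ring[sq->head];
    w->length = len;
    w->psn = sq->next_psn;
    /* one PSN per pmtu-sized packet, assigned at post time */
    w->lpsn = w->psn + (len ? (len - 1) >> log_pmtu : 0);
    sq->next_psn = w->lpsn + 1;
    sq->avail--;
    if (++sq->head == SQ_SIZE)
        sq->head = 0;
    pthread_spin_unlock(&sq->hlock);
    return 0;
}

/* Consumer: retire one entry (ring assumed non-empty), publish 'last'. */
static void sq_complete(struct sq *sq)
{
    uint32_t next;

    pthread_spin_lock(&sq->lock);
    next = sq->last + 1 == SQ_SIZE ? 0 : sq->last + 1;
    __atomic_store_n(&sq->last, next, __ATOMIC_RELEASE);
    pthread_spin_unlock(&sq->lock);
}

The consumer retires entries under its own lock and publishes 'last' with release semantics; this sketch uses C11-style atomics where the real ring below pairs smp_wmb() with ACCESS_ONCE() reads.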
Reviewed-by: Jubin John Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Harish Chegondi Signed-off-by: Mike Marciniszyn Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qib/qib_qp.c | 36 +++++++++++++ drivers/infiniband/hw/qib/qib_rc.c | 44 ++++------------ drivers/infiniband/hw/qib/qib_ruc.c | 11 ++-- drivers/infiniband/hw/qib/qib_uc.c | 22 ++++---- drivers/infiniband/hw/qib/qib_ud.c | 22 ++++---- drivers/infiniband/hw/qib/qib_verbs.c | 37 +++++++++---- drivers/infiniband/hw/qib/qib_verbs.h | 6 +-- drivers/infiniband/sw/rdmavt/qp.c | 97 +++++++++++++++++++++++++++-------- drivers/staging/rdma/hfi1/qp.c | 79 +++++++++++++++++++++++++--- drivers/staging/rdma/hfi1/qp.h | 37 +------------ drivers/staging/rdma/hfi1/rc.c | 44 ++++------------ drivers/staging/rdma/hfi1/ruc.c | 40 ++++++++------- drivers/staging/rdma/hfi1/uc.c | 21 ++++---- drivers/staging/rdma/hfi1/ud.c | 22 ++++---- drivers/staging/rdma/hfi1/verbs.c | 3 +- drivers/staging/rdma/hfi1/verbs.h | 2 +- include/rdma/rdma_vt.h | 4 +- include/rdma/rdmavt_qp.h | 13 +++-- 18 files changed, 319 insertions(+), 221 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 01d49dc91de2..6ffa0221da9f 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -474,6 +474,42 @@ void qib_get_credit(struct rvt_qp *qp, u32 aeth) } } +/** + * qib_check_send_wqe - validate wr/wqe + * @qp - The qp + * @wqe - The built wqe + * + * validate wr/wqe. This is called + * prior to inserting the wqe into + * the ring but after the wqe has been + * setup. + * + * Returns 0 on success, -EINVAL on failure + */ +int qib_check_send_wqe(struct rvt_qp *qp, + struct rvt_swqe *wqe) +{ + struct rvt_ah *ah; + + switch (qp->ibqp.qp_type) { + case IB_QPT_RC: + case IB_QPT_UC: + if (wqe->length > 0x80000000U) + return -EINVAL; + break; + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_UD: + ah = ibah_to_rvtah(wqe->ud_wr.ah); + if (wqe->length > (1 << ah->log_pmtu)) + return -EINVAL; + break; + default: + break; + } + return 0; +} + #ifdef CONFIG_DEBUG_FS struct qib_qp_iter { diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index ce886b2ade74..9088e26d3ac8 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -226,6 +226,8 @@ bail: * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) * @qp: a pointer to the QP * + * Assumes the s_lock is held. + * * Return 1 if constructed; otherwise, return 0. */ int qib_make_rc_req(struct rvt_qp *qp) @@ -241,7 +243,6 @@ int qib_make_rc_req(struct rvt_qp *qp) u32 bth2; u32 pmtu = qp->pmtu; char newreq; - unsigned long flags; int ret = 0; int delta; @@ -249,12 +250,6 @@ int qib_make_rc_req(struct rvt_qp *qp) if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ohdr = &priv->s_hdr->u.l.oth; - /* - * The lock is needed to synchronize between the sending tasklet, - * the receive interrupt handler, and timeout resends. - */ - spin_lock_irqsave(&qp->s_lock, flags); - /* Sending responses has higher priority over sending requests. */ if ((qp->s_flags & RVT_S_RESP_PENDING) && qib_make_rc_ack(dev, qp, ohdr, pmtu)) @@ -264,7 +259,8 @@ int qib_make_rc_req(struct rvt_qp *qp) if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. 
*/ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_dma_busy)) { @@ -321,8 +317,8 @@ int qib_make_rc_req(struct rvt_qp *qp) qp->s_flags |= RVT_S_WAIT_FENCE; goto bail; } - wqe->psn = qp->s_next_psn; newreq = 1; + qp->s_psn = wqe->psn; } /* * Note that we have to be careful not to modify the @@ -341,9 +337,7 @@ int qib_make_rc_req(struct rvt_qp *qp) qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; goto bail; } - wqe->lpsn = wqe->psn; if (len > pmtu) { - wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(SEND_FIRST); len = pmtu; break; @@ -381,9 +375,7 @@ int qib_make_rc_req(struct rvt_qp *qp) cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); hwords += sizeof(struct ib_reth) / sizeof(u32); - wqe->lpsn = wqe->psn; if (len > pmtu) { - wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(RDMA_WRITE_FIRST); len = pmtu; break; @@ -418,13 +410,6 @@ int qib_make_rc_req(struct rvt_qp *qp) qp->s_num_rd_atomic++; if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; - /* - * Adjust s_next_psn to count the - * expected number of responses. - */ - if (len > pmtu) - qp->s_next_psn += (len - 1) / pmtu; - wqe->lpsn = qp->s_next_psn++; } ohdr->u.rc.reth.vaddr = @@ -456,7 +441,6 @@ int qib_make_rc_req(struct rvt_qp *qp) qp->s_num_rd_atomic++; if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; - wqe->lpsn = wqe->psn; } if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { qp->s_state = OP(COMPARE_SWAP); @@ -499,11 +483,8 @@ int qib_make_rc_req(struct rvt_qp *qp) } if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_psn = wqe->lpsn + 1; - else { + else qp->s_psn++; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; - } break; case OP(RDMA_READ_RESPONSE_FIRST): @@ -523,8 +504,6 @@ int qib_make_rc_req(struct rvt_qp *qp) /* FALLTHROUGH */ case OP(SEND_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { @@ -564,8 +543,6 @@ int qib_make_rc_req(struct rvt_qp *qp) /* FALLTHROUGH */ case OP(RDMA_WRITE_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { @@ -630,13 +607,9 @@ int qib_make_rc_req(struct rvt_qp *qp) qp->s_cur_size = len; qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2); done: - ret = 1; - goto unlock; - + return 1; bail: qp->s_flags &= ~RVT_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } @@ -1454,7 +1427,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, goto ack_done; /* Ignore invalid responses. */ - if (qib_cmp24(psn, qp->s_next_psn) >= 0) + smp_read_barrier_depends(); /* see post_one_send */ + if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0) goto ack_done; /* Ignore duplicate responses. 
*/ diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 2623684745f0..a5f07a64b228 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -391,7 +391,8 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) sqp->s_flags |= RVT_S_BUSY; again: - if (sqp->s_last == sqp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) goto clr_busy; wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); @@ -765,22 +766,24 @@ void qib_do_send(struct rvt_qp *qp) qp->s_flags |= RVT_S_BUSY; - spin_unlock_irqrestore(&qp->s_lock, flags); - do { /* Check for a constructed packet to be sent. */ if (qp->s_hdrwords != 0) { + spin_unlock_irqrestore(&qp->s_lock, flags); /* * If the packet cannot be sent now, return and * the send tasklet will be woken up later. */ if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords, qp->s_cur_sge, qp->s_cur_size)) - break; + return; /* Record that s_hdr is empty. */ qp->s_hdrwords = 0; + spin_lock_irqsave(&qp->s_lock, flags); } } while (make_req(qp)); + + spin_unlock_irqrestore(&qp->s_lock, flags); } /* diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 1b2fc69855b2..7bdbc79ceaa3 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -41,6 +41,8 @@ * qib_make_uc_req - construct a request packet (SEND, RDMA write) * @qp: a pointer to the QP * + * Assumes the s_lock is held. + * * Return 1 if constructed; otherwise, return 0. */ int qib_make_uc_req(struct rvt_qp *qp) @@ -48,20 +50,18 @@ int qib_make_uc_req(struct rvt_qp *qp) struct qib_qp_priv *priv = qp->priv; struct qib_other_headers *ohdr; struct rvt_swqe *wqe; - unsigned long flags; u32 hwords; u32 bth0; u32 len; u32 pmtu = qp->pmtu; int ret = 0; - spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_dma_busy)) { @@ -90,13 +90,13 @@ int qib_make_uc_req(struct rvt_qp *qp) RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* Check if send work queue is empty. */ - if (qp->s_cur == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_cur == ACCESS_ONCE(qp->s_head)) goto bail; /* * Start a new request. */ - wqe->psn = qp->s_next_psn; - qp->s_psn = qp->s_next_psn; + qp->s_psn = wqe->psn; qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; @@ -215,15 +215,11 @@ int qib_make_uc_req(struct rvt_qp *qp) qp->s_cur_sge = &qp->s_sge; qp->s_cur_size = len; qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), - qp->s_next_psn++ & QIB_PSN_MASK); + qp->s_psn++ & QIB_PSN_MASK); done: - ret = 1; - goto unlock; - + return 1; bail: qp->s_flags &= ~RVT_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index fe4917272b89..d9502137de62 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -234,6 +234,8 @@ drop: * qib_make_ud_req - construct a UD request packet * @qp: the QP * + * Assumes the s_lock is held. 
+ * * Return 1 if constructed; otherwise, return 0. */ int qib_make_ud_req(struct rvt_qp *qp) @@ -244,7 +246,6 @@ int qib_make_ud_req(struct rvt_qp *qp) struct qib_pportdata *ppd; struct qib_ibport *ibp; struct rvt_swqe *wqe; - unsigned long flags; u32 nwords; u32 extra_bytes; u32 bth0; @@ -253,13 +254,12 @@ int qib_make_ud_req(struct rvt_qp *qp) int ret = 0; int next_cur; - spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_dma_busy)) { @@ -271,7 +271,9 @@ int qib_make_ud_req(struct rvt_qp *qp) goto done; } - if (qp->s_cur == qp->s_head) + /* see post_one_send() */ + smp_read_barrier_depends(); + if (qp->s_cur == ACCESS_ONCE(qp->s_head)) goto bail; wqe = rvt_get_swqe_ptr(qp, qp->s_cur); @@ -292,6 +294,7 @@ int qib_make_ud_req(struct rvt_qp *qp) this_cpu_inc(ibp->pmastats->n_unicast_xmit); lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); if (unlikely(lid == ppd->lid)) { + unsigned long flags; /* * If DMAs are in progress, we can't generate * a completion for the loopback packet since @@ -304,6 +307,7 @@ int qib_make_ud_req(struct rvt_qp *qp) goto bail; } qp->s_cur = next_cur; + local_irq_save(flags); spin_unlock_irqrestore(&qp->s_lock, flags); qib_ud_loopback(qp, wqe); spin_lock_irqsave(&qp->s_lock, flags); @@ -378,7 +382,7 @@ int qib_make_ud_req(struct rvt_qp *qp) ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) ? cpu_to_be32(QIB_MULTICAST_QPN) : cpu_to_be32(wqe->ud_wr.remote_qpn); - ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); + ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK); /* * Qkeys with the high order bit set mean use the * qkey from the QP context instead of the WR (see 10.2.5). 
@@ -388,13 +392,9 @@ int qib_make_ud_req(struct rvt_qp *qp) ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); done: - ret = 1; - goto unlock; - + return 1; bail: qp->s_flags &= ~RVT_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index fa94f78073cf..5cf019fb50d9 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1662,6 +1662,7 @@ int qib_register_ib_device(struct qib_devdata *dd) dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name; dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev; dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah; + dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe; dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah; dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn; dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc; @@ -1677,6 +1678,7 @@ int qib_register_ib_device(struct qib_devdata *dd) dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu; dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp; dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr; + dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send; dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port; dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port; dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg; @@ -1778,17 +1780,34 @@ void qib_unregister_ib_device(struct qib_devdata *dd) dev->pio_hdrs, dev->pio_hdrs_phys); } -/* - * This must be called with s_lock held. +/** + * _qib_schedule_send - schedule progress + * @qp - the qp + * + * This schedules progress w/o regard to the s_flags. + * + * It is only used in post send, which doesn't hold + * the s_lock. */ -void qib_schedule_send(struct rvt_qp *qp) +void _qib_schedule_send(struct rvt_qp *qp) { + struct qib_ibport *ibp = + to_iport(qp->ibqp.device, qp->port_num); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_qp_priv *priv = qp->priv; - if (qib_send_ok(qp)) { - struct qib_ibport *ibp = - to_iport(qp->ibqp.device, qp->port_num); - struct qib_pportdata *ppd = ppd_from_ibp(ibp); - queue_work(ppd->qib_wq, &priv->s_work); - } + queue_work(ppd->qib_wq, &priv->s_work); +} + +/** + * qib_schedule_send - schedule progress + * @qp - the qp + * + * This schedules qp progress. The s_lock + * should be held. + */ +void qib_schedule_send(struct rvt_qp *qp) +{ + if (qib_send_ok(qp)) + _qib_schedule_send(qp); } diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index b88e027b6cb0..d137d714935d 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -298,9 +298,7 @@ static inline int qib_send_ok(struct rvt_qp *qp) !(qp->s_flags & RVT_S_ANY_WAIT_SEND)); } -/* - * This must be called with s_lock held. 
- */ +void _qib_schedule_send(struct rvt_qp *qp); void qib_schedule_send(struct rvt_qp *qp); static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) @@ -392,6 +390,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); +int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe); + struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid); void qib_rc_rnr_retry(unsigned long arg); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 7dc837c6554b..522404ac7c38 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -401,6 +401,7 @@ void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, rdi->driver_f.flush_qp_waiters(qp); qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT); spin_unlock(&qp->s_lock); + spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); /* Stop the send queue and the retry timer */ @@ -415,6 +416,7 @@ void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, /* grab the lock b/c it was locked at call time */ spin_lock_irq(&qp->r_lock); + spin_lock(&qp->s_hlock); spin_lock(&qp->s_lock); rvt_clear_mr_refs(qp, 1); @@ -610,6 +612,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, * except for qp->ibqp.qp_num. */ spin_lock_init(&qp->r_lock); + spin_lock_init(&qp->s_hlock); spin_lock_init(&qp->s_lock); spin_lock_init(&qp->r_rq.lock); atomic_set(&qp->refcount, 0); @@ -620,6 +623,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, qp->state = IB_QPS_RESET; qp->s_wq = swq; qp->s_size = init_attr->cap.max_send_wr + 1; + qp->s_avail = init_attr->cap.max_send_wr; qp->s_max_sge = init_attr->cap.max_send_sge; if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) qp->s_flags = RVT_S_SIGNAL_REQ_WR; @@ -779,6 +783,7 @@ void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends) wqe->ud_wr.ah)->refcount); if (++qp->s_last >= qp->s_size) qp->s_last = 0; + smp_wmb(); /* see qp_set_savail */ } if (qp->s_rdma_mr) { rvt_put_mr(qp->s_rdma_mr); @@ -833,7 +838,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) rdi->driver_f.notify_error_qp(qp); /* Schedule the sending tasklet to drain the send work queue. */ - if (qp->s_last != qp->s_head) + if (ACCESS_ONCE(qp->s_last) != qp->s_head) rdi->driver_f.schedule_send(qp); rvt_clear_mr_refs(qp, 0); @@ -979,6 +984,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, link = rdma_port_get_link_layer(ibqp->device, qp->port_num); spin_lock_irq(&qp->r_lock); + spin_lock(&qp->s_hlock); spin_lock(&qp->s_lock); cur_state = attr_mask & IB_QP_CUR_STATE ? 
@@ -1151,6 +1157,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_PATH_MTU) { qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu); qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu); + qp->log_pmtu = ilog2(qp->pmtu); } if (attr_mask & IB_QP_RETRY_CNT) { @@ -1186,6 +1193,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, rdi->driver_f.modify_qp(qp, attr, attr_mask, udata); spin_unlock(&qp->s_lock); + spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) @@ -1207,6 +1215,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, inval: spin_unlock(&qp->s_lock); + spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); return -EINVAL; } @@ -1226,9 +1235,11 @@ int rvt_destroy_qp(struct ib_qp *ibqp) struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); spin_lock_irq(&qp->r_lock); + spin_lock(&qp->s_hlock); spin_lock(&qp->s_lock); rvt_reset_qp(rdi, qp, ibqp->qp_type); spin_unlock(&qp->s_lock); + spin_unlock(&qp->s_hlock); spin_unlock_irq(&qp->r_lock); /* qpn is now available for use again */ @@ -1357,6 +1368,28 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, return 0; } +/** + * qp_get_savail - return number of avail send entries + * + * @qp - the qp + * + * This assumes the s_hlock is held but the s_last + * qp variable is uncontrolled. + */ +static inline u32 qp_get_savail(struct rvt_qp *qp) +{ + u32 slast; + u32 ret; + + smp_read_barrier_depends(); /* see rc.c */ + slast = ACCESS_ONCE(qp->s_last); + if (qp->s_head >= slast) + ret = qp->s_size - (qp->s_head - slast); + else + ret = slast - qp->s_head; + return ret - 1; +} + /** * rvt_post_one_wr - post one RC, UC, or UD send work request * @qp: the QP to post on @@ -1372,6 +1405,8 @@ static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr) struct rvt_lkey_table *rkt; struct rvt_pd *pd; struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); + u8 log_pmtu; + int ret; /* IB spec says that num_sge == 0 is OK. 
*/ if (unlikely(wr->num_sge > qp->s_max_sge)) @@ -1403,16 +1438,16 @@ static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr) } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) { return -EINVAL; } - + /* check for avail */ + if (unlikely(!qp->s_avail)) { + qp->s_avail = qp_get_savail(qp); + WARN_ON(qp->s_avail > (qp->s_size - 1)); + if (!qp->s_avail) + return -ENOMEM; + } next = qp->s_head + 1; if (next >= qp->s_size) next = 0; - if (next == qp->s_last) - return -ENOMEM; - - if (rdi->driver_f.check_send_wr && - rdi->driver_f.check_send_wr(qp, wr)) - return -EINVAL; rkt = &rdi->lkey_table; pd = ibpd_to_rvtpd(qp->ibqp.pd); @@ -1444,21 +1479,39 @@ static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr) continue; ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], &wr->sg_list[i], acc); - if (!ok) + if (!ok) { + ret = -EINVAL; goto bail_inval_free; + } wqe->length += length; j++; } wqe->wr.num_sge = j; } - if (qp->ibqp.qp_type == IB_QPT_UC || - qp->ibqp.qp_type == IB_QPT_RC) { - if (wqe->length > 0x80000000U) + + /* general part of wqe valid - allow for driver checks */ + if (rdi->driver_f.check_send_wqe) { + ret = rdi->driver_f.check_send_wqe(qp, wqe); + if (ret) goto bail_inval_free; - } else { + } + + log_pmtu = qp->log_pmtu; + if (qp->ibqp.qp_type != IB_QPT_UC && + qp->ibqp.qp_type != IB_QPT_RC) { + struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah); + + log_pmtu = ah->log_pmtu; atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount); } + wqe->ssn = qp->s_ssn++; + wqe->psn = qp->s_next_psn; + wqe->lpsn = wqe->psn + + (wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0); + qp->s_next_psn = wqe->lpsn + 1; + smp_wmb(); /* see request builders */ + qp->s_avail--; qp->s_head = next; return 0; @@ -1470,7 +1523,7 @@ bail_inval_free: rvt_put_mr(sge->mr); } - return -EINVAL; + return ret; } /** @@ -1491,14 +1544,14 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, unsigned nreq = 0; int err = 0; - spin_lock_irqsave(&qp->s_lock, flags); + spin_lock_irqsave(&qp->s_hlock, flags); /* * Ensure QP state is such that we can send. If not bail out early, * there is no need to do this every time we post a send. */ if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) { - spin_unlock_irqrestore(&qp->s_lock, flags); + spin_unlock_irqrestore(&qp->s_hlock, flags); return -EINVAL; } @@ -1518,11 +1571,13 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, nreq++; } bail: - if (nreq && !call_send) - rdi->driver_f.schedule_send(qp); - spin_unlock_irqrestore(&qp->s_lock, flags); - if (nreq && call_send) - rdi->driver_f.do_send(qp); + spin_unlock_irqrestore(&qp->s_hlock, flags); + if (nreq) { + if (call_send) + rdi->driver_f.schedule_send_no_lock(qp); + else + rdi->driver_f.do_send(qp); + } return err; } diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index ec9ee726267b..00866c07fddc 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -226,16 +226,45 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, } } -int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr) +/** + * hfi1_check_send_wqe - validate wqe + * @qp - The qp + * @wqe - The built wqe + * + * validate wqe. This is called + * prior to inserting the wqe into + * the ring but after the wqe has been + * setup. 
+ * + * Returns 0 on success, -EINVAL on failure + * + */ +int hfi1_check_send_wqe(struct rvt_qp *qp, + struct rvt_swqe *wqe) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - struct rvt_ah *ah = ibah_to_rvtah(ud_wr(wr)->ah); + struct rvt_ah *ah; - if (qp->ibqp.qp_type != IB_QPT_RC && - qp->ibqp.qp_type != IB_QPT_UC && - qp->ibqp.qp_type != IB_QPT_SMI && - ibp->sl_to_sc[ah->attr.sl] == 0xf) { - return -EINVAL; + switch (qp->ibqp.qp_type) { + case IB_QPT_RC: + case IB_QPT_UC: + if (wqe->length > 0x80000000U) + return -EINVAL; + break; + case IB_QPT_SMI: + ah = ibah_to_rvtah(wqe->ud_wr.ah); + if (wqe->length > (1 << ah->log_pmtu)) + return -EINVAL; + break; + case IB_QPT_GSI: + case IB_QPT_UD: + ah = ibah_to_rvtah(wqe->ud_wr.ah); + if (wqe->length > (1 << ah->log_pmtu)) + return -EINVAL; + if (ibp->sl_to_sc[ah->attr.sl] == 0xf) + return -EINVAL; + default: + break; } return 0; } @@ -301,6 +330,42 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp) return cpu_to_be32(aeth); } +/** + * _hfi1_schedule_send - schedule progress + * @qp: the QP + * + * This schedules qp progress w/o regard to the s_flags. + * + * It is only used in the post send, which doesn't hold + * the s_lock. + */ +void _hfi1_schedule_send(struct rvt_qp *qp) +{ + struct hfi1_qp_priv *priv = qp->priv; + struct hfi1_ibport *ibp = + to_iport(qp->ibqp.device, qp->port_num); + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); + struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + + iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, + priv->s_sde ? + priv->s_sde->cpu : + cpumask_first(cpumask_of_node(dd->node))); +} + +/** + * hfi1_schedule_send - schedule progress + * @qp: the QP + * + * This schedules qp progress and caller should hold + * the s_lock. + */ +void hfi1_schedule_send(struct rvt_qp *qp) +{ + if (hfi1_send_ok(qp)) + _hfi1_schedule_send(qp); +} + /** * hfi1_get_credit - flush the send work queue of a QP * @qp: the qp who's send work queue to flush diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 973c14b5268a..98827b5dd2a1 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -137,41 +137,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter); */ void qp_comm_est(struct rvt_qp *qp); -/** - * _hfi1_schedule_send - schedule progress - * @qp: the QP - * - * This schedules qp progress w/o regard to the s_flags. - * - * It is only used in the post send, which doesn't hold - * the s_lock. - */ -static inline void _hfi1_schedule_send(struct rvt_qp *qp) -{ - struct hfi1_qp_priv *priv = qp->priv; - struct hfi1_ibport *ibp = - to_iport(qp->ibqp.device, qp->port_num); - struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); - - iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, - priv->s_sde ? - priv->s_sde->cpu : - cpumask_first(cpumask_of_node(dd->node))); -} - -/** - * hfi1_schedule_send - schedule progress - * @qp: the QP - * - * This schedules qp progress and caller should hold - * the s_lock. 
- */ -static inline void hfi1_schedule_send(struct rvt_qp *qp) -{ - if (hfi1_send_ok(qp)) - _hfi1_schedule_send(qp); -} +void _hfi1_schedule_send(struct rvt_qp *qp); +void hfi1_schedule_send(struct rvt_qp *qp); void hfi1_migrate_qp(struct rvt_qp *qp); diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index a4a44d33d857..a62c9424fa86 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -367,6 +367,8 @@ bail: * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) * @qp: a pointer to the QP * + * Assumes s_lock is held. + * * Return 1 if constructed; otherwise, return 0. */ int hfi1_make_rc_req(struct rvt_qp *qp) @@ -383,7 +385,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp) u32 bth2; u32 pmtu = qp->pmtu; char newreq; - unsigned long flags; int ret = 0; int middle = 0; int delta; @@ -392,12 +393,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp) if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ohdr = &priv->s_hdr->ibh.u.l.oth; - /* - * The lock is needed to synchronize between the sending tasklet, - * the receive interrupt handler, and timeout re-sends. - */ - spin_lock_irqsave(&qp->s_lock, flags); - /* Sending responses has higher priority over sending requests. */ if ((qp->s_flags & RVT_S_RESP_PENDING) && make_rc_ack(dev, qp, ohdr, pmtu)) @@ -407,7 +402,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp) if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_iowait.sdma_busy)) { @@ -463,8 +459,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp) qp->s_flags |= RVT_S_WAIT_FENCE; goto bail; } - wqe->psn = qp->s_next_psn; newreq = 1; + qp->s_psn = wqe->psn; } /* * Note that we have to be careful not to modify the @@ -483,9 +479,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp) qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; goto bail; } - wqe->lpsn = wqe->psn; if (len > pmtu) { - wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(SEND_FIRST); len = pmtu; break; @@ -522,9 +516,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp) cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); hwords += sizeof(struct ib_reth) / sizeof(u32); - wqe->lpsn = wqe->psn; if (len > pmtu) { - wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(RDMA_WRITE_FIRST); len = pmtu; break; @@ -559,13 +551,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp) qp->s_num_rd_atomic++; if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; - /* - * Adjust s_next_psn to count the - * expected number of responses. 
- */ - if (len > pmtu) - qp->s_next_psn += (len - 1) / pmtu; - wqe->lpsn = qp->s_next_psn++; } ohdr->u.rc.reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr); @@ -596,7 +581,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp) qp->s_num_rd_atomic++; if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; - wqe->lpsn = wqe->psn; } if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { qp->s_state = OP(COMPARE_SWAP); @@ -639,11 +623,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp) } if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_psn = wqe->lpsn + 1; - else { + else qp->s_psn++; - if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; - } break; case OP(RDMA_READ_RESPONSE_FIRST): @@ -663,8 +644,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp) /* FALLTHROUGH */ case OP(SEND_MIDDLE): bth2 = mask_psn(qp->s_psn++); - if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { @@ -705,8 +684,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp) /* FALLTHROUGH */ case OP(RDMA_WRITE_MIDDLE): bth2 = mask_psn(qp->s_psn++); - if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { @@ -777,13 +754,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp) bth2, middle); done: - ret = 1; - goto unlock; - + return 1; bail: qp->s_flags &= ~RVT_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } @@ -1563,7 +1536,8 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, trace_hfi1_rc_ack(qp, psn); /* Ignore invalid responses. */ - if (cmp_psn(psn, qp->s_next_psn) >= 0) + smp_read_barrier_depends(); /* see post_one_send */ + if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0) goto ack_done; /* Ignore duplicate responses. */ diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index a7add3c5d0f2..6114550bb73f 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -392,7 +392,8 @@ static void ruc_loopback(struct rvt_qp *sqp) sqp->s_flags |= RVT_S_BUSY; again: - if (sqp->s_last == sqp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) goto clr_busy; wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); @@ -871,40 +872,43 @@ void hfi1_do_send(struct rvt_qp *qp) qp->s_flags |= RVT_S_BUSY; - spin_unlock_irqrestore(&qp->s_lock, flags); - timeout = jiffies + (timeout_int) / 8; cpu = priv->s_sde ? priv->s_sde->cpu : cpumask_first(cpumask_of_node(ps.ppd->dd->node)); do { /* Check for a constructed packet to be sent. */ if (qp->s_hdrwords != 0) { + spin_unlock_irqrestore(&qp->s_lock, flags); /* * If the packet cannot be sent now, return and * the send tasklet will be woken up later. */ if (hfi1_verbs_send(qp, &ps)) - break; + return; /* Record that s_hdr is empty. 
*/ qp->s_hdrwords = 0; - } - - /* allow other tasks to run */ - if (unlikely(time_after(jiffies, timeout))) { - if (workqueue_congested(cpu, ps.ppd->hfi1_wq)) { - spin_lock_irqsave(&qp->s_lock, flags); - qp->s_flags &= ~RVT_S_BUSY; - hfi1_schedule_send(qp); - spin_unlock_irqrestore(&qp->s_lock, - flags); + /* allow other tasks to run */ + if (unlikely(time_after(jiffies, timeout))) { + if (workqueue_congested(cpu, + ps.ppd->hfi1_wq)) { + spin_lock_irqsave(&qp->s_lock, flags); + qp->s_flags &= ~RVT_S_BUSY; + hfi1_schedule_send(qp); + spin_unlock_irqrestore(&qp->s_lock, + flags); + this_cpu_inc( + *ps.ppd->dd->send_schedule); + return; + } + cond_resched(); this_cpu_inc(*ps.ppd->dd->send_schedule); - return; + timeout = jiffies + (timeout_int) / 8; } - cond_resched(); - this_cpu_inc(*ps.ppd->dd->send_schedule); - timeout = jiffies + (timeout_int) / 8; + spin_lock_irqsave(&qp->s_lock, flags); } } while (make_req(qp)); + + spin_unlock_irqrestore(&qp->s_lock, flags); } /* diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 0aa604b7557b..f884b5c8051b 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -59,6 +59,8 @@ * hfi1_make_uc_req - construct a request packet (SEND, RDMA write) * @qp: a pointer to the QP * + * Assume s_lock is held. + * * Return 1 if constructed; otherwise, return 0. */ int hfi1_make_uc_req(struct rvt_qp *qp) @@ -66,7 +68,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp) struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; struct rvt_swqe *wqe; - unsigned long flags; u32 hwords = 5; u32 bth0 = 0; u32 len; @@ -74,13 +75,12 @@ int hfi1_make_uc_req(struct rvt_qp *qp) int ret = 0; int middle = 0; - spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_iowait.sdma_busy)) { @@ -106,15 +106,15 @@ int hfi1_make_uc_req(struct rvt_qp *qp) RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* Check if send work queue is empty. */ - if (qp->s_cur == qp->s_head) { + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_cur == ACCESS_ONCE(qp->s_head)) { clear_ahg(qp); goto bail; } /* * Start a new request. */ - wqe->psn = qp->s_next_psn; - qp->s_psn = qp->s_next_psn; + qp->s_psn = wqe->psn; qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; @@ -235,15 +235,12 @@ int hfi1_make_uc_req(struct rvt_qp *qp) qp->s_cur_sge = &qp->s_sge; qp->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), - mask_psn(qp->s_next_psn++), middle); + mask_psn(qp->s_psn++), middle); done: - ret = 1; - goto unlock; + return 1; bail: qp->s_flags &= ~RVT_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index fdf6e3bee8f1..ba78e2e3e0bb 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -261,6 +261,8 @@ drop: * hfi1_make_ud_req - construct a UD request packet * @qp: the QP * + * Assume s_lock is held. + * * Return 1 if constructed; otherwise, return 0. 
*/ int hfi1_make_ud_req(struct rvt_qp *qp) @@ -271,7 +273,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp) struct hfi1_pportdata *ppd; struct hfi1_ibport *ibp; struct rvt_swqe *wqe; - unsigned long flags; u32 nwords; u32 extra_bytes; u32 bth0; @@ -281,13 +282,12 @@ int hfi1_make_ud_req(struct rvt_qp *qp) int next_cur; u8 sc5; - spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_iowait.sdma_busy)) { @@ -299,7 +299,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp) goto done; } - if (qp->s_cur == qp->s_head) + /* see post_one_send() */ + smp_read_barrier_depends(); + if (qp->s_cur == ACCESS_ONCE(qp->s_head)) goto bail; wqe = rvt_get_swqe_ptr(qp, qp->s_cur); @@ -317,6 +319,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) if (unlikely(!loopback && (lid == ppd->lid || (lid == be16_to_cpu(IB_LID_PERMISSIVE) && qp->ibqp.qp_type == IB_QPT_GSI)))) { + unsigned long flags; /* * If DMAs are in progress, we can't generate * a completion for the loopback packet since @@ -329,6 +332,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) goto bail; } qp->s_cur = next_cur; + local_irq_save(flags); spin_unlock_irqrestore(&qp->s_lock, flags); ud_loopback(qp, wqe); spin_lock_irqsave(&qp->s_lock, flags); @@ -408,7 +412,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index); ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn); - ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++)); + ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn)); /* * Qkeys with the high order bit set mean use the * qkey from the QP context instead of the WR (see 10.2.5). 
@@ -423,13 +427,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp) priv->s_hdr->sde = NULL; done: - ret = 1; - goto unlock; - + return 1; bail: qp->s_flags &= ~RVT_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 35f6d92a6249..1df464815247 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1533,6 +1533,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset; dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send; dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send; + dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send; dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr; dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp; dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters; @@ -1543,7 +1544,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu; dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp; dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp; - dd->verbs_dev.rdi.driver_f.check_send_wr = hfi1_check_send_wr; + dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe; /* completeion queue */ snprintf(dd->verbs_dev.rdi.dparms.cq_name, diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index adb63bb6fae2..d00c55d06c8c 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -427,7 +427,7 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); -int hfi1_check_send_wr(struct rvt_qp *qp, struct ib_send_wr *wr); +int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe); int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, int has_grh, struct rvt_qp *qp, u32 bth0); diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 5ccf683b28f1..aabd2e5bc5d7 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -220,6 +220,7 @@ struct rvt_ah { }; struct rvt_dev_info; +struct rvt_swqe; struct rvt_driver_provided { /* * The work to create port files in /sys/class Infiniband is different @@ -240,6 +241,7 @@ struct rvt_driver_provided { void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp); void (*notify_qp_reset)(struct rvt_qp *qp); void (*schedule_send)(struct rvt_qp *qp); + void (*schedule_send_no_lock)(struct rvt_qp *qp); void (*do_send)(struct rvt_qp *qp); int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp, struct ib_qp_attr *attr); @@ -273,7 +275,7 @@ struct rvt_driver_provided { void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); - int (*check_send_wr)(struct rvt_qp *qp, struct ib_send_wr *wr); + int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe); void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx); void (*notify_free_mad_agent)(struct rvt_dev_info *rdi, int port_idx); diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index b3ea74579316..1066b5d1b4d2 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -250,11 +250,12 @@ struct rvt_qp { enum ib_mtu path_mtu; int srate_mbps; /* s_srate (below) converted to Mbit/s */ u32 remote_qpn; - u32 pmtu; /* decoded from path_mtu 
*/ u32 qkey; /* QKEY for this QP (for UD or RD) */ u32 s_size; /* send work queue size */ u32 s_ahgpsn; /* set to the psn in the copy of the header */ + u16 pmtu; /* decoded from path_mtu */ + u8 log_pmtu; /* shift for pmtu */ u8 state; /* QP state */ u8 allowed_ops; /* high order bits of allowed opcodes */ u8 qp_access_flags; @@ -299,6 +300,13 @@ struct rvt_qp { struct rvt_sge_state r_sge; /* current receive data */ struct rvt_rq r_rq; /* receive work queue */ + /* post send line */ + spinlock_t s_hlock ____cacheline_aligned_in_smp; + u32 s_head; /* new entries added here */ + u32 s_next_psn; /* PSN for next request */ + u32 s_avail; /* number of entries avail */ + u32 s_ssn; /* SSN of tail entry */ + spinlock_t s_lock ____cacheline_aligned_in_smp; struct rvt_sge_state *s_cur_sge; u32 s_flags; @@ -308,19 +316,16 @@ struct rvt_qp { u32 s_cur_size; /* size of send packet in bytes */ u32 s_len; /* total length of s_sge */ u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ - u32 s_next_psn; /* PSN for next request */ u32 s_last_psn; /* last response PSN processed */ u32 s_sending_psn; /* lowest PSN that is being sent */ u32 s_sending_hpsn; /* highest PSN that is being sent */ u32 s_psn; /* current packet sequence number */ u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ - u32 s_head; /* new entries added here */ u32 s_tail; /* next entry to process */ u32 s_cur; /* current work queue entry */ u32 s_acked; /* last un-ACK'ed entry */ u32 s_last; /* last completed entry */ - u32 s_ssn; /* SSN of tail entry */ u32 s_lsn; /* limit sequence number (credit) */ u16 s_hdrwords; /* size of s_hdr in 32 bit words */ u16 s_rdma_ack_cnt; -- cgit v1.2.3-59-g8ed1b From 0ec79e875ada58bd6598d8965f9079fe1a662950 Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Sun, 14 Feb 2016 12:10:20 -0800 Subject: staging/rdma/hfi1: Put QPs into error state after SL->SC table changes If an SL->SC mapping table change occurs after an RC/UC QP is created, there is no mechanism to change the SC or the VL for that QP. The fix is to place the QP into the error state so that the ULP can recreate the QP with the new SL->SC mapping.
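In outline, the set handler now touches only the table entries that actually change, and kicks the QPs on that SL (a condensed sketch of the mad.c hunk below; MAD validation elided):

    for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
        u8 sc = *p++;

        if (ibp->sl_to_sc[i] != sc) {
            ibp->sl_to_sc[i] = sc;
            /* force ULPs to recreate any QP still using SL i */
            hfi1_error_port_qps(ibp, i);
        }
    }

hfi1_error_port_qps() then walks the QP table under RCU, taking r_lock, s_hlock and s_lock in order before moving each matching QP to the error state.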
Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Kaike Wan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mad.c | 13 +++++++++-- drivers/staging/rdma/hfi1/qp.c | 52 +++++++++++++++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/qp.h | 1 + 3 files changed, 64 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 2fcc9f3290d7..d9efe223328b 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -55,6 +55,7 @@ #include "hfi.h" #include "mad.h" #include "trace.h" +#include "qp.h" /* the reset value from the FM is supposed to be 0xffff, handle both */ #define OPA_LINK_WIDTH_RESET_OLD 0x0fff @@ -1517,14 +1518,22 @@ static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data, struct hfi1_ibport *ibp = to_iport(ibdev, port); u8 *p = data; int i; + u8 sc; if (am) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } - for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) - ibp->sl_to_sc[i] = *p++; + for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) { + sc = *p++; + if (ibp->sl_to_sc[i] != sc) { + ibp->sl_to_sc[i] = sc; + + /* Put all stale qps into error state */ + hfi1_error_port_qps(ibp, i); + } + } return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len); } diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 00866c07fddc..9e0531434eff 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -840,3 +840,55 @@ void notify_error_qp(struct rvt_qp *qp) } } +/** + * hfi1_error_port_qps - put a port's RC/UC qps into error state + * @ibp: the ibport. + * @sl: the service level. + * + * This function places all RC/UC qps with a given service level into error + * state. It is generally called to force upper layer apps to abandon stale qps + * after an sl->sc mapping change. + */ +void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl) +{ + struct rvt_qp *qp = NULL; + struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); + struct hfi1_ibdev *dev = &ppd->dd->verbs_dev; + int n; + int lastwqe; + struct ib_event ev; + + rcu_read_lock(); + + /* Deal only with RC/UC qps that use the given SL.
*/ + for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) { + for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp; + qp = rcu_dereference(qp->next)) { + if (qp->port_num == ppd->port && + (qp->ibqp.qp_type == IB_QPT_UC || + qp->ibqp.qp_type == IB_QPT_RC) && + qp->remote_ah_attr.sl == sl && + (ib_rvt_state_ops[qp->state] & + RVT_POST_SEND_OK)) { + spin_lock_irq(&qp->r_lock); + spin_lock(&qp->s_hlock); + spin_lock(&qp->s_lock); + lastwqe = rvt_error_qp(qp, + IB_WC_WR_FLUSH_ERR); + spin_unlock(&qp->s_lock); + spin_unlock(&qp->s_hlock); + spin_unlock_irq(&qp->r_lock); + if (lastwqe) { + ev.device = qp->ibqp.device; + ev.element.qp = &qp->ibqp; + ev.event = + IB_EVENT_QP_LAST_WQE_REACHED; + qp->ibqp.event_handler(&ev, + qp->ibqp.qp_context); + } + } + } + } + + rcu_read_unlock(); +} diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 98827b5dd2a1..afc2b4d242b7 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -158,4 +158,5 @@ void stop_send_queue(struct rvt_qp *qp); void quiesce_qp(struct rvt_qp *qp); u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu); int mtu_to_path_mtu(u32 mtu); +void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl); #endif /* _QP_H */ -- cgit v1.2.3-59-g8ed1b From 3585254d56b0c474a50f911295710e786b33d9ca Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:44:17 -0800 Subject: staging/rdma/hfi1: add s_avail to qp_stats This diagnostic capability was missed in the dual lock series. Signed-off-by: Vennila Megavannan Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 9e0531434eff..9846cd69d4f9 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -612,7 +612,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) sde = qp_to_sdma_engine(qp, priv->s_sc); wqe = rvt_get_swqe_ptr(qp, qp->s_last); seq_printf(s, - "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u %u SDE %p,%u\n", + "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u %u SDE %p,%u\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, @@ -632,6 +632,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) qp->s_sending_psn, qp->s_sending_hpsn, qp->s_last, qp->s_acked, qp->s_cur, qp->s_tail, qp->s_head, qp->s_size, + qp->s_avail, qp->remote_qpn, qp->remote_ah_attr.dlid, qp->remote_ah_attr.sl, -- cgit v1.2.3-59-g8ed1b From 45842abbb292338d7d328c40bae411218242d2cd Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:44:34 -0800 Subject: staging/rdma/hfi1: move txreq header code The patch separates the txreq defines into new files, one for verbs and one for sdma. The verbs_txreq implementation handles the setup and teardown of the txreq cache, so the register routine is changed to call the new init/exit routines. This patch allows follow-up patches to enhance the send engine.
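With the cache management behind init/exit helpers, the registration path reduces to the following pattern (a simplified sketch of the verbs.c hunks below):

    ret = verbs_txreq_init(dev);    /* creates the per-unit hfi1_%u_vtxreq_cache */
    if (ret)
        goto err_verbs_txreq;
    ...
    err_verbs_txreq:
        verbs_txreq_exit(dev);      /* destroys the cache and NULLs the pointer */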
Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 3 +- drivers/staging/rdma/hfi1/qp.c | 2 +- drivers/staging/rdma/hfi1/ruc.c | 2 +- drivers/staging/rdma/hfi1/sdma.h | 80 +---------------- drivers/staging/rdma/hfi1/sdma_txreq.h | 130 ++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/verbs.c | 111 ++---------------------- drivers/staging/rdma/hfi1/verbs_txreq.c | 149 ++++++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/verbs_txreq.h | 95 ++++++++++++++++++++ 8 files changed, 384 insertions(+), 188 deletions(-) create mode 100644 drivers/staging/rdma/hfi1/sdma_txreq.h create mode 100644 drivers/staging/rdma/hfi1/verbs_txreq.c create mode 100644 drivers/staging/rdma/hfi1/verbs_txreq.h (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index 6681b74b5cf3..9b117062d52e 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -11,7 +11,8 @@ hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \ eprom.o file_ops.o firmware.o \ init.o intr.o mad.o pcie.o pio.o pio_copy.o platform.o \ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ - uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o + uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \ + verbs_txreq.o hfi1-$(CONFIG_DEBUG_FS) += debugfs.o CFLAGS_trace.o = -I$(src) diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 9846cd69d4f9..7387ef5cd069 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -60,7 +60,7 @@ #include "hfi.h" #include "qp.h" #include "trace.h" -#include "sdma.h" +#include "verbs_txreq.h" unsigned int hfi1_qp_table_size = 256; module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO); diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 6114550bb73f..7c6feffe65cc 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -53,7 +53,7 @@ #include "hfi.h" #include "mad.h" #include "qp.h" -#include "sdma.h" +#include "verbs_txreq.h" /* * Convert the AETH RNR timeout code into the number of microseconds. diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 1d52d6e21bd0..76ed2157c514 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -58,9 +58,8 @@ #include "hfi.h" #include "verbs.h" +#include "sdma_txreq.h" -/* increased for AHG */ -#define NUM_DESC 6 /* Hardware limit */ #define MAX_DESC 64 /* Hardware limit for SDMA packet size */ @@ -311,83 +310,6 @@ struct hw_sdma_desc { __le64 qw[2]; }; -/* - * struct sdma_desc - canonical fragment descriptor - * - * This is the descriptor carried in the tx request - * corresponding to each fragment. - * - */ -struct sdma_desc { - /* private: don't use directly */ - u64 qw[2]; -}; - -struct sdma_txreq; -typedef void (*callback_t)(struct sdma_txreq *, int, int); - -/** - * struct sdma_txreq - the sdma_txreq structure (one per packet) - * @list: for use by user and by queuing for wait - * - * This is the representation of a packet which consists of some - * number of fragments. Storage is provided to within the structure. - * for all fragments. - * - * The storage for the descriptors are automatically extended as needed - * when the currently allocation is exceeded. 
- * - * The user (Verbs or PSM) may overload this structure with fields - * specific to their use by putting this struct first in their struct. - * The method of allocation of the overloaded structure is user dependent - * - * The list is the only public field in the structure. - * - */ - -struct sdma_txreq { - struct list_head list; - /* private: */ - struct sdma_desc *descp; - /* private: */ - void *coalesce_buf; - /* private: */ - u16 coalesce_idx; - /* private: */ - struct iowait *wait; - /* private: */ - callback_t complete; -#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER - u64 sn; -#endif - /* private: - used in coalesce/pad processing */ - u16 packet_len; - /* private: - down-counted to trigger last */ - u16 tlen; - /* private: flags */ - u16 flags; - /* private: */ - u16 num_desc; - /* private: */ - u16 desc_limit; - /* private: */ - u16 next_descq_idx; - /* private: */ - struct sdma_desc descs[NUM_DESC]; -}; - -struct verbs_txreq { - struct hfi1_pio_header phdr; - struct sdma_txreq txreq; - struct rvt_qp *qp; - struct rvt_swqe *wqe; - struct rvt_mregion *mr; - struct rvt_sge_state *ss; - struct sdma_engine *sde; - u16 hdr_dwords; - u16 hdr_inx; -}; - /** * struct sdma_engine - Data pertaining to each SDMA engine. * @dd: a back-pointer to the device data diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/staging/rdma/hfi1/sdma_txreq.h new file mode 100644 index 000000000000..d0f77a844e79 --- /dev/null +++ b/drivers/staging/rdma/hfi1/sdma_txreq.h @@ -0,0 +1,130 @@ +/* + * Copyright(c) 2016 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef HFI1_SDMA_TXREQ_H +#define HFI1_SDMA_TXREQ_H + +/* increased for AHG */ +#define NUM_DESC 6 + +/* + * struct sdma_desc - canonical fragment descriptor + * + * This is the descriptor carried in the tx request + * corresponding to each fragment. + * + */ +struct sdma_desc { + /* private: don't use directly */ + u64 qw[2]; +}; + +/** + * struct sdma_txreq - the sdma_txreq structure (one per packet) + * @list: for use by user and by queuing for wait + * + * This is the representation of a packet which consists of some + * number of fragments. Storage is provided within the structure + * for all fragments. + * + * The storage for the descriptors is automatically extended as needed + * when the current allocation is exceeded. + * + * The user (Verbs or PSM) may overload this structure with fields + * specific to their use by putting this struct first in their struct. + * The method of allocation of the overloaded structure is user dependent. + * + * The list is the only public field in the structure. + * + */ + +#define SDMA_TXREQ_S_OK 0 +#define SDMA_TXREQ_S_SENDERROR 1 +#define SDMA_TXREQ_S_ABORTED 2 +#define SDMA_TXREQ_S_SHUTDOWN 3 + +/* flags bits */ +#define SDMA_TXREQ_F_URGENT 0x0001 +#define SDMA_TXREQ_F_AHG_COPY 0x0002 +#define SDMA_TXREQ_F_USE_AHG 0x0004 + +struct sdma_txreq; +typedef void (*callback_t)(struct sdma_txreq *, int, int); + +struct iowait; +struct sdma_txreq { + struct list_head list; + /* private: */ + struct sdma_desc *descp; + /* private: */ + void *coalesce_buf; + /* private: */ + struct iowait *wait; + /* private: */ + callback_t complete; +#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER + u64 sn; +#endif + /* private: - used in coalesce/pad processing */ + u16 packet_len; + /* private: - down-counted to trigger last */ + u16 tlen; + /* private: */ + u16 num_desc; + /* private: */ + u16 desc_limit; + /* private: */ + u16 next_descq_idx; + /* private: */ + u16 coalesce_idx; + /* private: flags */ + u16 flags; + /* private: */ + struct sdma_desc descs[NUM_DESC]; +}; + +#endif /* HFI1_SDMA_TXREQ_H */ diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 1df464815247..7838b212d50c 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -63,7 +63,7 @@ #include "device.h" #include "trace.h" #include "qp.h" -#include "sdma.h" +#include "verbs_txreq.h" static unsigned int hfi1_lkey_table_size = 16; module_param_named(lkey_table_size, hfi1_lkey_table_size, uint, @@ -508,89 +508,6 @@ void update_sge(struct rvt_sge_state *ss, u32 length) } } -static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, - struct rvt_qp *qp) -{ - struct hfi1_qp_priv *priv = qp->priv; - struct verbs_txreq *tx; - unsigned long flags; - - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); - if (!tx) { - spin_lock_irqsave(&qp->s_lock, flags); - write_seqlock(&dev->iowait_lock); - if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK && - list_empty(&priv->s_iowait.list))
{ - dev->n_txwait++; - qp->s_flags |= RVT_S_WAIT_TX; - list_add_tail(&priv->s_iowait.list, &dev->txwait); - trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX); - atomic_inc(&qp->refcount); - } - qp->s_flags &= ~RVT_S_BUSY; - write_sequnlock(&dev->iowait_lock); - spin_unlock_irqrestore(&qp->s_lock, flags); - tx = ERR_PTR(-EBUSY); - } - return tx; -} - -static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, - struct rvt_qp *qp) -{ - struct verbs_txreq *tx; - - tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); - if (!tx) { - /* call slow path to get the lock */ - tx = __get_txreq(dev, qp); - if (IS_ERR(tx)) - return tx; - } - tx->qp = qp; - return tx; -} - -void hfi1_put_txreq(struct verbs_txreq *tx) -{ - struct hfi1_ibdev *dev; - struct rvt_qp *qp; - unsigned long flags; - unsigned int seq; - struct hfi1_qp_priv *priv; - - qp = tx->qp; - dev = to_idev(qp->ibqp.device); - - if (tx->mr) { - rvt_put_mr(tx->mr); - tx->mr = NULL; - } - sdma_txclean(dd_from_dev(dev), &tx->txreq); - - /* Free verbs_txreq and return to slab cache */ - kmem_cache_free(dev->verbs_txreq_cache, tx); - - do { - seq = read_seqbegin(&dev->iowait_lock); - if (!list_empty(&dev->txwait)) { - struct iowait *wait; - - write_seqlock_irqsave(&dev->iowait_lock, flags); - /* Wake up first QP wanting a free struct */ - wait = list_first_entry(&dev->txwait, struct iowait, - list); - qp = iowait_to_qp(wait); - priv = qp->priv; - list_del_init(&priv->s_iowait.list); - /* refcount held until actual wake up */ - write_sequnlock_irqrestore(&dev->iowait_lock, flags); - hfi1_qp_wakeup(qp, RVT_S_WAIT_TX); - break; - } - } while (read_seqretry(&dev->iowait_lock, seq)); -} - /* * This is called with progress side lock held. */ @@ -1427,13 +1344,6 @@ static void init_ibport(struct hfi1_pportdata *ppd) RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); } -static void verbs_txreq_kmem_cache_ctor(void *obj) -{ - struct verbs_txreq *tx = obj; - - memset(tx, 0, sizeof(*tx)); -} - /** * hfi1_register_ib_device - register our device with the infiniband core * @dd: the device data structure @@ -1447,8 +1357,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) unsigned i; int ret; size_t lcpysz = IB_DEVICE_NAME_MAX; - u16 descq_cnt; - char buf[TXREQ_NAME_LEN]; for (i = 0; i < dd->num_pports; i++) init_ibport(ppd + i); @@ -1461,18 +1369,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) INIT_LIST_HEAD(&dev->txwait); INIT_LIST_HEAD(&dev->memwait); - descq_cnt = sdma_get_descq_cnt(); - - snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit); - /* SLAB_HWCACHE_ALIGN for AHG */ - dev->verbs_txreq_cache = kmem_cache_create(buf, - sizeof(struct verbs_txreq), - 0, SLAB_HWCACHE_ALIGN, - verbs_txreq_kmem_cache_ctor); - if (!dev->verbs_txreq_cache) { - ret = -ENOMEM; + ret = verbs_txreq_init(dev); + if (ret) goto err_verbs_txreq; - } /* * The system image GUID is supposed to be the same for all @@ -1578,7 +1477,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) err_class: rvt_unregister_device(&dd->verbs_dev.rdi); err_verbs_txreq: - kmem_cache_destroy(dev->verbs_txreq_cache); + verbs_txreq_exit(dev); dd_dev_err(dd, "cannot register verbs: %d!\n", -ret); return ret; } @@ -1597,7 +1496,7 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd) dd_dev_err(dd, "memwait list not empty!\n"); del_timer_sync(&dev->mem_timer); - kmem_cache_destroy(dev->verbs_txreq_cache); + verbs_txreq_exit(dev); } void hfi1_cnp_rcv(struct hfi1_packet *packet) diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/staging/rdma/hfi1/verbs_txreq.c new file 
mode 100644 index 000000000000..bc95c4112c61 --- /dev/null +++ b/drivers/staging/rdma/hfi1/verbs_txreq.c @@ -0,0 +1,149 @@ +/* + * Copyright(c) 2016 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#include "hfi.h" +#include "verbs_txreq.h" +#include "qp.h" +#include "trace.h" + +#define TXREQ_LEN 24 + +void hfi1_put_txreq(struct verbs_txreq *tx) +{ + struct hfi1_ibdev *dev; + struct rvt_qp *qp; + unsigned long flags; + unsigned int seq; + struct hfi1_qp_priv *priv; + + qp = tx->qp; + dev = to_idev(qp->ibqp.device); + + if (tx->mr) + rvt_put_mr(tx->mr); + + sdma_txclean(dd_from_dev(dev), &tx->txreq); + + /* Free verbs_txreq and return to slab cache */ + kmem_cache_free(dev->verbs_txreq_cache, tx); + + do { + seq = read_seqbegin(&dev->iowait_lock); + if (!list_empty(&dev->txwait)) { + struct iowait *wait; + + write_seqlock_irqsave(&dev->iowait_lock, flags); + wait = list_first_entry(&dev->txwait, struct iowait, + list); + qp = iowait_to_qp(wait); + priv = qp->priv; + list_del_init(&priv->s_iowait.list); + /* refcount held until actual wake up */ + write_sequnlock_irqrestore(&dev->iowait_lock, flags); + hfi1_qp_wakeup(qp, RVT_S_WAIT_TX); + break; + } + } while (read_seqretry(&dev->iowait_lock, seq)); +} + +struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, + struct rvt_qp *qp) +{ + struct verbs_txreq *tx = ERR_PTR(-EBUSY); + unsigned long flags; + + spin_lock_irqsave(&qp->s_lock, flags); + write_seqlock(&dev->iowait_lock); + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { + struct hfi1_qp_priv *priv; + + tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); + if (tx) + goto out; + priv = qp->priv; + if (list_empty(&priv->s_iowait.list)) { + dev->n_txwait++; + qp->s_flags |= RVT_S_WAIT_TX; + list_add_tail(&priv->s_iowait.list, &dev->txwait); + trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX); + atomic_inc(&qp->refcount); + } + qp->s_flags &= ~RVT_S_BUSY; + } +out: + write_sequnlock(&dev->iowait_lock); + spin_unlock_irqrestore(&qp->s_lock, flags); + return tx; +} + +static void verbs_txreq_kmem_cache_ctor(void *obj) +{ + struct verbs_txreq *tx = (struct verbs_txreq *)obj; + + memset(tx, 0, sizeof(*tx)); +} + +int verbs_txreq_init(struct hfi1_ibdev *dev) +{ + char buf[TXREQ_LEN]; + struct hfi1_devdata *dd = dd_from_dev(dev); + + snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit); + dev->verbs_txreq_cache = kmem_cache_create(buf, + sizeof(struct verbs_txreq), + 0, SLAB_HWCACHE_ALIGN, + verbs_txreq_kmem_cache_ctor); + if (!dev->verbs_txreq_cache) + return -ENOMEM; + return 0; +} + +void verbs_txreq_exit(struct hfi1_ibdev *dev) +{ + kmem_cache_destroy(dev->verbs_txreq_cache); + dev->verbs_txreq_cache = NULL; +} diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/staging/rdma/hfi1/verbs_txreq.h new file mode 100644 index 000000000000..387882a54c7b --- /dev/null +++ b/drivers/staging/rdma/hfi1/verbs_txreq.h @@ -0,0 +1,95 @@ +/* + * Copyright(c) 2016 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef HFI1_VERBS_TXREQ_H +#define HFI1_VERBS_TXREQ_H + +#include +#include + +#include "verbs.h" +#include "sdma_txreq.h" +#include "iowait.h" + +struct verbs_txreq { + struct hfi1_pio_header phdr; + struct sdma_txreq txreq; + struct rvt_qp *qp; + struct rvt_swqe *wqe; + struct rvt_mregion *mr; + struct rvt_sge_state *ss; + struct sdma_engine *sde; + struct send_context *psc; + u16 hdr_dwords; +}; + +struct hfi1_ibdev; +struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, + struct rvt_qp *qp); + +static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, + struct rvt_qp *qp) +{ + struct verbs_txreq *tx; + + tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); + if (unlikely(!tx)) { + /* call slow path to get the lock */ + tx = __get_txreq(dev, qp); + if (IS_ERR(tx)) + return tx; + } + tx->qp = qp; + tx->mr = NULL; + return tx; +} + +void hfi1_put_txreq(struct verbs_txreq *tx); +int verbs_txreq_init(struct hfi1_ibdev *dev); +void verbs_txreq_exit(struct hfi1_ibdev *dev); + +#endif /* HFI1_VERBS_TXREQ_H */ -- cgit v1.2.3-59-g8ed1b From bb5df5f9eea6b9efb5911a5fef63b4614af01c89 Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Sun, 14 Feb 2016 12:44:43 -0800 Subject: staging/rdma/hfi1: Remove header memcpy from sdma send path. Instead of writing the header into a buffer then copying it into another buffer to be sent, remove that memcpy and instead build the header directly into the tx request that will be sent. 
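Schematically, the send path goes from build-then-copy to build-in-place (a simplified sketch; the full hunks follow):

    /* before: construct into the QP's scratch header, then memcpy */
    ohdr = &priv->s_hdr->ibh.u.oth;
    ...
    memcpy(&phdr->hdr, &ahdr->ibh, hdrbytes - sizeof(phdr->pbc));

    /* after: construct directly into the txreq handed to SDMA/PIO */
    ps->s_txreq = get_txreq(ps->dev, qp);
    ohdr = &ps->s_txreq->phdr.hdr.u.oth;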
Reviewed-by: Mike Marciniszyn Signed-off-by: Vennila Megavannan Signed-off-by: Dennis Dalessandro Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/diag.c | 20 +++++++---- drivers/staging/rdma/hfi1/rc.c | 42 +++++++++++++++------- drivers/staging/rdma/hfi1/ruc.c | 22 +++++++----- drivers/staging/rdma/hfi1/uc.c | 30 +++++++++++----- drivers/staging/rdma/hfi1/ud.c | 56 ++++++++++++++++++----------- drivers/staging/rdma/hfi1/verbs.c | 63 +++++++++++++++------------------ drivers/staging/rdma/hfi1/verbs.h | 18 ++++------ drivers/staging/rdma/hfi1/verbs_txreq.h | 1 - 8 files changed, 148 insertions(+), 104 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index bfce812c71ff..9523dc1b012f 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -70,6 +70,7 @@ #include "hfi.h" #include "device.h" #include "common.h" +#include "verbs_txreq.h" #include "trace.h" #undef pr_fmt @@ -1682,8 +1683,6 @@ int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { - struct hfi1_qp_priv *priv = qp->priv; - struct ahg_ib_header *ahdr = priv->s_hdr; u32 hdrwords = qp->s_hdrwords; struct rvt_sge_state *ss = qp->s_cur_sge; u32 len = qp->s_cur_size; @@ -1691,7 +1690,7 @@ int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u32 plen = hdrwords + dwords + 2; /* includes pbc */ struct hfi1_pportdata *ppd = ps->ppd; struct snoop_packet *s_packet = NULL; - u32 *hdr = (u32 *)&ahdr->ibh; + u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr; u32 length = 0; struct rvt_sge_state temp_ss; void *data = NULL; @@ -1702,7 +1701,7 @@ int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct capture_md md; u32 vl; u32 hdr_len = hdrwords << 2; - u32 tlen = HFI1_GET_PKT_LEN(&ahdr->ibh); + u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr); md.u.pbc = 0; @@ -1729,7 +1728,7 @@ int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, md.port = 1; md.dir = PKT_DIR_EGRESS; if (likely(pbc == 0)) { - vl = be16_to_cpu(ahdr->ibh.lrh[0]) >> 12; + vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12; md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen); } else { md.u.pbc = 0; @@ -1791,7 +1790,7 @@ int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, ret = HFI1_FILTER_HIT; } else { ret = ppd->dd->hfi1_snoop.filter_callback( - &ahdr->ibh, + &ps->s_txreq->phdr.hdr, NULL, ppd->dd->hfi1_snoop.filter_value); } @@ -1823,9 +1822,16 @@ int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, spin_unlock_irqrestore(&qp->s_lock, flags); } else if (qp->ibqp.qp_type == IB_QPT_RC) { spin_lock_irqsave(&qp->s_lock, flags); - hfi1_rc_send_complete(qp, &ahdr->ibh); + hfi1_rc_send_complete(qp, + &ps->s_txreq->phdr.hdr); spin_unlock_irqrestore(&qp->s_lock, flags); } + + /* + * If snoop is dropping the packet we need to put the + * txreq back because no one else will. 
+ */ + hfi1_put_txreq(ps->s_txreq); return 0; } break; diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index a62c9424fa86..75d70d583d03 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -54,7 +54,7 @@ #include "hfi.h" #include "qp.h" -#include "sdma.h" +#include "verbs_txreq.h" #include "trace.h" /* cut down ridiculously long IB macro names */ @@ -201,13 +201,15 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, * @qp: a pointer to the QP * @ohdr: a pointer to the IB header being constructed * @pmtu: the path MTU + * @ps: the xmit packet state * * Return 1 if constructed; otherwise, return 0. * Note that we are in the responder's side of the QP context. * Note the QP s_lock must be held. */ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, - struct hfi1_other_headers *ohdr, u32 pmtu) + struct hfi1_other_headers *ohdr, u32 pmtu, + struct hfi1_pkt_state *ps) { struct rvt_ack_entry *e; u32 hwords; @@ -347,7 +349,7 @@ normal: qp->s_rdma_ack_cnt++; qp->s_hdrwords = hwords; qp->s_cur_size = len; - hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle); + hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps); return 1; bail: @@ -371,7 +373,7 @@ bail: * * Return 1 if constructed; otherwise, return 0. */ -int hfi1_make_rc_req(struct rvt_qp *qp) +int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_ibdev *dev = to_idev(qp->ibqp.device); @@ -385,18 +387,21 @@ int hfi1_make_rc_req(struct rvt_qp *qp) u32 bth2; u32 pmtu = qp->pmtu; char newreq; - int ret = 0; int middle = 0; int delta; - ohdr = &priv->s_hdr->ibh.u.oth; + ps->s_txreq = get_txreq(ps->dev, qp); + if (IS_ERR(ps->s_txreq)) + goto bail_no_tx; + + ohdr = &ps->s_txreq->phdr.hdr.u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) - ohdr = &priv->s_hdr->ibh.u.l.oth; + ohdr = &ps->s_txreq->phdr.hdr.u.l.oth; /* Sending responses has higher priority over sending requests. */ if ((qp->s_flags & RVT_S_RESP_PENDING) && - make_rc_ack(dev, qp, ohdr, pmtu)) - goto done; + make_rc_ack(dev, qp, ohdr, pmtu, ps)) + return 1; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) @@ -415,7 +420,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp) hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ? IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); /* will get called again */ - goto done; + goto done_free_tx; } if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK)) @@ -752,12 +757,23 @@ int hfi1_make_rc_req(struct rvt_qp *qp) ohdr, bth0 | (qp->s_state << 24), bth2, - middle); -done: + middle, + ps); return 1; + +done_free_tx: + hfi1_put_txreq(ps->s_txreq); + ps->s_txreq = NULL; + return 1; + bail: + hfi1_put_txreq(ps->s_txreq); + +bail_no_tx: + ps->s_txreq = NULL; qp->s_flags &= ~RVT_S_BUSY; - return ret; + qp->s_hdrwords = 0; + return 0; } /** diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 7c6feffe65cc..70d1d3422e6e 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -54,6 +54,7 @@ #include "mad.h" #include "qp.h" #include "verbs_txreq.h" +#include "trace.h" /* * Convert the AETH RNR timeout code into the number of microseconds. 
@@ -698,6 +699,7 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, static inline void build_ahg(struct rvt_qp *qp, u32 npsn) { struct hfi1_qp_priv *priv = qp->priv; + if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR)) clear_ahg(qp); if (!(qp->s_flags & RVT_S_AHG_VALID)) { @@ -740,10 +742,11 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn) } void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, - u32 bth0, u32 bth2, int middle) + u32 bth0, u32 bth2, int middle, + struct hfi1_pkt_state *ps) { - struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_qp_priv *priv = qp->priv; + struct hfi1_ibport *ibp = ps->ibp; u16 lrh0; u32 nwords; u32 extra_bytes; @@ -754,7 +757,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, nwords = (qp->s_cur_size + extra_bytes) >> 2; lrh0 = HFI1_LRH_BTH; if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { - qp->s_hdrwords += hfi1_make_grh(ibp, &priv->s_hdr->ibh.u.l.grh, + qp->s_hdrwords += hfi1_make_grh(ibp, + &ps->s_txreq->phdr.hdr.u.l.grh, &qp->remote_ah_attr.grh, qp->s_hdrwords, nwords); lrh0 = HFI1_LRH_GRH; @@ -784,11 +788,11 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, build_ahg(qp, bth2); else qp->s_flags &= ~RVT_S_AHG_VALID; - priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); - priv->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); - priv->s_hdr->ibh.lrh[2] = + ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0); + ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); + ps->s_txreq->phdr.hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); - priv->s_hdr->ibh.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | + ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | qp->remote_ah_attr.src_path_bits); bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index); bth0 |= extra_bytes << 20; @@ -826,7 +830,7 @@ void hfi1_do_send(struct rvt_qp *qp) { struct hfi1_pkt_state ps; struct hfi1_qp_priv *priv = qp->priv; - int (*make_req)(struct rvt_qp *qp); + int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps); unsigned long flags; unsigned long timeout; unsigned long timeout_int; @@ -906,7 +910,7 @@ void hfi1_do_send(struct rvt_qp *qp) } spin_lock_irqsave(&qp->s_lock, flags); } - } while (make_req(qp)); + } while (make_req(qp, &ps)); spin_unlock_irqrestore(&qp->s_lock, flags); } diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index f884b5c8051b..77431b145305 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -49,7 +49,7 @@ */ #include "hfi.h" -#include "sdma.h" +#include "verbs_txreq.h" #include "qp.h" /* cut down ridiculously long IB macro names */ @@ -63,7 +63,7 @@ * * Return 1 if constructed; otherwise, return 0. 
*/ -int hfi1_make_uc_req(struct rvt_qp *qp) +int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; @@ -72,9 +72,12 @@ int hfi1_make_uc_req(struct rvt_qp *qp) u32 bth0 = 0; u32 len; u32 pmtu = qp->pmtu; - int ret = 0; int middle = 0; + ps->s_txreq = get_txreq(ps->dev, qp); + if (IS_ERR(ps->s_txreq)) + goto bail_no_tx; + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; @@ -90,12 +93,12 @@ int hfi1_make_uc_req(struct rvt_qp *qp) clear_ahg(qp); wqe = rvt_get_swqe_ptr(qp, qp->s_last); hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); - goto done; + goto done_free_tx; } - ohdr = &priv->s_hdr->ibh.u.oth; + ohdr = &ps->s_txreq->phdr.hdr.u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) - ohdr = &priv->s_hdr->ibh.u.l.oth; + ohdr = &ps->s_txreq->phdr.hdr.u.l.oth; /* Get the next send request. */ wqe = rvt_get_swqe_ptr(qp, qp->s_cur); @@ -235,13 +238,22 @@ int hfi1_make_uc_req(struct rvt_qp *qp) qp->s_cur_sge = &qp->s_sge; qp->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), - mask_psn(qp->s_psn++), middle); -done: + mask_psn(qp->s_psn++), middle, ps); + return 1; + +done_free_tx: + hfi1_put_txreq(ps->s_txreq); + ps->s_txreq = NULL; return 1; bail: + hfi1_put_txreq(ps->s_txreq); + +bail_no_tx: + ps->s_txreq = NULL; qp->s_flags &= ~RVT_S_BUSY; - return ret; + qp->s_hdrwords = 0; + return 0; } /** diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index ba78e2e3e0bb..a7118bca0d2a 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -54,6 +54,7 @@ #include "hfi.h" #include "mad.h" #include "qp.h" +#include "verbs_txreq.h" /** * ud_loopback - handle send on loopback QPs @@ -265,7 +266,7 @@ drop: * * Return 1 if constructed; otherwise, return 0. */ -int hfi1_make_ud_req(struct rvt_qp *qp) +int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_other_headers *ohdr; @@ -278,10 +279,13 @@ int hfi1_make_ud_req(struct rvt_qp *qp) u32 bth0; u16 lrh0; u16 lid; - int ret = 0; int next_cur; u8 sc5; + ps->s_txreq = get_txreq(ps->dev, qp); + if (IS_ERR(ps->s_txreq)) + goto bail_no_tx; + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; @@ -296,7 +300,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) } wqe = rvt_get_swqe_ptr(qp, qp->s_last); hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); - goto done; + goto done_free_tx; } /* see post_one_send() */ @@ -337,7 +341,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) ud_loopback(qp, wqe); spin_lock_irqsave(&qp->s_lock, flags); hfi1_send_complete(qp, wqe, IB_WC_SUCCESS); - goto done; + goto done_free_tx; } } @@ -359,11 +363,12 @@ int hfi1_make_ud_req(struct rvt_qp *qp) if (ah_attr->ah_flags & IB_AH_GRH) { /* Header size in 32-bit words. */ - qp->s_hdrwords += hfi1_make_grh(ibp, &priv->s_hdr->ibh.u.l.grh, - &ah_attr->grh, - qp->s_hdrwords, nwords); + qp->s_hdrwords += hfi1_make_grh(ibp, + &ps->s_txreq->phdr.hdr.u.l.grh, + &ah_attr->grh, + qp->s_hdrwords, nwords); lrh0 = HFI1_LRH_GRH; - ohdr = &priv->s_hdr->ibh.u.l.oth; + ohdr = &ps->s_txreq->phdr.hdr.u.l.oth; /* * Don't worry about sending to locally attached multicast * QPs. It is unspecified by the spec. what happens. @@ -371,7 +376,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) } else { /* Header size in 32-bit words. 
*/ lrh0 = HFI1_LRH_BTH; - ohdr = &priv->s_hdr->ibh.u.oth; + ohdr = &ps->s_txreq->phdr.hdr.u.oth; } if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { qp->s_hdrwords++; @@ -389,19 +394,20 @@ int hfi1_make_ud_req(struct rvt_qp *qp) priv->s_sc = sc5; } priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); - priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); - priv->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ - priv->s_hdr->ibh.lrh[2] = + ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0); + ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); + ps->s_txreq->phdr.hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); - if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) - priv->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE; - else { + if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) { + ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE; + } else { lid = ppd->lid; if (lid) { lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1); - priv->s_hdr->ibh.lrh[3] = cpu_to_be16(lid); - } else - priv->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE; + ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(lid); + } else { + ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE; + } } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; @@ -426,11 +432,21 @@ int hfi1_make_ud_req(struct rvt_qp *qp) priv->s_hdr->tx_flags = 0; priv->s_hdr->sde = NULL; -done: return 1; + +done_free_tx: + hfi1_put_txreq(ps->s_txreq); + ps->s_txreq = NULL; + return 1; + bail: + hfi1_put_txreq(ps->s_txreq); + +bail_no_tx: + ps->s_txreq = NULL; qp->s_flags &= ~RVT_S_BUSY; - return ret; + qp->s_hdrwords = 0; + return 0; } /* diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 7838b212d50c..8cf1d6b07784 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -622,8 +622,7 @@ bail_txadd: * NOTE: DMA mapping is held in the tx until completed in the ring or * the tx desc is freed without having been submitted to the ring * - * This routine insures the following all the helper routine - * calls succeed. + * This routine ensures all the helper routine calls succeed. 
*/ /* New API */ static int build_verbs_tx_desc( @@ -635,10 +634,9 @@ static int build_verbs_tx_desc( u64 pbc) { int ret = 0; - struct hfi1_pio_header *phdr; + struct hfi1_pio_header *phdr = &tx->phdr; u16 hdrbytes = tx->hdr_dwords << 2; - phdr = &tx->phdr; if (!ahdr->ahgcount) { ret = sdma_txinit_ahg( &tx->txreq, @@ -652,29 +650,14 @@ static int build_verbs_tx_desc( if (ret) goto bail_txadd; phdr->pbc = cpu_to_le64(pbc); - memcpy(&phdr->hdr, &ahdr->ibh, hdrbytes - sizeof(phdr->pbc)); - /* add the header */ ret = sdma_txadd_kvaddr( sde->dd, &tx->txreq, - &tx->phdr, - tx->hdr_dwords << 2); + phdr, + hdrbytes); if (ret) goto bail_txadd; } else { - struct hfi1_other_headers *sohdr = &ahdr->ibh.u.oth; - struct hfi1_other_headers *dohdr = &phdr->hdr.u.oth; - - /* needed in rc_send_complete() */ - phdr->hdr.lrh[0] = ahdr->ibh.lrh[0]; - if ((be16_to_cpu(phdr->hdr.lrh[0]) & 3) == HFI1_LRH_GRH) { - sohdr = &ahdr->ibh.u.l.oth; - dohdr = &phdr->hdr.u.l.oth; - } - /* opcode */ - dohdr->bth[0] = sohdr->bth[0]; - /* PSN/ACK */ - dohdr->bth[2] = sohdr->bth[2]; ret = sdma_txinit_ahg( &tx->txreq, ahdr->tx_flags, @@ -712,6 +695,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u8 sc5 = priv->s_sc; int ret; + struct hfi1_ibdev *tdev; if (!list_empty(&priv->s_iowait.tx_head)) { stx = list_first_entry( @@ -726,7 +710,10 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, return ret; } - tx = get_txreq(dev, qp); + tx = ps->s_txreq; + + tdev = to_idev(qp->ibqp.device); + if (IS_ERR(tx)) goto bail_tx; @@ -748,7 +735,8 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc); if (unlikely(ret)) goto bail_build; - trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh); + trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), + &ps->s_txreq->phdr.hdr); ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq); if (unlikely(ret == -ECOMM)) goto bail_ecomm; @@ -824,27 +812,29 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { struct hfi1_qp_priv *priv = qp->priv; - struct ahg_ib_header *ahdr = priv->s_hdr; u32 hdrwords = qp->s_hdrwords; struct rvt_sge_state *ss = qp->s_cur_sge; u32 len = qp->s_cur_size; u32 dwords = (len + 3) >> 2; u32 plen = hdrwords + dwords + 2; /* includes pbc */ struct hfi1_pportdata *ppd = ps->ppd; - u32 *hdr = (u32 *)&ahdr->ibh; + u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr; u64 pbc_flags = 0; u32 sc5; unsigned long flags = 0; struct send_context *sc; struct pio_buf *pbuf; int wc_status = IB_WC_SUCCESS; + int ret = 0; /* vl15 special case taken care of in ud.c */ sc5 = priv->s_sc; sc = qp_to_send_context(qp, sc5); - if (!sc) - return -EINVAL; + if (!sc) { + ret = -EINVAL; + goto bail; + } if (likely(pbc == 0)) { u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ @@ -872,7 +862,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, * so lets continue to queue the request. */ hfi1_cdbg(PIO, "alloc failed. 
state active, queuing"); - return no_bufs_available(qp, sc); + ret = no_bufs_available(qp, sc); + goto bail; } } @@ -895,7 +886,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, } } - trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh); + trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), + &ps->s_txreq->phdr.hdr); if (qp->s_rdma_mr) { rvt_put_mr(qp->s_rdma_mr); @@ -909,10 +901,15 @@ pio_bail: spin_unlock_irqrestore(&qp->s_lock, flags); } else if (qp->ibqp.qp_type == IB_QPT_RC) { spin_lock_irqsave(&qp->s_lock, flags); - hfi1_rc_send_complete(qp, &ahdr->ibh); + hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr); spin_unlock_irqrestore(&qp->s_lock, flags); } - return 0; + + ret = 0; + +bail: + hfi1_put_txreq(ps->s_txreq); + return ret; } /* @@ -1011,8 +1008,6 @@ bad: int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); - struct hfi1_qp_priv *priv = qp->priv; - struct ahg_ib_header *ahdr = priv->s_hdr; int ret; int pio = 0; unsigned long flags = 0; @@ -1026,7 +1021,7 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) !(dd->flags & HFI1_HAS_SEND_DMA)) pio = 1; - ret = egress_pkey_check(dd->pport, &ahdr->ibh, qp); + ret = egress_pkey_check(dd->pport, &ps->s_txreq->phdr.hdr, qp); if (unlikely(ret)) { /* * The value we are returning here does not get propagated to diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index d00c55d06c8c..73f471ae1f57 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -59,6 +59,7 @@ #include #include #include +#include #include #include #include @@ -193,13 +194,6 @@ struct hfi1_pio_header { struct hfi1_ib_header hdr; } __packed; -/* - * used for force cacheline alignment for AHG - */ -struct tx_pio_header { - struct hfi1_pio_header phdr; -} ____cacheline_aligned; - /* * hfi1 specific data structures that will be hidden from rvt after the queue * pair is made common @@ -222,6 +216,7 @@ struct hfi1_pkt_state { struct hfi1_ibdev *dev; struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; + struct verbs_txreq *s_txreq; }; #define HFI1_PSN_CREDIT 16 @@ -436,7 +431,8 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, struct ib_global_route *grh, u32 hwords, u32 nwords); void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, - u32 bth0, u32 bth2, int middle); + u32 bth0, u32 bth2, int middle, + struct hfi1_pkt_state *ps); void _hfi1_do_send(struct work_struct *work); @@ -447,11 +443,11 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct rvt_qp *qp, int is_fecn); -int hfi1_make_rc_req(struct rvt_qp *qp); +int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps); -int hfi1_make_uc_req(struct rvt_qp *qp); +int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps); -int hfi1_make_ud_req(struct rvt_qp *qp); +int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps); int hfi1_register_ib_device(struct hfi1_devdata *); diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/staging/rdma/hfi1/verbs_txreq.h index 387882a54c7b..d89d29b76199 100644 --- a/drivers/staging/rdma/hfi1/verbs_txreq.h +++ b/drivers/staging/rdma/hfi1/verbs_txreq.h @@ -63,7 +63,6 @@ struct verbs_txreq { struct rvt_mregion *mr; struct rvt_sge_state *ss; struct sdma_engine *sde; - struct send_context *psc; u16 hdr_dwords; }; -- cgit v1.2.3-59-g8ed1b From 
c239a5b5d6617b8bdae401f86529cab76313f3e7 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:44:52 -0800 Subject: staging/rdma/hfi1: remove s_rdma_mr It can be conveyed in the verbs_txreq. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 12 ++++++------ drivers/staging/rdma/hfi1/verbs.c | 8 -------- 2 files changed, 6 insertions(+), 14 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 75d70d583d03..c075e85ab4c6 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -266,9 +266,9 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, goto bail; } /* Copy SGE state in case we need to resend */ - qp->s_rdma_mr = e->rdma_sge.mr; - if (qp->s_rdma_mr) - rvt_get_mr(qp->s_rdma_mr); + ps->s_txreq->mr = e->rdma_sge.mr; + if (ps->s_txreq->mr) + rvt_get_mr(ps->s_txreq->mr); qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.num_sge = 1; qp->s_cur_sge = &qp->s_ack_rdma_sge; @@ -305,9 +305,9 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, /* FALLTHROUGH */ case OP(RDMA_READ_RESPONSE_MIDDLE): qp->s_cur_sge = &qp->s_ack_rdma_sge; - qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; - if (qp->s_rdma_mr) - rvt_get_mr(qp->s_rdma_mr); + ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr; + if (ps->s_txreq->mr) + rvt_get_mr(ps->s_txreq->mr); len = qp->s_ack_rdma_sge.sge.sge_length; if (len > pmtu) { len = pmtu; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 8cf1d6b07784..dc8eb6b76343 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -728,9 +728,6 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); } tx->wqe = qp->s_wqe; - tx->mr = qp->s_rdma_mr; - if (qp->s_rdma_mr) - qp->s_rdma_mr = NULL; tx->hdr_dwords = hdrwords + 2; ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc); if (unlikely(ret)) @@ -889,11 +886,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ps->s_txreq->phdr.hdr); - if (qp->s_rdma_mr) { - rvt_put_mr(qp->s_rdma_mr); - qp->s_rdma_mr = NULL; - } - pio_bail: if (qp->s_wqe) { spin_lock_irqsave(&qp->s_lock, flags); -- cgit v1.2.3-59-g8ed1b From 721d04273a8265847612a420174bb6e9a13d8d4f Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 12:45:00 -0800 Subject: staging/rdma/hfi1: Add s_sendcontext priv field s_sendcontext will be used to map the QPs to the send contexts for PIO. 
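The pattern mirrors the existing s_sde caching (a condensed sketch of the hunks below): validate the mapping at modify-QP time, then cache the context when the next packet's header is built:

    /* hfi1_check_modify_qp(): reject an SC with no usable send context */
    if (!qp_to_send_context(qp, sc))
        return -EINVAL;

    /* hfi1_make_ud_req(): cache the context for the PIO send path */
    priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);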
Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 13 +++++++++++-- drivers/staging/rdma/hfi1/ud.c | 1 + drivers/staging/rdma/hfi1/verbs.h | 9 +++++---- 3 files changed, 17 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 7387ef5cd069..571e78fa2633 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -191,6 +191,9 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, if (!qp_to_sdma_engine(qp, sc) && dd->flags & HFI1_HAS_SEND_DMA) return -EINVAL; + + if (!qp_to_send_context(qp, sc)) + return -EINVAL; } if (attr_mask & IB_QP_ALT_PATH) { @@ -201,6 +204,9 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, if (!qp_to_sdma_engine(qp, sc) && dd->flags & HFI1_HAS_SEND_DMA) return -EINVAL; + + if (!qp_to_send_context(qp, sc)) + return -EINVAL; } return 0; @@ -608,11 +614,13 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) struct rvt_qp *qp = iter->qp; struct hfi1_qp_priv *priv = qp->priv; struct sdma_engine *sde; + struct send_context *send_context; sde = qp_to_sdma_engine(qp, priv->s_sc); wqe = rvt_get_swqe_ptr(qp, qp->s_last); + send_context = qp_to_send_context(qp, priv->s_sc); seq_printf(s, - "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u %u SDE %p,%u\n", + "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, @@ -641,7 +649,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) qp->s_retry_cnt, qp->s_rnr_retry_cnt, sde, - sde ? sde->this_idx : 0); + sde ? 
sde->this_idx : 0, + send_context); } void qp_comm_est(struct rvt_qp *qp) diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index a7118bca0d2a..1b4b191ced99 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -394,6 +394,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) priv->s_sc = sc5; } priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); + priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0); ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); ps->s_txreq->phdr.hdr.lrh[2] = diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 73f471ae1f57..3d25ad406af7 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -199,10 +199,11 @@ struct hfi1_pio_header { * pair is made common */ struct hfi1_qp_priv { - struct ahg_ib_header *s_hdr; /* next packet header to send */ - struct sdma_engine *s_sde; /* current sde */ - u8 s_sc; /* SC[0..4] for next packet */ - u8 r_adefered; /* number of acks defered */ + struct ahg_ib_header *s_hdr; /* next header to send */ + struct sdma_engine *s_sde; /* current sde */ + struct send_context *s_sendcontext; /* current sendcontext */ + u8 s_sc; /* SC[0..4] for next packet */ + u8 r_adefered; /* number of acks defered */ struct iowait s_iowait; struct timer_list s_rnr_timer; struct rvt_qp *owner; -- cgit v1.2.3-59-g8ed1b From 1235bef8f04bf020b03f32e083e34bc91fc51343 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:45:09 -0800 Subject: staging/rdma/hfi1: avoid passing pmtu It is in the qp. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index c075e85ab4c6..d54d3ad1ed18 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -200,7 +200,6 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, * @dev: the device for this QP * @qp: a pointer to the QP * @ohdr: a pointer to the IB header being constructed - * @pmtu: the path MTU * @ps: the xmit packet state * * Return 1 if constructed; otherwise, return 0. @@ -208,7 +207,7 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, * Note the QP s_lock must be held. */ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, - struct hfi1_other_headers *ohdr, u32 pmtu, + struct hfi1_other_headers *ohdr, struct hfi1_pkt_state *ps) { struct rvt_ack_entry *e; @@ -217,6 +216,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, u32 bth0; u32 bth2; int middle = 0; + u32 pmtu = qp->pmtu; /* Don't send an ACK if we aren't supposed to. */ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) @@ -400,7 +400,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) /* Sending responses has higher priority over sending requests. 
*/ if ((qp->s_flags & RVT_S_RESP_PENDING) && - make_rc_ack(dev, qp, ohdr, pmtu, ps) + make_rc_ack(dev, qp, ohdr, ps)) return 1; if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { -- cgit v1.2.3-59-g8ed1b From 711e104ddca7b609889e1edf0a8482673ea4a7cc Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:45:18 -0800 Subject: staging/rdma/hfi1: fix panic in send engine The send engine wasn't correctly handling pre-built packets, and worse, the pointer to a packet state's txreq wasn't initialized correctly. To fix: - all waiters need to save any prebuilt packets (sdma waits already did) - the progress routine needs to handle a QP's prebuilt packet and initialize the txreq pointer properly To keep SDMA working, the dma send code needs to see if a packet has been built already. If not, the code will build it. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/iowait.h | 20 ++++++++ drivers/staging/rdma/hfi1/rc.c | 4 ++ drivers/staging/rdma/hfi1/ruc.c | 2 + drivers/staging/rdma/hfi1/sdma_txreq.h | 5 ++ drivers/staging/rdma/hfi1/uc.c | 2 + drivers/staging/rdma/hfi1/ud.c | 6 ++- drivers/staging/rdma/hfi1/verbs.c | 85 ++++++++++++++++----------------- drivers/staging/rdma/hfi1/verbs_txreq.h | 17 +++++++ 8 files changed, 95 insertions(+), 46 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/staging/rdma/hfi1/iowait.h index e8ba5606d08d..e007eb82cbc8 100644 --- a/drivers/staging/rdma/hfi1/iowait.h +++ b/drivers/staging/rdma/hfi1/iowait.h @@ -54,6 +54,7 @@ #include #include +#include "sdma_txreq.h" /* * typedef (*restart_t)() - restart callback * @work: pointer to work structure @@ -185,4 +186,23 @@ static inline void iowait_drain_wakeup(struct iowait *wait) wake_up(&wait->wait_dma); } +/** + * iowait_get_txhead() - get packet off of iowait list + * + * @wait: wait structure + */ +static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait) +{ + struct sdma_txreq *tx = NULL; + + if (!list_empty(&wait->tx_head)) { + tx = list_first_entry( + &wait->tx_head, + struct sdma_txreq, + list); + list_del_init(&tx->list); + } + return tx; +} + #endif diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index d54d3ad1ed18..27042876ca62 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -348,6 +348,8 @@ normal: } qp->s_rdma_ack_cnt++; qp->s_hdrwords = hwords; + /* pbc */ + ps->s_txreq->hdr_dwords = hwords + 2; qp->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps); return 1; @@ -750,6 +752,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) } qp->s_len -= len; qp->s_hdrwords = hwords; + /* pbc */ + ps->s_txreq->hdr_dwords = hwords + 2; qp->s_cur_sge = ss; qp->s_cur_size = len; hfi1_make_ruc_header( diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 70d1d3422e6e..70f42c93210c 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -879,6 +879,8 @@ void hfi1_do_send(struct rvt_qp *qp) timeout = jiffies + (timeout_int) / 8; cpu = priv->s_sde ? priv->s_sde->cpu : cpumask_first(cpumask_of_node(ps.ppd->dd->node)); + /* ensure a pre-built packet is handled */ + ps.s_txreq = get_waiting_verbs_txreq(qp); do { /* Check for a constructed packet to be sent.
*/ if (qp->s_hdrwords != 0) { diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/staging/rdma/hfi1/sdma_txreq.h index d0f77a844e79..2effb35b9b91 100644 --- a/drivers/staging/rdma/hfi1/sdma_txreq.h +++ b/drivers/staging/rdma/hfi1/sdma_txreq.h @@ -127,4 +127,9 @@ struct sdma_txreq { struct sdma_desc descs[NUM_DESC]; }; +static inline int sdma_txreq_built(struct sdma_txreq *tx) +{ + return tx->num_desc; +} + #endif /* HFI1_SDMA_TXREQ_H */ diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 77431b145305..32705618900d 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -235,6 +235,8 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) } qp->s_len -= len; qp->s_hdrwords = hwords; + /* pbc */ + ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; qp->s_cur_sge = &qp->s_sge; qp->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index 1b4b191ced99..bae5ccdfa7f4 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -53,8 +53,8 @@ #include "hfi.h" #include "mad.h" -#include "qp.h" #include "verbs_txreq.h" +#include "qp.h" /** * ud_loopback - handle send on loopback QPs @@ -394,7 +394,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) priv->s_sc = sc5; } priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); + ps->s_txreq->sde = priv->s_sde; priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); + ps->s_txreq->psc = priv->s_sendcontext; ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0); ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); ps->s_txreq->phdr.hdr.lrh[2] = @@ -432,6 +434,8 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) priv->s_hdr->ahgidx = 0; priv->s_hdr->tx_flags = 0; priv->s_hdr->sde = NULL; + /* pbc */ + ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; return 1; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index dc8eb6b76343..229dde5fbde6 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -547,7 +547,9 @@ static void verbs_sdma_complete( hfi1_put_txreq(tx); } -static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp) +static int wait_kmem(struct hfi1_ibdev *dev, + struct rvt_qp *qp, + struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; unsigned long flags; @@ -556,6 +558,8 @@ static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp) spin_lock_irqsave(&qp->s_lock, flags); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { write_seqlock(&dev->iowait_lock); + list_add_tail(&ps->s_txreq->txreq.list, + &priv->s_iowait.tx_head); if (list_empty(&priv->s_iowait.list)) { if (list_empty(&dev->memwait)) mod_timer(&dev->mem_timer, jiffies + 1); @@ -578,7 +582,7 @@ static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp) * * Add failures will revert the sge cursor */ -static int build_verbs_ulp_payload( +static noinline int build_verbs_ulp_payload( struct sdma_engine *sde, struct rvt_sge_state *ss, u32 length, @@ -690,48 +694,30 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct hfi1_ibdev *dev = ps->dev; struct hfi1_pportdata *ppd = ps->ppd; struct verbs_txreq *tx; - struct sdma_txreq *stx; u64 pbc_flags = 0; u8 sc5 = priv->s_sc; int ret; - struct hfi1_ibdev *tdev; - - if (!list_empty(&priv->s_iowait.tx_head)) { - stx = list_first_entry( - &priv->s_iowait.tx_head, - struct sdma_txreq, - list); - 
list_del_init(&stx->list); - tx = container_of(stx, struct verbs_txreq, txreq); - ret = sdma_send_txreq(tx->sde, &priv->s_iowait, stx); - if (unlikely(ret == -ECOMM)) - goto bail_ecomm; - return ret; - } tx = ps->s_txreq; - - tdev = to_idev(qp->ibqp.device); - - if (IS_ERR(tx)) - goto bail_tx; - - tx->sde = priv->s_sde; - - if (likely(pbc == 0)) { - u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); - /* No vl15 here */ - /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ - pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; - - pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); + if (!sdma_txreq_built(&tx->txreq)) { + if (likely(pbc == 0)) { + u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); + /* No vl15 here */ + /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ + pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; + + pbc = create_pbc(ppd, + pbc_flags, + qp->srate_mbps, + vl, + plen); + } + tx->wqe = qp->s_wqe; + ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc); + if (unlikely(ret)) + goto bail_build; } - tx->wqe = qp->s_wqe; - tx->hdr_dwords = hdrwords + 2; - ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc); - if (unlikely(ret)) - goto bail_build; trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ps->s_txreq->phdr.hdr); ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq); @@ -743,18 +729,22 @@ bail_ecomm: /* The current one got "sent" */ return 0; bail_build: - /* kmalloc or mapping fail */ - hfi1_put_txreq(tx); - return wait_kmem(dev, qp); -bail_tx: - return PTR_ERR(tx); + ret = wait_kmem(dev, qp, ps); + if (!ret) { + /* free txreq - bad state */ + hfi1_put_txreq(ps->s_txreq); + ps->s_txreq = NULL; + } + return ret; } /* * If we are now in the error state, return zero to flush the * send work request. */ -static int no_bufs_available(struct rvt_qp *qp, struct send_context *sc) +static int no_bufs_available(struct rvt_qp *qp, + struct send_context *sc, + struct hfi1_pkt_state *ps) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_devdata *dd = sc->dd; @@ -771,6 +761,8 @@ static int no_bufs_available(struct rvt_qp *qp, struct send_context *sc) spin_lock_irqsave(&qp->s_lock, flags); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { write_seqlock(&dev->iowait_lock); + list_add_tail(&ps->s_txreq->txreq.list, + &priv->s_iowait.tx_head); if (list_empty(&priv->s_iowait.list)) { struct hfi1_ibdev *dev = &dd->verbs_dev; int was_empty; @@ -859,8 +851,11 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, * so lets continue to queue the request. */ hfi1_cdbg(PIO, "alloc failed. 
state active, queuing"); - ret = no_bufs_available(qp, sc); - goto bail; + ret = no_bufs_available(qp, sc, ps); + if (!ret) + goto bail; + /* tx consumed in wait */ + return ret; } } diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/staging/rdma/hfi1/verbs_txreq.h index d89d29b76199..f56149eb51ca 100644 --- a/drivers/staging/rdma/hfi1/verbs_txreq.h +++ b/drivers/staging/rdma/hfi1/verbs_txreq.h @@ -63,6 +63,7 @@ struct verbs_txreq { struct rvt_mregion *mr; struct rvt_sge_state *ss; struct sdma_engine *sde; + struct send_context *psc; u16 hdr_dwords; }; @@ -74,6 +75,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, struct rvt_qp *qp) { struct verbs_txreq *tx; + struct hfi1_qp_priv *priv = qp->priv; tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC); if (unlikely(!tx)) { @@ -84,9 +86,24 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, } tx->qp = qp; tx->mr = NULL; + tx->sde = priv->s_sde; + tx->psc = priv->s_sendcontext; + /* so that we can test if the sdma decriptors are there */ + tx->txreq.num_desc = 0; return tx; } +static inline struct verbs_txreq *get_waiting_verbs_txreq(struct rvt_qp *qp) +{ + struct sdma_txreq *stx; + struct hfi1_qp_priv *priv = qp->priv; + + stx = iowait_get_txhead(&priv->s_iowait); + if (stx) + return container_of(stx, struct verbs_txreq, txreq); + return NULL; +} + void hfi1_put_txreq(struct verbs_txreq *tx); int verbs_txreq_init(struct hfi1_ibdev *dev); void verbs_txreq_exit(struct hfi1_ibdev *dev); -- cgit v1.2.3-59-g8ed1b From 4f8cc5c04f9445c4b1ef82769b5c0a0f1f8713c9 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:45:27 -0800 Subject: staging/rdma/hfi1: use u8 for vl/sl The use should match the universal container size. Reviewed-by: Ira Weiny Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 229dde5fbde6..a4f8b26f76fb 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -809,7 +809,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct hfi1_pportdata *ppd = ps->ppd; u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr; u64 pbc_flags = 0; - u32 sc5; + u8 sc5; unsigned long flags = 0; struct send_context *sc; struct pio_buf *pbuf; @@ -825,7 +825,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, goto bail; } if (likely(pbc == 0)) { - u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); + u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); -- cgit v1.2.3-59-g8ed1b From 14553ca11039732bcba3c160a26d702dbe71dd49 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:45:36 -0800 Subject: staging/rdma/hfi1: Adaptive PIO for short messages The change requires a new pio_busy field in the iowait structure to track the number of outstanding pios. The new counter together with the sdma counter serve as the basis for a packet by packet decision as to which egress mechanism to use. Since packets given to different egress mechanisms are not ordered, this scheme will preserve the order. The iowait drain/wait mechanisms are extended for a pio case. 
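For readers tracking the sc5 handling in the hunk above, here is a minimal user-space sketch of how SC[4] of the 5-bit service class is folded into the PBC flags. The shift value is an assumption for illustration only (the real layout comes from the chip register headers), and this is a standalone model rather than driver code.

#include <stdint.h>
#include <stdio.h>

#define PBC_DC_INFO_SHIFT 30	/* assumed for illustration; not the real CSR layout */

int main(void)
{
	uint8_t sc5 = 0x13;	/* a 5-bit SC with SC[4] set */
	uint64_t pbc_flags = 0;

	/* !!(sc5 & 0x10) reduces SC[4] to 0 or 1 before shifting it into place */
	pbc_flags |= (uint64_t)(!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
	printf("sc5=0x%x pbc_flags=0x%llx\n", sc5, (unsigned long long)pbc_flags);
	return 0;
}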
From 14553ca11039732bcba3c160a26d702dbe71dd49 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:45:36 -0800 Subject: staging/rdma/hfi1: Adaptive PIO for short messages The change requires a new pio_busy field in the iowait structure to track the number of outstanding pios. The new counter together with the sdma counter serves as the basis for a packet-by-packet decision as to which egress mechanism to use. Since packets given to different egress mechanisms are not ordered with respect to each other, the scheme is constructed to preserve packet ordering. The iowait drain/wait mechanisms are extended for a pio case. An additional qp wait flag is added for the PIO drain wait case. Currently the only pio wait is for buffers, so the no_bufs_available() routine name is changed to pio_wait() and a third argument is passed with one of the two pio wait flags to generalize the routine. A module parameter is added to hold a configurable threshold. For now, the module parameter is zero. A heuristic routine is added to return the function pointer of the proper egress routine to use. The heuristic is as follows: - SMI always uses pio - GSI, UD qps <= threshold use pio - UD qps > threshold use sdma o No coordination with sdma is required because order is not required and this qp pio count is not maintained for UD - RC/UC ONLY packets <= threshold choose as follows: o If sdmas are pending, use SDMA o Otherwise use pio and enable the pio tracking count at the time the pio buffer is allocated - RC/UC ONLY packets > threshold use SDMA o If pios are pending, pio_wait() is called with the new wait flag to delay until the pios drain The threshold is potentially reduced by the QP's mtu. The sc_buffer_alloc() has two additional args (a callback, a void *) which are exploited by the RC/UC cases to pass a new complete routine and a qp *. When the shadow ring completes the credit associated with a packet, the new complete routine is called. The verbs_pio_complete() will then decrement the busy count and trigger any drain waiters in qp destroy or reset. Reviewed-by: Jubin John Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 10 +++ drivers/staging/rdma/hfi1/chip.h | 1 + drivers/staging/rdma/hfi1/hfi.h | 7 +- drivers/staging/rdma/hfi1/iowait.h | 89 +++++++++++++++++++++++ drivers/staging/rdma/hfi1/pio.c | 3 +- drivers/staging/rdma/hfi1/qp.c | 25 ++++++- drivers/staging/rdma/hfi1/rc.c | 17 ++++- drivers/staging/rdma/hfi1/sdma.c | 14 ++-- drivers/staging/rdma/hfi1/uc.c | 10 ++- drivers/staging/rdma/hfi1/ud.c | 4 +- drivers/staging/rdma/hfi1/verbs.c | 123 +++++++++++++++++++++++--------- drivers/staging/rdma/hfi1/verbs.h | 16 +++++ drivers/staging/rdma/hfi1/verbs_txreq.h | 5 ++ include/rdma/rdmavt_qp.h | 22 +++--- 14 files changed, 286 insertions(+), 60 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 129461770186..36e8e3e9b012 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -1588,6 +1588,14 @@ static u64 access_sw_pio_wait(const struct cntr_entry *entry, return dd->verbs_dev.n_piowait; } +static u64 access_sw_pio_drain(const struct cntr_entry *entry, + void *context, int vl, int mode, u64 data) +{ + struct hfi1_devdata *dd = (struct hfi1_devdata *)context; + + return dd->verbs_dev.n_piodrain; +} + static u64 access_sw_vtx_wait(const struct cntr_entry *entry, void *context, int vl, int mode, u64 data) { @@ -4129,6 +4137,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { access_sw_vtx_wait), [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL, access_sw_pio_wait), +[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL, + access_sw_pio_drain), [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL, access_sw_kmem_wait), [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL, diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index b86c220161e5..6c581e0bd65f 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -800,6 +800,7 @@ enum { C_SW_CPU_RCV_LIM,
C_SW_VTX_WAIT, C_SW_PIO_WAIT, + C_SW_PIO_DRAIN, C_SW_KMEM_WAIT, C_SW_SEND_SCHED, C_SDMA_DESC_FETCHED_CNT, diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 702723b3ff90..43d48613d48e 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -811,6 +811,7 @@ struct sdma_vl_map; #define BOARD_VERS_MAX 96 /* how long the version string can be */ #define SERIAL_MAX 16 /* length of the serial number */ +typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64); struct hfi1_devdata { struct hfi1_ibdev verbs_dev; /* must be first */ struct list_head list; @@ -1121,10 +1122,8 @@ struct hfi1_devdata { * Handlers for outgoing data so that snoop/capture does not * have to have its hooks in the send path */ - int (*process_pio_send)(struct rvt_qp *qp, struct hfi1_pkt_state *ps, - u64 pbc); - int (*process_dma_send)(struct rvt_qp *qp, struct hfi1_pkt_state *ps, - u64 pbc); + send_routine process_pio_send; + send_routine process_dma_send; void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, const void *from, size_t count); diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/staging/rdma/hfi1/iowait.h index e007eb82cbc8..b5eb1e0a5aa2 100644 --- a/drivers/staging/rdma/hfi1/iowait.h +++ b/drivers/staging/rdma/hfi1/iowait.h @@ -55,6 +55,7 @@ #include #include "sdma_txreq.h" + /* * typedef (*restart_t)() - restart callback * @work: pointer to work structure @@ -71,6 +72,7 @@ struct sdma_engine; * @wakeup: space callback * @iowork: workqueue overhead * @wait_dma: wait for sdma_busy == 0 + * @wait_pio: wait for pio_busy == 0 * @sdma_busy: # of packets in flight * @count: total number of descriptors in tx_head'ed list * @tx_limit: limit for overflow queuing @@ -104,7 +106,9 @@ struct iowait { void (*wakeup)(struct iowait *wait, int reason); struct work_struct iowork; wait_queue_head_t wait_dma; + wait_queue_head_t wait_pio; atomic_t sdma_busy; + atomic_t pio_busy; u32 count; u32 tx_limit; u32 tx_count; @@ -141,7 +145,9 @@ static inline void iowait_init( INIT_LIST_HEAD(&wait->tx_head); INIT_WORK(&wait->iowork, func); init_waitqueue_head(&wait->wait_dma); + init_waitqueue_head(&wait->wait_pio); atomic_set(&wait->sdma_busy, 0); + atomic_set(&wait->pio_busy, 0); wait->tx_limit = tx_limit; wait->sleep = sleep; wait->wakeup = wakeup; @@ -174,6 +180,88 @@ static inline void iowait_sdma_drain(struct iowait *wait) wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy)); } +/** + * iowait_sdma_pending() - return sdma pending count + * + * @wait: iowait structure + * + */ +static inline int iowait_sdma_pending(struct iowait *wait) +{ + return atomic_read(&wait->sdma_busy); +} + +/** + * iowait_sdma_inc - note sdma io pending + * @wait: iowait structure + */ +static inline void iowait_sdma_inc(struct iowait *wait) +{ + atomic_inc(&wait->sdma_busy); +} + +/** + * iowait_sdma_add - add count to pending + * @wait: iowait structure + */ +static inline void iowait_sdma_add(struct iowait *wait, int count) +{ + atomic_add(count, &wait->sdma_busy); +} + +/** + * iowait_sdma_dec - note sdma complete + * @wait: iowait structure + */ +static inline int iowait_sdma_dec(struct iowait *wait) +{ + return atomic_dec_and_test(&wait->sdma_busy); +} + +/** + * iowait_pio_drain() - wait for pios to drain + * + * @wait: iowait structure + * + * This will delay until the iowait pios have + * completed. 
+ */ +static inline void iowait_pio_drain(struct iowait *wait) +{ + wait_event_timeout(wait->wait_pio, + !atomic_read(&wait->pio_busy), + HZ); +} + +/** + * iowait_pio_pending() - return pio pending count + * + * @wait: iowait structure + * + */ +static inline int iowait_pio_pending(struct iowait *wait) +{ + return atomic_read(&wait->pio_busy); +} + +/** + * iowait_pio_inc - note pio pending + * @wait: iowait structure + */ +static inline void iowait_pio_inc(struct iowait *wait) +{ + atomic_inc(&wait->pio_busy); +} + +/** + * iowait_sdma_dec - note pio complete + * @wait: iowait structure + */ +static inline int iowait_pio_dec(struct iowait *wait) +{ + return atomic_dec_and_test(&wait->pio_busy); +} + /** * iowait_drain_wakeup() - trigger iowait_drain() waiter * @@ -184,6 +272,7 @@ static inline void iowait_sdma_drain(struct iowait *wait) static inline void iowait_drain_wakeup(struct iowait *wait) { wake_up(&wait->wait_dma); + wake_up(&wait->wait_pio); } /** diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index be0dcc345f4b..f5aab0ed39d7 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -1564,7 +1564,8 @@ full: write_sequnlock_irqrestore(&dev->iowait_lock, flags); for (i = 0; i < n; i++) - hfi1_qp_wakeup(qps[i], RVT_S_WAIT_PIO); + hfi1_qp_wakeup(qps[i], + RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN); } /* translate a send credit update to a bit code of reasons */ diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 571e78fa2633..c7b83d66b59b 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -359,6 +359,25 @@ void _hfi1_schedule_send(struct rvt_qp *qp) cpumask_first(cpumask_of_node(dd->node))); } +static void qp_pio_drain(struct rvt_qp *qp) +{ + struct hfi1_ibdev *dev; + struct hfi1_qp_priv *priv = qp->priv; + + if (!priv->s_sendcontext) + return; + dev = to_idev(qp->ibqp.device); + while (iowait_pio_pending(&priv->s_iowait)) { + write_seqlock_irq(&dev->iowait_lock); + hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1); + write_sequnlock_irq(&dev->iowait_lock); + iowait_pio_drain(&priv->s_iowait); + write_seqlock_irq(&dev->iowait_lock); + hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0); + write_sequnlock_irq(&dev->iowait_lock); + } +} + /** * hfi1_schedule_send - schedule progress * @qp: the QP @@ -620,7 +639,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) wqe = rvt_get_swqe_ptr(qp, qp->s_last); send_context = qp_to_send_context(qp, priv->s_sc); seq_printf(s, - "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%u LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p\n", + "N %d %s QP%x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, @@ -630,7 +649,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) wqe ? wqe->wr.opcode : 0, qp->s_hdrwords, qp->s_flags, - atomic_read(&priv->s_iowait.sdma_busy), + iowait_sdma_pending(&priv->s_iowait), + iowait_pio_pending(&priv->s_iowait), !list_empty(&priv->s_iowait.list), qp->timeout, wqe ? 
wqe->ssn : 0, @@ -739,6 +759,7 @@ void quiesce_qp(struct rvt_qp *qp) struct hfi1_qp_priv *priv = qp->priv; iowait_sdma_drain(&priv->s_iowait); + qp_pio_drain(qp); flush_tx_list(qp); } diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 27042876ca62..443fda8df380 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -181,6 +181,18 @@ void hfi1_del_timers_sync(struct rvt_qp *qp) del_timer_sync(&priv->s_rnr_timer); } +/* only opcode mask for adaptive pio */ +const u32 rc_only_opcode = + BIT(OP(SEND_ONLY) & 0x1f) | + BIT(OP(SEND_ONLY_WITH_IMMEDIATE & 0x1f)) | + BIT(OP(RDMA_WRITE_ONLY & 0x1f)) | + BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE & 0x1f)) | + BIT(OP(RDMA_READ_REQUEST & 0x1f)) | + BIT(OP(ACKNOWLEDGE & 0x1f)) | + BIT(OP(ATOMIC_ACKNOWLEDGE & 0x1f)) | + BIT(OP(COMPARE_SWAP & 0x1f)) | + BIT(OP(FETCH_ADD & 0x1f)); + static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 psn, u32 pmtu) { @@ -217,6 +229,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp, u32 bth2; int middle = 0; u32 pmtu = qp->pmtu; + struct hfi1_qp_priv *priv = qp->priv; /* Don't send an ACK if we aren't supposed to. */ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) @@ -350,6 +363,7 @@ normal: qp->s_hdrwords = hwords; /* pbc */ ps->s_txreq->hdr_dwords = hwords + 2; + ps->s_txreq->sde = priv->s_sde; qp->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps); return 1; @@ -413,7 +427,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ - if (atomic_read(&priv->s_iowait.sdma_busy)) { + if (iowait_sdma_pending(&priv->s_iowait)) { qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } @@ -754,6 +768,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) qp->s_hdrwords = hwords; /* pbc */ ps->s_txreq->hdr_dwords = hwords + 2; + ps->s_txreq->sde = priv->s_sde; qp->s_cur_sge = ss; qp->s_cur_size = len; hfi1_make_ruc_header( diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index 579d82109932..ff38fa3b7ca5 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -410,7 +410,7 @@ static void sdma_flush(struct sdma_engine *sde) #endif sdma_txclean(sde->dd, txp); if (wait) - drained = atomic_dec_and_test(&wait->sdma_busy); + drained = iowait_sdma_dec(wait); if (txp->complete) (*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained); if (wait && drained) @@ -584,7 +584,7 @@ static void sdma_flush_descq(struct sdma_engine *sde) /* remove from list */ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; if (wait) - drained = atomic_dec_and_test(&wait->sdma_busy); + drained = iowait_sdma_dec(wait); #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER trace_hfi1_sdma_out_sn(sde, txp->sn); if (WARN_ON_ONCE(sde->head_sn != txp->sn)) @@ -1498,7 +1498,7 @@ retry: /* remove from list */ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; if (wait) - drained = atomic_dec_and_test(&wait->sdma_busy); + drained = iowait_sdma_dec(wait); #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER trace_hfi1_sdma_out_sn(sde, txp->sn); if (WARN_ON_ONCE(sde->head_sn != txp->sn)) @@ -2092,14 +2092,14 @@ retry: goto nodesc; tail = submit_tx(sde, tx); if (wait) - atomic_inc(&wait->sdma_busy); + iowait_sdma_inc(wait); sdma_update_tail(sde, tail); unlock: spin_unlock_irqrestore(&sde->tail_lock, flags); return ret; unlock_noconn: if (wait) - atomic_inc(&wait->sdma_busy); + 
iowait_sdma_inc(wait); tx->next_descq_idx = 0; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER tx->sn = sde->tail_sn++; @@ -2181,7 +2181,7 @@ retry: } update_tail: if (wait) - atomic_add(count, &wait->sdma_busy); + iowait_sdma_add(wait, count); if (tail != INVALID_TAIL) sdma_update_tail(sde, tail); spin_unlock_irqrestore(&sde->tail_lock, flags); @@ -2192,7 +2192,7 @@ unlock_noconn: tx->wait = wait; list_del_init(&tx->list); if (wait) - atomic_inc(&wait->sdma_busy); + iowait_sdma_inc(wait); tx->next_descq_idx = 0; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER tx->sn = sde->tail_sn++; diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 32705618900d..e58ec15dd892 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -55,6 +55,13 @@ /* cut down ridiculously long IB macro names */ #define OP(x) IB_OPCODE_UC_##x +/* only opcode mask for adaptive pio */ +const u32 uc_only_opcode = + BIT(OP(SEND_ONLY) & 0x1f) | + BIT(OP(SEND_ONLY_WITH_IMMEDIATE & 0x1f)) | + BIT(OP(RDMA_WRITE_ONLY & 0x1f)) | + BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE & 0x1f)); + /** * hfi1_make_uc_req - construct a request packet (SEND, RDMA write) * @qp: a pointer to the QP @@ -86,7 +93,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ - if (atomic_read(&priv->s_iowait.sdma_busy)) { + if (iowait_sdma_pending(&priv->s_iowait)) { qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } @@ -237,6 +244,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) qp->s_hdrwords = hwords; /* pbc */ ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; + ps->s_txreq->sde = priv->s_sde; qp->s_cur_sge = &qp->s_sge; qp->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index bae5ccdfa7f4..da4e465ae846 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -294,7 +294,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ - if (atomic_read(&priv->s_iowait.sdma_busy)) { + if (iowait_sdma_pending(&priv->s_iowait)) { qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } @@ -331,7 +331,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) * Instead of waiting, we could queue a * zero length descriptor so we get a callback. */ - if (atomic_read(&priv->s_iowait.sdma_busy)) { + if (iowait_sdma_pending(&priv->s_iowait)) { qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index a4f8b26f76fb..d900374abe70 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -124,11 +124,20 @@ unsigned int hfi1_max_srq_wrs = 0x1FFFF; module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO); MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); +unsigned short piothreshold; +module_param(piothreshold, ushort, S_IRUGO); +MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. 
pio"); + static void verbs_sdma_complete( struct sdma_txreq *cookie, int status, int drained); +static int pio_wait(struct rvt_qp *qp, + struct send_context *sc, + struct hfi1_pkt_state *ps, + u32 flag); + /* Length of buffer to create verbs txreq cache name */ #define TXREQ_NAME_LEN 24 @@ -742,9 +751,10 @@ bail_build: * If we are now in the error state, return zero to flush the * send work request. */ -static int no_bufs_available(struct rvt_qp *qp, - struct send_context *sc, - struct hfi1_pkt_state *ps) +static int pio_wait(struct rvt_qp *qp, + struct send_context *sc, + struct hfi1_pkt_state *ps, + u32 flag) { struct hfi1_qp_priv *priv = qp->priv; struct hfi1_devdata *dd = sc->dd; @@ -767,8 +777,10 @@ static int no_bufs_available(struct rvt_qp *qp, struct hfi1_ibdev *dev = &dd->verbs_dev; int was_empty; + dev->n_piowait += !!(flag & RVT_S_WAIT_PIO); + dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN); dev->n_piowait++; - qp->s_flags |= RVT_S_WAIT_PIO; + qp->s_flags |= flag; was_empty = list_empty(&sc->piowait); list_add_tail(&priv->s_iowait.list, &sc->piowait); trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO); @@ -797,6 +809,15 @@ struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5) return dd->vld[vl].sc; } +static void verbs_pio_complete(void *arg, int code) +{ + struct rvt_qp *qp = (struct rvt_qp *)arg; + struct hfi1_qp_priv *priv = qp->priv; + + if (iowait_pio_dec(&priv->s_iowait)) + iowait_drain_wakeup(&priv->s_iowait); +} + int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc) { @@ -815,6 +836,17 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct pio_buf *pbuf; int wc_status = IB_WC_SUCCESS; int ret = 0; + pio_release_cb cb = NULL; + + /* only RC/UC use complete */ + switch (qp->ibqp.qp_type) { + case IB_QPT_RC: + case IB_QPT_UC: + cb = verbs_pio_complete; + break; + default: + break; + } /* vl15 special case taken care of in ud.c */ sc5 = priv->s_sc; @@ -830,8 +862,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT; pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen); } - pbuf = sc_buffer_alloc(sc, plen, NULL, NULL); + if (cb) + iowait_pio_inc(&priv->s_iowait); + pbuf = sc_buffer_alloc(sc, plen, cb, qp); if (unlikely(pbuf == NULL)) { + if (cb) + verbs_pio_complete(qp, 0); if (ppd->host_link_state != HLS_UP_ACTIVE) { /* * If we have filled the PIO buffers to capacity and are @@ -851,8 +887,9 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, * so lets continue to queue the request. */ hfi1_cdbg(PIO, "alloc failed. 
state active, queuing"); - ret = no_bufs_available(qp, sc, ps); + ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO); if (!ret) + /* txreq not queued - free */ goto bail; /* tx consumed in wait */ return ret; @@ -984,6 +1021,48 @@ bad: return 1; } +/** + * get_send_routine - choose an egress routine + * + * Choose an egress routine based on QP type + * and size + */ +static inline send_routine get_send_routine(struct rvt_qp *qp, + struct hfi1_ib_header *h) +{ + struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_qp_priv *priv = qp->priv; + + if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA))) + return dd->process_pio_send; + switch (qp->ibqp.qp_type) { + case IB_QPT_SMI: + return dd->process_pio_send; + case IB_QPT_GSI: + case IB_QPT_UD: + if (piothreshold && qp->s_cur_size <= piothreshold) + return dd->process_pio_send; + break; + case IB_QPT_RC: + if (piothreshold && + qp->s_cur_size <= min(piothreshold, qp->pmtu) && + (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) && + iowait_sdma_pending(&priv->s_iowait) == 0) + return dd->process_pio_send; + break; + case IB_QPT_UC: + if (piothreshold && + qp->s_cur_size <= min(piothreshold, qp->pmtu) && + (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) && + iowait_sdma_pending(&priv->s_iowait) == 0) + return dd->process_pio_send; + break; + default: + break; + } + return dd->process_dma_send; +} + /** * hfi1_verbs_send - send a packet * @qp: the QP to send on @@ -995,19 +1074,10 @@ bad: int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + send_routine sr; int ret; - int pio = 0; - unsigned long flags = 0; - - /* - * VL15 packets (IB_QPT_SMI) will always use PIO, so we - * can defer SDMA restart until link goes ACTIVE without - * worrying about just how we got there. - */ - if ((qp->ibqp.qp_type == IB_QPT_SMI) || - !(dd->flags & HFI1_HAS_SEND_DMA)) - pio = 1; + sr = get_send_routine(qp, &ps->s_txreq->phdr.hdr); ret = egress_pkey_check(dd->pport, &ps->s_txreq->phdr.hdr, qp); if (unlikely(ret)) { /* @@ -1018,7 +1088,9 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) * mechanism for handling the errors. So for SDMA we can just * return. */ - if (pio) { + if (sr == dd->process_pio_send) { + unsigned long flags; + hfi1_cdbg(PIO, "%s() Failed. 
Completing with err", __func__); spin_lock_irqsave(&qp->s_lock, flags); @@ -1027,20 +1099,7 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) } return -EINVAL; } - - if (pio) { - ret = dd->process_pio_send(qp, ps, 0); - } else { -#ifdef CONFIG_SDMA_VERBOSITY - dd_dev_err(dd, "CONFIG SDMA %s:%d %s()\n", - slashstrip(__FILE__), __LINE__, __func__); - dd_dev_err(dd, "SDMA hdrwords = %u, len = %u\n", qp->s_hdrwords, - qp->s_cur_size); -#endif - ret = dd->process_dma_send(qp, ps, 0); - } - - return ret; + return sr(qp, ps, 0); } /** diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 3d25ad406af7..8f1fde847c14 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -265,6 +265,7 @@ struct hfi1_ibdev { struct timer_list mem_timer; u64 n_piowait; + u64 n_piodrain; u64 n_txwait; u64 n_kmem_wait; @@ -425,6 +426,19 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe); +extern const u32 rc_only_opcode; +extern const u32 uc_only_opcode; + +static inline u8 get_opcode(struct hfi1_ib_header *h) +{ + u16 lnh = be16_to_cpu(h->lrh[0]) & 3; + + if (lnh == IB_LNH_IBA_LOCAL) + return be32_to_cpu(h->u.oth.bth[0]) >> 24; + else + return be32_to_cpu(h->u.l.oth.bth[0]) >> 24; +} + int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, int has_grh, struct rvt_qp *qp, u32 bth0); @@ -494,6 +508,8 @@ extern unsigned int hfi1_max_srq_sges; extern unsigned int hfi1_max_srq_wrs; +extern unsigned short piothreshold; + extern const u32 ib_hfi1_rnr_table[]; #endif /* HFI1_VERBS_H */ diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/staging/rdma/hfi1/verbs_txreq.h index f56149eb51ca..1cf69b2fe4a5 100644 --- a/drivers/staging/rdma/hfi1/verbs_txreq.h +++ b/drivers/staging/rdma/hfi1/verbs_txreq.h @@ -93,6 +93,11 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, return tx; } +static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx) +{ + return &tx->txreq; +} + static inline struct verbs_txreq *get_waiting_verbs_txreq(struct rvt_qp *qp) { struct sdma_txreq *stx; diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 5c307ed4d195..f2f4df023aaa 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -82,6 +82,7 @@ * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating * next send completion entry not via send DMA * RVT_S_WAIT_PIO - waiting for a send buffer to be available + * RVT_S_WAIT_PIO_DRAIN - waiting for a qp to drain pio packets * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available * RVT_S_WAIT_KMEM - waiting for kernel memory to be available @@ -101,16 +102,17 @@ #define RVT_S_WAIT_SSN_CREDIT 0x0100 #define RVT_S_WAIT_DMA 0x0200 #define RVT_S_WAIT_PIO 0x0400 -#define RVT_S_WAIT_TX 0x0800 -#define RVT_S_WAIT_DMA_DESC 0x1000 -#define RVT_S_WAIT_KMEM 0x2000 -#define RVT_S_WAIT_PSN 0x4000 -#define RVT_S_WAIT_ACK 0x8000 -#define RVT_S_SEND_ONE 0x10000 -#define RVT_S_UNLIMITED_CREDIT 0x20000 -#define RVT_S_AHG_VALID 0x40000 -#define RVT_S_AHG_CLEAR 0x80000 -#define RVT_S_ECN 0x100000 +#define RVT_S_WAIT_PIO_DRAIN 0x0800 +#define RVT_S_WAIT_TX 0x1000 +#define RVT_S_WAIT_DMA_DESC 0x2000 +#define RVT_S_WAIT_KMEM 0x4000 +#define RVT_S_WAIT_PSN 0x8000 +#define RVT_S_WAIT_ACK 0x10000 +#define RVT_S_SEND_ONE 0x20000 +#define RVT_S_UNLIMITED_CREDIT 0x40000 +#define 
RVT_S_AHG_VALID 0x80000 +#define RVT_S_AHG_CLEAR 0x100000 +#define RVT_S_ECN 0x200000 /* * Wait flags that would prevent any packet type from being sent. -- cgit v1.2.3-59-g8ed1b From 91702b4a39fb566b78f2ef1cea8bf6ed3fe9f4a6 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:45:44 -0800 Subject: IB/qib, staging/rdma/hfi1, IB/rdmavt: progress selection changes The non-rdmavt versions of qib and hfi1 allow for a differing heuristic to override a schedule progress in favor of a direct call to the progress routine. This patch adds that for both drivers and rdmavt. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/infiniband/hw/qib/qib_qp.c | 7 +++++-- drivers/infiniband/sw/rdmavt/qp.c | 10 +++++++--- drivers/staging/rdma/hfi1/qp.c | 3 ++- 3 files changed, 14 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 6ffa0221da9f..575b737d9ef3 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -484,12 +484,13 @@ void qib_get_credit(struct rvt_qp *qp, u32 aeth) * the ring but after the wqe has been * setup. * - * Returns 0 on success, -EINVAL on failure + * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure */ int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe) { struct rvt_ah *ah; + int ret = 0; switch (qp->ibqp.qp_type) { case IB_QPT_RC: @@ -503,11 +504,13 @@ int qib_check_send_wqe(struct rvt_qp *qp, ah = ibah_to_rvtah(wqe->ud_wr.ah); if (wqe->length > (1 << ah->log_pmtu)) return -EINVAL; + /* progress hint */ + ret = 1; break; default: break; } - return 0; + return ret; } #ifdef CONFIG_DEBUG_FS diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index dbf124db1fd1..ef82abf2d89e 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1430,7 +1430,9 @@ static inline u32 qp_get_savail(struct rvt_qp *qp) * @qp: the QP to post on * @wr: the work request to send */ -static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr) +static int rvt_post_one_wr(struct rvt_qp *qp, + struct ib_send_wr *wr, + int *call_send) { struct rvt_swqe *wqe; u32 next; @@ -1532,8 +1534,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr) /* general part of wqe valid - allow for driver checks */ if (rdi->driver_f.check_send_wqe) { ret = rdi->driver_f.check_send_wqe(qp, wqe); - if (ret) + if (ret < 0) goto bail_inval_free; + if (ret) + *call_send = ret; } log_pmtu = qp->log_pmtu; @@ -1606,7 +1610,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next; for (; wr; wr = wr->next) { - err = rvt_post_one_wr(qp, wr); + err = rvt_post_one_wr(qp, wr, &call_send); if (unlikely(err)) { *bad_wr = wr; goto bail; diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index c7b83d66b59b..2d157054576a 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -73,6 +73,7 @@ static int iowait_sleep( struct sdma_txreq *stx, unsigned seq); static void iowait_wakeup(struct iowait *wait, int reason); +static void qp_pio_drain(struct rvt_qp *qp); static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map, unsigned off) @@ -272,7 +273,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp, default: break; } - return 0; + return wqe->length <= piothreshold; } /** -- cgit v1.2.3-59-g8ed1b
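A minimal user-space sketch of the check_send_wqe() contract the patch above establishes: a negative return rejects the work request, 1 hints that the poster should call the progress routine directly, and 0 leaves scheduling alone. The callback body and numbers below are illustrative assumptions, not the drivers' actual checks.

#include <stdio.h>

/* hypothetical stand-in for the driver callback; piothreshold models the hfi1 module parameter */
static int check_send_wqe(unsigned int length, unsigned int piothreshold)
{
	if (length == 0)
		return -22;			/* like -EINVAL: reject the work request */
	return length <= piothreshold;		/* 1 => hint direct progress */
}

int main(void)
{
	int call_send = 0;
	int ret = check_send_wqe(128, 256);

	if (ret < 0)
		return 1;		/* bad WQE: fail the post */
	if (ret)
		call_send = 1;		/* small send: run the send engine inline */
	printf("call_send=%d\n", call_send);
	return 0;
}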
From a545f5308b6cf476def8a9326f7e82f89623bb03 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:45:53 -0800 Subject: staging/rdma/hfi: fix CQ completion order issue The current implementation of the sdma_wait variable has a timing hole that can cause a completion Q entry to be returned from a pio send prior to an older sdma packet's completion queue entry. The sdma_wait variable used to be decremented prior to calling the packet complete routine. The hole is between the decrement and the verbs completion, where a send engine using pio could return an out-of-order completion in that window. This patch closes the hole by allowing an API option to specify an sdma_drained callback. The atomic dec is positioned after the complete callback to avoid the window as long as the pio path doesn't execute when there is a non-zero sdma count. Reviewed-by: Jubin John Signed-off-by: Dean Luick Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/iowait.h | 12 +++-- drivers/staging/rdma/hfi1/qp.c | 20 +++++++- drivers/staging/rdma/hfi1/sdma.c | 94 +++++++++++----------------------- drivers/staging/rdma/hfi1/sdma.h | 4 +- drivers/staging/rdma/hfi1/sdma_txreq.h | 2 +- drivers/staging/rdma/hfi1/user_sdma.c | 7 ++- drivers/staging/rdma/hfi1/verbs.c | 18 +------ 7 files changed, 65 insertions(+), 92 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/staging/rdma/hfi1/iowait.h index b5eb1e0a5aa2..2cb3f0422752 100644 --- a/drivers/staging/rdma/hfi1/iowait.h +++ b/drivers/staging/rdma/hfi1/iowait.h @@ -69,7 +69,8 @@ struct sdma_engine; * @list: used to add/insert into QP/PQ wait lists * @tx_head: overflow list of sdma_txreq's * @sleep: no space callback - * @wakeup: space callback + * @wakeup: space callback wakeup + * @sdma_drained: sdma count drained * @iowork: workqueue overhead * @wait_dma: wait for sdma_busy == 0 @@ -104,6 +105,7 @@ struct iowait { struct sdma_txreq *tx, unsigned seq); void (*wakeup)(struct iowait *wait, int reason); + void (*sdma_drained)(struct iowait *wait); struct work_struct iowork; wait_queue_head_t wait_dma; wait_queue_head_t wait_pio; @@ -122,7 +124,7 @@ struct iowait { * @tx_limit: limit for overflow queuing * @func: restart function for workqueue * @sleep: sleep function for no space - * @wakeup: wakeup function for no space + * @resume: wakeup function for no space * * This function initializes the iowait * structure embedded in the QP or PQ.
@@ -138,7 +140,8 @@ static inline void iowait_init( struct iowait *wait, struct sdma_txreq *tx, unsigned seq), - void (*wakeup)(struct iowait *wait, int reason)) + void (*wakeup)(struct iowait *wait, int reason), + void (*sdma_drained)(struct iowait *wait)) { wait->count = 0; INIT_LIST_HEAD(&wait->list); @@ -151,6 +154,7 @@ static inline void iowait_init( wait->tx_limit = tx_limit; wait->sleep = sleep; wait->wakeup = wakeup; + wait->sdma_drained = sdma_drained; } /** @@ -273,6 +277,8 @@ static inline void iowait_drain_wakeup(struct iowait *wait) { wake_up(&wait->wait_dma); wake_up(&wait->wait_pio); + if (wait->sdma_drained) + wait->sdma_drained(wait); } /** diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 2d157054576a..77e91f280b21 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -73,6 +73,7 @@ static int iowait_sleep( struct sdma_txreq *stx, unsigned seq); static void iowait_wakeup(struct iowait *wait, int reason); +static void iowait_sdma_drained(struct iowait *wait); static void qp_pio_drain(struct rvt_qp *qp); static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, @@ -509,6 +510,22 @@ static void iowait_wakeup(struct iowait *wait, int reason) hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC); } +static void iowait_sdma_drained(struct iowait *wait) +{ + struct rvt_qp *qp = iowait_to_qp(wait); + + /* + * This happens when the send engine notes + * a QP in the error state and cannot + * do the flush work until that QP's + * sdma work has finished. + */ + if (qp->s_flags & RVT_S_WAIT_DMA) { + qp->s_flags &= ~RVT_S_WAIT_DMA; + hfi1_schedule_send(qp); + } +} + /** * * qp_to_sdma_engine - map a qp to a send engine @@ -773,7 +790,8 @@ void notify_qp_reset(struct rvt_qp *qp) 1, _hfi1_do_send, iowait_sleep, - iowait_wakeup); + iowait_wakeup, + iowait_sdma_drained); priv->r_adefered = 0; clear_ahg(qp); } diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index ff38fa3b7ca5..e79f931d06ce 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -361,6 +361,28 @@ static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt) write_sde_csr(sde, SD(DESC_CNT), reg); } +static inline void complete_tx(struct sdma_engine *sde, + struct sdma_txreq *tx, + int res) +{ + /* protect against complete modifying */ + struct iowait *wait = tx->wait; + callback_t complete = tx->complete; + +#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER + trace_hfi1_sdma_out_sn(sde, tx->sn); + if (WARN_ON_ONCE(sde->head_sn != tx->sn)) + dd_dev_err(sde->dd, "expected %llu got %llu\n", + sde->head_sn, tx->sn); + sde->head_sn++; +#endif + sdma_txclean(sde->dd, tx); + if (complete) + (*complete)(tx, res); + if (wait && iowait_sdma_dec(wait)) + iowait_drain_wakeup(wait); +} + /* * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status * @@ -395,27 +417,8 @@ static void sdma_flush(struct sdma_engine *sde) } spin_unlock_irqrestore(&sde->flushlist_lock, flags); /* flush from flush list */ - list_for_each_entry_safe(txp, txp_next, &flushlist, list) { - int drained = 0; - /* protect against complete modifying */ - struct iowait *wait = txp->wait; - - list_del_init(&txp->list); -#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER - trace_hfi1_sdma_out_sn(sde, txp->sn); - if (WARN_ON_ONCE(sde->head_sn != txp->sn)) - dd_dev_err(sde->dd, "expected %llu got %llu\n", - sde->head_sn, txp->sn); - sde->head_sn++; -#endif - sdma_txclean(sde->dd, txp); - if (wait) - drained = iowait_sdma_dec(wait); - if (txp->complete) -
(*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained); - if (wait && drained) - iowait_drain_wakeup(wait); - } + list_for_each_entry_safe(txp, txp_next, &flushlist, list) + complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); } /* @@ -577,31 +580,10 @@ static void sdma_flush_descq(struct sdma_engine *sde) head = ++sde->descq_head & sde->sdma_mask; /* if now past this txp's descs, do the callback */ if (txp && txp->next_descq_idx == head) { - int drained = 0; - /* protect against complete modifying */ - struct iowait *wait = txp->wait; - /* remove from list */ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; - if (wait) - drained = iowait_sdma_dec(wait); -#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER - trace_hfi1_sdma_out_sn(sde, txp->sn); - if (WARN_ON_ONCE(sde->head_sn != txp->sn)) - dd_dev_err(sde->dd, "expected %llu got %llu\n", - sde->head_sn, txp->sn); - sde->head_sn++; -#endif - sdma_txclean(sde->dd, txp); + complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); trace_hfi1_sdma_progress(sde, head, tail, txp); - if (txp->complete) - (*txp->complete)( - txp, - SDMA_TXREQ_S_ABORTED, - drained); - if (wait && drained) - iowait_drain_wakeup(wait); - /* see if there is another txp */ txp = get_txhead(sde); } progress++; @@ -1470,7 +1452,7 @@ static void sdma_make_progress(struct sdma_engine *sde, u64 status) { struct sdma_txreq *txp = NULL; int progress = 0; - u16 hwhead, swhead, swtail; + u16 hwhead, swhead; int idle_check_done = 0; hwhead = sdma_gethead(sde); @@ -1491,29 +1473,9 @@ retry: /* if now past this txp's descs, do the callback */ if (txp && txp->next_descq_idx == swhead) { - int drained = 0; - /* protect against complete modifying */ - struct iowait *wait = txp->wait; - /* remove from list */ sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; - if (wait) - drained = iowait_sdma_dec(wait); -#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER - trace_hfi1_sdma_out_sn(sde, txp->sn); - if (WARN_ON_ONCE(sde->head_sn != txp->sn)) - dd_dev_err(sde->dd, "expected %llu got %llu\n", - sde->head_sn, txp->sn); - sde->head_sn++; -#endif - sdma_txclean(sde->dd, txp); - if (txp->complete) - (*txp->complete)( - txp, - SDMA_TXREQ_S_OK, - drained); - if (wait && drained) - iowait_drain_wakeup(wait); + complete_tx(sde, txp, SDMA_TXREQ_S_OK); /* see if there is another txp */ txp = get_txhead(sde); } @@ -1531,6 +1493,8 @@ retry: * of sdma_make_progress(..) 
which is ensured by idle_check_done flag */ if ((status & sde->idle_mask) && !idle_check_done) { + u16 swtail; + swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; if (swtail != hwhead) { hwhead = (u16)read_sde_csr(sde, SD(HEAD)); diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 76ed2157c514..f24b5a17322b 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -555,7 +555,7 @@ static inline int sdma_txinit_ahg( u8 num_ahg, u32 *ahg, u8 ahg_hlen, - void (*cb)(struct sdma_txreq *, int, int)) + void (*cb)(struct sdma_txreq *, int)) { if (tlen == 0) return -ENODATA; @@ -618,7 +618,7 @@ static inline int sdma_txinit( struct sdma_txreq *tx, u16 flags, u16 tlen, - void (*cb)(struct sdma_txreq *, int, int)) + void (*cb)(struct sdma_txreq *, int)) { return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb); } diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/staging/rdma/hfi1/sdma_txreq.h index 2effb35b9b91..bf7d777d756e 100644 --- a/drivers/staging/rdma/hfi1/sdma_txreq.h +++ b/drivers/staging/rdma/hfi1/sdma_txreq.h @@ -93,7 +93,7 @@ struct sdma_desc { #define SDMA_TXREQ_F_USE_AHG 0x0004 struct sdma_txreq; -typedef void (*callback_t)(struct sdma_txreq *, int, int); +typedef void (*callback_t)(struct sdma_txreq *, int); struct iowait; struct sdma_txreq { diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index ac903099843e..dfa9ef209793 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -273,7 +273,7 @@ struct user_sdma_txreq { static int user_sdma_send_pkts(struct user_sdma_request *, unsigned); static int num_user_pages(const struct iovec *); -static void user_sdma_txreq_cb(struct sdma_txreq *, int, int); +static void user_sdma_txreq_cb(struct sdma_txreq *, int); static inline void pq_update(struct hfi1_user_sdma_pkt_q *); static void user_sdma_free_request(struct user_sdma_request *, bool); static int pin_vector_pages(struct user_sdma_request *, @@ -388,7 +388,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp) init_waitqueue_head(&pq->wait); iowait_init(&pq->busy, 0, NULL, defer_packet_queue, - activate_packet_queue); + activate_packet_queue, NULL); pq->reqidx = 0; snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt, fd->subctxt); @@ -1341,8 +1341,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, * tx request have been processed by the DMA engine. Called in * interrupt context. */ -static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status, - int drain) +static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) { struct user_sdma_txreq *tx = container_of(txreq, struct user_sdma_txreq, txreq); diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index d900374abe70..31419666cc69 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -130,8 +130,7 @@ MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. 
pio"); static void verbs_sdma_complete( struct sdma_txreq *cookie, - int status, - int drained); + int status); static int pio_wait(struct rvt_qp *qp, struct send_context *sc, @@ -523,8 +522,7 @@ void update_sge(struct rvt_sge_state *ss, u32 length) /* New API */ static void verbs_sdma_complete( struct sdma_txreq *cookie, - int status, - int drained) + int status) { struct verbs_txreq *tx = container_of(cookie, struct verbs_txreq, txreq); @@ -539,18 +537,6 @@ static void verbs_sdma_complete( hdr = &tx->phdr.hdr; hfi1_rc_send_complete(qp, hdr); } - if (drained) { - /* - * This happens when the send engine notes - * a QP in the error state and cannot - * do the flush work until that QP's - * sdma work has finished. - */ - if (qp->s_flags & RVT_S_WAIT_DMA) { - qp->s_flags &= ~RVT_S_WAIT_DMA; - hfi1_schedule_send(qp); - } - } spin_unlock(&qp->s_lock); hfi1_put_txreq(tx); -- cgit v1.2.3-59-g8ed1b From 35f6befc8441d20724a41bafc810b7c8f5a92986 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 12:46:10 -0800 Subject: staging/rdma/hfi1: Add qp to send context mapping for PIO PIO send context mapping is changed from per-VL to QPN based. qp to send context mapping is done using a mapping infrastructure similar to the current vl to sdma engine mapping. Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 6 + drivers/staging/rdma/hfi1/init.c | 2 + drivers/staging/rdma/hfi1/pio.c | 241 +++++++++++++++++++++++++++++++++++++- drivers/staging/rdma/hfi1/pio.h | 106 +++++++++++++++++ drivers/staging/rdma/hfi1/qp.c | 24 ++++ drivers/staging/rdma/hfi1/qp.h | 1 + drivers/staging/rdma/hfi1/verbs.c | 12 -- drivers/staging/rdma/hfi1/verbs.h | 2 - 8 files changed, 375 insertions(+), 19 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 43d48613d48e..4d5a18ece115 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -841,6 +841,12 @@ struct hfi1_devdata { spinlock_t sc_lock; /* Per VL data. Enough for all VLs but not all elements are set/used. 
*/ struct per_vl_data vld[PER_VL_SEND_CONTEXTS]; + /* lock for pio_map */ + spinlock_t pio_map_lock; + /* array of kernel send contexts */ + struct send_context **kernel_send_context; + /* array of vl maps */ + struct pio_vl_map __rcu *pio_map; /* seqlock for sc2vl */ seqlock_t sc2vl_lock; u64 sc2vl[4]; diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 112cb6c09857..423c6996e93c 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -1050,6 +1050,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) mutex_init(&dd->qsfp_i2c_mutex); seqlock_init(&dd->sc2vl_lock); spin_lock_init(&dd->sde_map_lock); + spin_lock_init(&dd->pio_map_lock); init_waitqueue_head(&dd->event_queue); dd->int_counter = alloc_percpu(u64); @@ -1317,6 +1318,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) } } kfree(tmp); + free_pio_map(dd); /* must follow rcv context free - need to remove rcv's hooks */ for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) sc_free(dd->send_contexts[ctxt].sc); diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index f5aab0ed39d7..69bbe22aae55 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -312,7 +312,7 @@ int init_sc_pools_and_sizes(struct hfi1_devdata *dd) if (i == SC_ACK) { count = dd->n_krcv_queues; } else if (i == SC_KERNEL) { - count = num_vls + 1 /* VL15 */; + count = (INIT_SC_PER_VL * num_vls) + 1 /* VL15 */; } else if (count == SCC_PER_CPU) { count = dd->num_rcv_contexts - dd->n_krcv_queues; } else if (count < 0) { @@ -1687,11 +1687,217 @@ done: spin_unlock(&dd->sc_lock); } +/* + * pio_select_send_context_vl() - select send context + * @dd: devdata + * @selector: a spreading factor + * @vl: this vl + * + * This function returns a send context based on the selector and a vl. + * The mapping fields are protected by RCU + */ +struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd, + u32 selector, u8 vl) +{ + struct pio_vl_map *m; + struct pio_map_elem *e; + struct send_context *rval; + + /* + * NOTE This should only happen if SC->VL changed after the initial + * checks on the QP/AH + * Default will return VL0's send context below + */ + if (unlikely(vl >= num_vls)) { + rval = NULL; + goto done; + } + + rcu_read_lock(); + m = rcu_dereference(dd->pio_map); + if (unlikely(!m)) { + rcu_read_unlock(); + return dd->vld[0].sc; + } + e = m->map[vl & m->mask]; + rval = e->ksc[selector & e->mask]; + rcu_read_unlock(); + +done: + rval = !rval ? 
dd->vld[0].sc : rval; + return rval; +} + +/* + * pio_select_send_context_sc() - select send context + * @dd: devdata + * @selector: a spreading factor + * @sc5: the 5 bit sc + * + * This function returns a send context based on the selector and an sc + */ +struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd, + u32 selector, u8 sc5) +{ + u8 vl = sc_to_vlt(dd, sc5); + + return pio_select_send_context_vl(dd, selector, vl); +} + +/* + * Free the indicated map struct + */ +static void pio_map_free(struct pio_vl_map *m) +{ + int i; + + for (i = 0; m && i < m->actual_vls; i++) + kfree(m->map[i]); + kfree(m); +} + +/* + * Handle RCU callback + */ +static void pio_map_rcu_callback(struct rcu_head *list) +{ + struct pio_vl_map *m = container_of(list, struct pio_vl_map, list); + + pio_map_free(m); +} + +/* + * pio_map_init - called when #vls change + * @dd: hfi1_devdata + * @port: port number + * @num_vls: number of vls + * @vl_scontexts: per vl send context mapping (optional) + * + * This routine changes the mapping based on the number of vls. + * + * vl_scontexts is used to specify a non-uniform vl/send context + * loading. NULL implies auto computing the loading and giving each + * VL a uniform distribution of send contexts per VL. + * + * The auto algorithm computes the sc_per_vl and the number of extra + * send contexts. Any extra send contexts are added from the last VL + * on down. + * + * rcu locking is used here to control access to the mapping fields. + * + * If either num_vls or num_send_contexts is not a power of 2, the + * array sizes in the struct pio_vl_map and the struct pio_map_elem are + * rounded up to the next highest power of 2 and the first entry is + * reused in a round robin fashion. + * + * If an error occurs, the map change is not done and the mapping is not + * changed. + * + */ +int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) +{ + int i, j; + int extra, sc_per_vl; + int scontext = 1; + int num_kernel_send_contexts = 0; + u8 lvl_scontexts[OPA_MAX_VLS]; + struct pio_vl_map *oldmap, *newmap; + + if (!vl_scontexts) { + /* send context 0 reserved for VL15 */ + for (i = 1; i < dd->num_send_contexts; i++) + if (dd->send_contexts[i].type == SC_KERNEL) + num_kernel_send_contexts++; + /* truncate divide */ + sc_per_vl = num_kernel_send_contexts / num_vls; + /* extras */ + extra = num_kernel_send_contexts % num_vls; + vl_scontexts = lvl_scontexts; + /* add extras from last vl down */ + for (i = num_vls - 1; i >= 0; i--, extra--) + vl_scontexts[i] = sc_per_vl + (extra > 0 ? 
1 : 0); + } + /* build new map */ + newmap = kzalloc(sizeof(*newmap) + + roundup_pow_of_two(num_vls) * + sizeof(struct pio_map_elem *), + GFP_KERNEL); + if (!newmap) + goto bail; + newmap->actual_vls = num_vls; + newmap->vls = roundup_pow_of_two(num_vls); + newmap->mask = (1 << ilog2(newmap->vls)) - 1; + for (i = 0; i < newmap->vls; i++) { + /* save for wrap around */ + int first_scontext = scontext; + + if (i < newmap->actual_vls) { + int sz = roundup_pow_of_two(vl_scontexts[i]); + + /* only allocate once */ + newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) + + sz * sizeof(struct + send_context *), + GFP_KERNEL); + if (!newmap->map[i]) + goto bail; + newmap->map[i]->mask = (1 << ilog2(sz)) - 1; + /* assign send contexts */ + for (j = 0; j < sz; j++) { + if (dd->kernel_send_context[scontext]) + newmap->map[i]->ksc[j] = + dd->kernel_send_context[scontext]; + if (++scontext >= first_scontext + + vl_scontexts[i]) + /* wrap back to first send context */ + scontext = first_scontext; + } + } else { + /* just re-use entry without allocating */ + newmap->map[i] = newmap->map[i % num_vls]; + } + scontext = first_scontext + vl_scontexts[i]; + } + /* newmap in hand, save old map */ + spin_lock_irq(&dd->pio_map_lock); + oldmap = rcu_dereference_protected(dd->pio_map, + lockdep_is_held(&dd->pio_map_lock)); + + /* publish newmap */ + rcu_assign_pointer(dd->pio_map, newmap); + + spin_unlock_irq(&dd->pio_map_lock); + /* success, free any old map after grace period */ + if (oldmap) + call_rcu(&oldmap->list, pio_map_rcu_callback); + return 0; +bail: + /* free any partial allocation */ + pio_map_free(newmap); + return -ENOMEM; +} + +void free_pio_map(struct hfi1_devdata *dd) +{ + /* Free PIO map if allocated */ + if (rcu_access_pointer(dd->pio_map)) { + spin_lock_irq(&dd->pio_map_lock); + kfree(rcu_access_pointer(dd->pio_map)); + RCU_INIT_POINTER(dd->pio_map, NULL); + spin_unlock_irq(&dd->pio_map_lock); + synchronize_rcu(); + } + kfree(dd->kernel_send_context); + dd->kernel_send_context = NULL; +} + int init_pervl_scs(struct hfi1_devdata *dd) { int i; - u64 mask, all_vl_mask = (u64) 0x80ff; /* VLs 0-7, 15 */ + u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */ + u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */ u32 ctxt; + struct hfi1_pportdata *ppd = dd->pport; dd->vld[15].sc = sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); @@ -1699,6 +1905,12 @@ int init_pervl_scs(struct hfi1_devdata *dd) goto nomem; hfi1_init_ctxt(dd->vld[15].sc); dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); + + dd->kernel_send_context = kmalloc_node(dd->num_send_contexts * + sizeof(struct send_context *), + GFP_KERNEL, dd->node); + dd->kernel_send_context[0] = dd->vld[15].sc; + for (i = 0; i < num_vls; i++) { /* * Since this function does not deal with a specific @@ -1711,12 +1923,19 @@ int init_pervl_scs(struct hfi1_devdata *dd) dd->rcd[0]->rcvhdrqentsize, dd->node); if (!dd->vld[i].sc) goto nomem; - + dd->kernel_send_context[i + 1] = dd->vld[i].sc; hfi1_init_ctxt(dd->vld[i].sc); - /* non VL15 start with the max MTU */ dd->vld[i].mtu = hfi1_max_mtu; } + for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) { + dd->kernel_send_context[i + 1] = + sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); + if (!dd->kernel_send_context[i + 1]) + goto nomem; + hfi1_init_ctxt(dd->kernel_send_context[i + 1]); + } + sc_enable(dd->vld[15].sc); ctxt = dd->vld[15].sc->hw_context; mask = all_vl_mask & ~(1LL << 15); @@ -1724,17 +1943,29 @@ int init_pervl_scs(struct hfi1_devdata *dd) dd_dev_info(dd, "Using send context %u(%u) 
for VL15\n", dd->vld[15].sc->sw_index, ctxt); + for (i = 0; i < num_vls; i++) { sc_enable(dd->vld[i].sc); ctxt = dd->vld[i].sc->hw_context; - mask = all_vl_mask & ~(1LL << i); + mask = all_vl_mask & ~(data_vls_mask); write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); } + for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) { + sc_enable(dd->kernel_send_context[i + 1]); + ctxt = dd->kernel_send_context[i + 1]->hw_context; + mask = all_vl_mask & ~(data_vls_mask); + write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); + } + + if (pio_map_init(dd, ppd->port - 1, num_vls, NULL)) + goto nomem; return 0; nomem: sc_free(dd->vld[15].sc); for (i = 0; i < num_vls; i++) sc_free(dd->vld[i].sc); + for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) + sc_free(dd->kernel_send_context[i + 1]); return -ENOMEM; } diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h index 53d3e0a79375..1dedeb250548 100644 --- a/drivers/staging/rdma/hfi1/pio.h +++ b/drivers/staging/rdma/hfi1/pio.h @@ -165,6 +165,112 @@ struct sc_config_sizes { short int count; }; +/* + * The diagram below details the relationship of the mapping structures + * + * Since the mapping now allows for non-uniform send contexts per vl, the + * number of send contexts for a vl is either the vl_scontexts[vl] or + * a computation based on num_kernel_send_contexts/num_vls: + * + * For example: + * nactual = vl_scontexts ? vl_scontexts[vl] : num_kernel_send_contexts/num_vls + * + * n = roundup to next highest power of 2 using nactual + * + * In the case where num_kernel_send_contexts/num_vls doesn't divide + * evenly, the extras are added from the last vl downward. + * + * For the case where n > nactual, the send contexts are assigned + * in a round robin fashion wrapping back to the first send context + * for a particular vl. 
+ * + * dd->pio_map + * | pio_map_elem[0] + * | +--------------------+ + * v | mask | + * pio_vl_map |--------------------| + * +--------------------------+ | ksc[0] -> sc 1 | + * | list (RCU) | |--------------------| + * |--------------------------| ->| ksc[1] -> sc 2 | + * | mask | --/ |--------------------| + * |--------------------------| -/ | * | + * | actual_vls (max 8) | -/ |--------------------| + * |--------------------------| --/ | ksc[n] -> sc n | + * | vls (max 8) | -/ +--------------------+ + * |--------------------------| --/ + * | map[0] |-/ + * |--------------------------| +--------------------+ + * | map[1] |--- | mask | + * |--------------------------| \---- |--------------------| + * | * | \-- | ksc[0] -> sc 1+n | + * | * | \---- |--------------------| + * | * | \->| ksc[1] -> sc 2+n | + * |--------------------------| |--------------------| + * | map[vls - 1] |- | * | + * +--------------------------+ \- |--------------------| + * \- | ksc[m] -> sc m+n | + * \ +--------------------+ + * \- + * \ + * \- +--------------------+ + * \- | mask | + * \ |--------------------| + * \- | ksc[0] -> sc 1+m+n | + * \- |--------------------| + * >| ksc[1] -> sc 2+m+n | + * |--------------------| + * | * | + * |--------------------| + * | ksc[o] -> sc o+m+n | + * +--------------------+ + * + */ + +/* Initial number of send contexts per VL */ +#define INIT_SC_PER_VL 2 + +/* + * struct pio_map_elem - mapping for a vl + * @mask - selector mask + * @ksc - array of kernel send contexts for this vl + * + * The mask is used to "mod" the selector to + * produce index into the trailing array of + * kscs + */ +struct pio_map_elem { + u32 mask; + struct send_context *ksc[0]; +}; + +/* + * struct pio_vl_map - mapping for a vl + * @list - rcu head for free callback + * @mask - vl mask to "mod" the vl to produce an index to map array + * @actual_vls - number of vls + * @vls - numbers of vls rounded to next power of 2 + * @map - array of pio_map_elem entries + * + * This is the parent mapping structure. The trailing members of the + * struct point to pio_map_elem entries, which in turn point to an + * array of kscs for that vl. 
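+ *
+ * Editor's note: the two-line sketch below is illustrative only and
+ * is not part of this patch; it simply mirrors the lookup done in
+ * pio_select_send_context_vl() above. Because vls and the per-vl
+ * ksc counts are rounded up to powers of two, selection reduces to
+ * two masked index operations, each mask acting as a cheap modulo:
+ *
+ *	e = m->map[vl & m->mask];
+ *	sc = e->ksc[selector & e->mask];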
+ */ +struct pio_vl_map { + struct rcu_head list; + u32 mask; + u8 actual_vls; + u8 vls; + struct pio_map_elem *map[0]; +}; + +int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, + u8 *vl_scontexts); +void free_pio_map(struct hfi1_devdata *dd); +struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd, + u32 selector, u8 vl); +struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd, + u32 selector, u8 sc5); + /* send context functions */ int init_credit_return(struct hfi1_devdata *dd); void free_credit_return(struct hfi1_devdata *dd); diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 77e91f280b21..76d6a364da2d 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -552,6 +552,30 @@ struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5) return sde; } +/* + * qp_to_send_context - map a qp to a send context + * @qp: the QP + * @sc5: the 5 bit sc + * + * Return: + * A send context for the qp + */ +struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5) +{ + struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + + switch (qp->ibqp.qp_type) { + case IB_QPT_SMI: + /* SMA packets to VL15 */ + return dd->vld[15].sc; + default: + break; + } + + return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, + sc5); +} + struct qp_iter { struct hfi1_ibdev *dev; struct rvt_qp *qp; diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index afc2b4d242b7..7b1c57e37c6b 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -109,6 +109,7 @@ void hfi1_get_credit(struct rvt_qp *qp, u32 aeth); void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag); struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5); +struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5); struct qp_iter; diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 31419666cc69..10b14dabd23b 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -783,18 +783,6 @@ static int pio_wait(struct rvt_qp *qp, return ret; } -struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5) -{ - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); - struct hfi1_pportdata *ppd = dd->pport + (qp->port_num - 1); - u8 vl; - - vl = sc_to_vlt(dd, sc5); - if (vl >= ppd->vls_supported && vl != 15) - return NULL; - return dd->vld[vl].sc; -} - static void verbs_pio_complete(void *arg, int code) { struct rvt_qp *qp = (struct rvt_qp *)arg; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index 8f1fde847c14..c736015b18df 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -478,8 +478,6 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); -struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5); - extern const enum ib_wc_opcode ib_hfi1_wc_opcode[]; extern const u8 hdr_len_by_opcode[]; -- cgit v1.2.3-59-g8ed1b From 8a4d3444ebfacceb3e1bf4e449a42f9b6345596c Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sun, 14 Feb 2016 12:46:01 -0800 Subject: staging/rdma/hfi1: Determine actual operational VLs Use shared credits and dedicated credits for each VL to determine the actual number of operational VLs. 
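As an illustration of the rule (hypothetical numbers, not taken from
this patch): if the fabric manager programs dedicated or shared credits
for only VLs 0-3 of the 8 data VLs, the counting loop added to
set_buffer_control() yields vl_count = 4, records it in
actual_vls_operational, and re-initializes both the SDMA engine map and
the PIO send context map with 4 VLs instead of the configured
vls_operational.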
Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 37 +++++++++++++++++++++++++++++-------- drivers/staging/rdma/hfi1/diag.c | 2 +- drivers/staging/rdma/hfi1/hfi.h | 3 ++- 3 files changed, 32 insertions(+), 10 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 36e8e3e9b012..b169e892092c 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -10305,12 +10305,6 @@ int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val) ppd->vls_operational = val; if (!ppd->port) ret = -EINVAL; - else - ret = sdma_map_init( - ppd->dd, - ppd->port - 1, - val, - NULL); } break; /* @@ -10721,12 +10715,15 @@ static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, * raise = if the new limit is higher than the current value (may be changed * earlier in the algorithm), set the new limit to the new value */ -int set_buffer_control(struct hfi1_devdata *dd, struct buffer_control *new_bc) +int set_buffer_control(struct hfi1_pportdata *ppd, + struct buffer_control *new_bc) { + struct hfi1_devdata *dd = ppd->dd; u64 changing_mask, ld_mask, stat_mask; int change_count; int i, use_all_mask; int this_shared_changing; + int vl_count = 0, ret; /* * A0: add the variable any_shared_limit_changing below and in the * algorithm above. If removing A0 support, it can be removed. @@ -10878,6 +10875,28 @@ int set_buffer_control(struct hfi1_devdata *dd, struct buffer_control *new_bc) /* bracket the credit change with a total adjustment */ if (new_total < cur_total) set_global_limit(dd, new_total); + + /* + * Determine the actual number of operational VLs using the number of + * dedicated and shared credits for each VL. + */ + if (change_count > 0) { + for (i = 0; i < TXE_NUM_DATA_VL; i++) + if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || + be16_to_cpu(new_bc->vl[i].shared) > 0) + vl_count++; + ppd->actual_vls_operational = vl_count; + ret = sdma_map_init(dd, ppd->port - 1, vl_count ? + ppd->actual_vls_operational : + ppd->vls_operational, + NULL); + if (ret == 0) + ret = pio_map_init(dd, ppd->port - 1, vl_count ? + ppd->actual_vls_operational : + ppd->vls_operational, NULL); + if (ret) + return ret; + } return 0; } @@ -10969,7 +10988,7 @@ int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t) VL_ARB_LOW_PRIO_TABLE_SIZE, t); break; case FM_TBL_BUFFER_CONTROL: - ret = set_buffer_control(ppd->dd, t); + ret = set_buffer_control(ppd, t); break; case FM_TBL_SC2VLNT: set_sc2vlnt(ppd->dd, t); @@ -13990,6 +14009,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, } ppd->vls_supported = num_vls; ppd->vls_operational = ppd->vls_supported; + ppd->actual_vls_operational = ppd->vls_supported; /* Set the default MTU. 
*/ for (vl = 0; vl < num_vls; vl++) dd->vld[vl].mtu = hfi1_max_mtu; @@ -14074,6 +14094,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, num_vls, dd->chip_sdma_engines); num_vls = dd->chip_sdma_engines; ppd->vls_supported = dd->chip_sdma_engines; + ppd->vls_operational = ppd->vls_supported; } /* diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index 9523dc1b012f..b8faee0b676c 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -1017,7 +1017,7 @@ static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd, t.vl[i].dedicated = be_per_vl_credits; t.vl[15].dedicated = cpu_to_be16(vl15_credits); - return set_buffer_control(ppd->dd, &t); + return set_buffer_control(ppd, &t); err_exit: snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d", diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 4d5a18ece115..e8c4e56f68d6 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -644,6 +644,7 @@ struct hfi1_pportdata { u16 link_speed_active; u8 vls_supported; u8 vls_operational; + u8 actual_vls_operational; /* LID mask control */ u8 lmc; /* Rx Polarity inversion (compensate for ~tx on partner) */ @@ -1522,7 +1523,7 @@ int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, const void *from, size_t count); -int set_buffer_control(struct hfi1_devdata *dd, struct buffer_control *bc); +int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) { -- cgit v1.2.3-59-g8ed1b From 77e7639fd782f5432c87ed7143b3e50be76c8500 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 12:46:19 -0800 Subject: staging/rdma/hfi1: Add send context sw index Print the qp's send context sw index in the qpstats Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 76d6a364da2d..cc00eca1780b 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -681,7 +681,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) wqe = rvt_get_swqe_ptr(qp, qp->s_last); send_context = qp_to_send_context(qp, priv->s_sc); seq_printf(s, - "N %d %s QP%x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p\n", + "N %d %s QP%x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, @@ -712,7 +712,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) qp->s_rnr_retry_cnt, sde, sde ? sde->this_idx : 0, - send_context); + send_context, + send_context ? 
send_context->sw_index : 0); } void qp_comm_est(struct rvt_qp *qp) -- cgit v1.2.3-59-g8ed1b From 0358a440c2e7401238372316565b654fd95e5142 Mon Sep 17 00:00:00 2001 From: Vennila Megavannan Date: Sun, 14 Feb 2016 12:46:28 -0800 Subject: staging/rdma/hfi1: add cq head and tail information to qpstats This enables debugging issues related to the cq event signalling mechanism Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Vennila Megavannan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index cc00eca1780b..df905791ccfc 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -681,7 +681,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) wqe = rvt_get_swqe_ptr(qp, qp->s_last); send_context = qp_to_send_context(qp, priv->s_sc); seq_printf(s, - "N %d %s QP%x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u\n", + "N %d %s QP%x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u CQ %u %u\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, @@ -713,7 +713,9 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) sde, sde ? sde->this_idx : 0, send_context, - send_context ? send_context->sw_index : 0, + send_context ? send_context->sw_index : 0, + ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head, + ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail); } void qp_comm_est(struct rvt_qp *qp) -- cgit v1.2.3-59-g8ed1b From 8638b77f13d2b11a4e356916526d6303e1002fe9 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:19:24 -0800 Subject: staging/rdma/hfi1: Add spaces around binary operators Add spaces around binary operators. 
Fixes checkpatch check: CHECK: spaces preferred around that 'x' where x is a binary operator Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 104 +++++++++++++++++----------------- drivers/staging/rdma/hfi1/debugfs.c | 2 +- drivers/staging/rdma/hfi1/driver.c | 2 +- drivers/staging/rdma/hfi1/eprom.c | 10 ++-- drivers/staging/rdma/hfi1/firmware.c | 28 ++++----- drivers/staging/rdma/hfi1/hfi.h | 2 +- drivers/staging/rdma/hfi1/intr.c | 2 +- drivers/staging/rdma/hfi1/mad.c | 22 +++---- drivers/staging/rdma/hfi1/mad.h | 2 +- drivers/staging/rdma/hfi1/pcie.c | 2 +- drivers/staging/rdma/hfi1/pio.c | 4 +- drivers/staging/rdma/hfi1/pio_copy.c | 18 +++--- drivers/staging/rdma/hfi1/qsfp.c | 6 +- drivers/staging/rdma/hfi1/qsfp.h | 2 +- drivers/staging/rdma/hfi1/rc.c | 2 +- drivers/staging/rdma/hfi1/sdma.c | 8 +-- drivers/staging/rdma/hfi1/sdma.h | 2 +- drivers/staging/rdma/hfi1/user_sdma.c | 2 +- drivers/staging/rdma/hfi1/verbs.c | 2 +- 19 files changed, 111 insertions(+), 111 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index b169e892092c..79c215e4d2a0 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -422,10 +422,10 @@ static struct flag_table pio_err_status_flags[] = { SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK), /*23*/ FLAG_ENTRY("PioWriteQwValidParity", - SEC_WRITE_DROPPED|SEC_SPC_FREEZE, + SEC_WRITE_DROPPED | SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK), /*24*/ FLAG_ENTRY("PioBlockQwCountParity", - SEC_WRITE_DROPPED|SEC_SPC_FREEZE, + SEC_WRITE_DROPPED | SEC_SPC_FREEZE, SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK), /*25*/ FLAG_ENTRY("PioVlfVlLenParity", SEC_SPC_FREEZE, @@ -1196,7 +1196,7 @@ CNTR_ELEM(#name, \ #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx #define OVR_ELM(ctx) \ CNTR_ELEM("RcvHdrOvr" #ctx, \ - (RCV_HDR_OVFL_CNT + ctx*0x100), \ + (RCV_HDR_OVFL_CNT + ctx * 0x100), \ 0, CNTR_NORMAL, port_access_u64_csr) /* 32bit TXE */ @@ -5259,7 +5259,7 @@ static char *is_various_name(char *buf, size_t bsize, unsigned int source) if (source < ARRAY_SIZE(various_names)) strncpy(buf, various_names[source], bsize); else - snprintf(buf, bsize, "Reserved%u", source+IS_VARIOUS_START); + snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START); return buf; } @@ -6318,7 +6318,7 @@ void reset_link_credits(struct hfi1_devdata *dd) /* remove all previous VL credit limits */ for (i = 0; i < TXE_NUM_DATA_VL; i++) - write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0); + write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); write_csr(dd, SEND_CM_CREDIT_VL15, 0); write_global_credit(dd, 0, 0, 0); /* reset the CM block */ @@ -7573,7 +7573,7 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) /* if the link is already going down or disabled, do not * queue another */ if ((ppd->host_link_state - & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN)) + & (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) || ppd->link_enabled == 0) { dd_dev_info(dd, "%s: not queuing link down\n", __func__); @@ -7991,7 +7991,7 @@ static irqreturn_t general_interrupt(int irq, void *data) /* phase 2: call the appropriate handler */ for_each_set_bit(bit, (unsigned long *)®s[0], - CCE_NUM_INT_CSRS*64) { + CCE_NUM_INT_CSRS * 64) { is_interrupt(dd, bit); } @@ -8014,12 +8014,12 @@ static irqreturn_t sdma_interrupt(int irq, void *data) /* This 
read_csr is really bad in the hot path */ status = read_csr(dd, - CCE_INT_STATUS + (8*(IS_SDMA_START/64))) + CCE_INT_STATUS + (8 * (IS_SDMA_START / 64))) & sde->imask; if (likely(status)) { /* clear the interrupt(s) */ write_csr(dd, - CCE_INT_CLEAR + (8*(IS_SDMA_START/64)), + CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)), status); /* handle the interrupt(s) */ @@ -8944,10 +8944,10 @@ static u16 opa_to_vc_link_widths(u16 opa_widths) u16 from; u16 to; } opa_link_xlate[] = { - { OPA_LINK_WIDTH_1X, 1 << (1-1) }, - { OPA_LINK_WIDTH_2X, 1 << (2-1) }, - { OPA_LINK_WIDTH_3X, 1 << (3-1) }, - { OPA_LINK_WIDTH_4X, 1 << (4-1) }, + { OPA_LINK_WIDTH_1X, 1 << (1 - 1) }, + { OPA_LINK_WIDTH_2X, 1 << (2 - 1) }, + { OPA_LINK_WIDTH_3X, 1 << (3 - 1) }, + { OPA_LINK_WIDTH_4X, 1 << (4 - 1) }, }; for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) { @@ -9725,7 +9725,7 @@ static void set_lidlmc(struct hfi1_pportdata *ppd) c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) - << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)| + << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) | ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK) << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT); write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); @@ -10290,7 +10290,7 @@ int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val) * The VL Arbitrator high limit is sent in units of 4k * bytes, while HFI stores it in units of 64 bytes. */ - val *= 4096/64; + val *= 4096 / 64; reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK) << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT; write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); @@ -10507,7 +10507,7 @@ static int get_buffer_control(struct hfi1_devdata *dd, /* OPA and HFI have a 1-1 mapping */ for (i = 0; i < TXE_NUM_DATA_VL; i++) - read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]); + read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]); /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */ read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]); @@ -11207,16 +11207,16 @@ u32 hdrqempty(struct hfi1_ctxtdata *rcd) static u32 encoded_size(u32 size) { switch (size) { - case 4*1024: return 0x1; - case 8*1024: return 0x2; - case 16*1024: return 0x3; - case 32*1024: return 0x4; - case 64*1024: return 0x5; - case 128*1024: return 0x6; - case 256*1024: return 0x7; - case 512*1024: return 0x8; - case 1*1024*1024: return 0x9; - case 2*1024*1024: return 0xa; + case 4 * 1024: return 0x1; + case 8 * 1024: return 0x2; + case 16 * 1024: return 0x3; + case 32 * 1024: return 0x4; + case 64 * 1024: return 0x5; + case 128 * 1024: return 0x6; + case 256 * 1024: return 0x7; + case 512 * 1024: return 0x8; + case 1 * 1024 * 1024: return 0x9; + case 2 * 1024 * 1024: return 0xa; } return 0x1; /* if invalid, go with the minimum size */ } @@ -12324,12 +12324,12 @@ void set_intr_state(struct hfi1_devdata *dd, u32 enable) if (enable) { /* enable all interrupts */ for (i = 0; i < CCE_NUM_INT_CSRS; i++) - write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0); + write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0); init_qsfp_int(dd); } else { for (i = 0; i < CCE_NUM_INT_CSRS; i++) - write_csr(dd, CCE_INT_MASK + (8*i), 0ull); + write_csr(dd, CCE_INT_MASK + (8 * i), 0ull); } } @@ -12341,7 +12341,7 @@ static void clear_all_interrupts(struct hfi1_devdata *dd) int i; for (i = 0; i < CCE_NUM_INT_CSRS; i++) - write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0); + write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0); write_csr(dd, CCE_ERR_CLEAR, ~(u64)0); write_csr(dd, 
MISC_ERR_CLEAR, ~(u64)0); @@ -12421,10 +12421,10 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) /* direct the chip source to the given MSI-X interrupt */ m = isrc / 8; n = isrc % 8; - reg = read_csr(dd, CCE_INT_MAP + (8*m)); - reg &= ~((u64)0xff << (8*n)); - reg |= ((u64)msix_intr & 0xff) << (8*n); - write_csr(dd, CCE_INT_MAP + (8*m), reg); + reg = read_csr(dd, CCE_INT_MAP + (8 * m)); + reg &= ~((u64)0xff << (8 * n)); + reg |= ((u64)msix_intr & 0xff) << (8 * n); + write_csr(dd, CCE_INT_MAP + (8 * m), reg); } static void remap_sdma_interrupts(struct hfi1_devdata *dd, @@ -12437,11 +12437,11 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd, * SDMAProgress * SDMAIdle */ - remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine, + remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine, msix_intr); - remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine, + remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine, msix_intr); - remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine, + remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine, msix_intr); } @@ -12520,9 +12520,9 @@ static int request_msix_irqs(struct hfi1_devdata *dd) * Set the interrupt register and mask for this * context's interrupt. */ - rcd->ireg = (IS_RCVAVAIL_START+idx) / 64; + rcd->ireg = (IS_RCVAVAIL_START + idx) / 64; rcd->imask = ((u64)1) << - ((IS_RCVAVAIL_START+idx) % 64); + ((IS_RCVAVAIL_START + idx) % 64); handler = receive_context_interrupt; thread = receive_context_thread; arg = rcd; @@ -12542,7 +12542,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) if (arg == NULL) continue; /* make sure the name is terminated */ - me->name[sizeof(me->name)-1] = 0; + me->name[sizeof(me->name) - 1] = 0; ret = request_threaded_irq(me->msix.vector, handler, thread, 0, me->name, arg); @@ -12581,7 +12581,7 @@ static void reset_interrupts(struct hfi1_devdata *dd) /* all chip interrupts map to MSI-X 0 */ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) - write_csr(dd, CCE_INT_MAP + (8*i), 0); + write_csr(dd, CCE_INT_MAP + (8 * i), 0); } static int set_up_interrupts(struct hfi1_devdata *dd) @@ -12831,7 +12831,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) /* CceIntMap */ for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) - write_csr(dd, CCE_INT_MAP+(8*i), 0); + write_csr(dd, CCE_INT_MAP + (8 * i), 0); /* SendCtxtCreditReturnAddr */ for (i = 0; i < dd->chip_send_contexts; i++) @@ -12849,12 +12849,12 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); for (j = 0; j < RXE_NUM_TID_FLOWS; j++) - write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0); + write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0); } /* RcvArray */ for (i = 0; i < dd->chip_rcv_array_count; i++) - write_csr(dd, RCV_ARRAY + (8*i), + write_csr(dd, RCV_ARRAY + (8 * i), RCV_ARRAY_RT_WRITE_ENABLE_SMASK); /* RcvQPMapTable */ @@ -13092,15 +13092,15 @@ static void reset_txe_csrs(struct hfi1_devdata *dd) write_csr(dd, SEND_ERR_CLEAR, ~0ull); /* SEND_ERR_FORCE read-only */ for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++) - write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0); + write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0); for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++) - write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0); - for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++) - write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0); + 
write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0); + for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++) + write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0); for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++) - write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0); + write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0); for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++) - write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0); + write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0); write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR); write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR); @@ -13111,7 +13111,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd) write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0); write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0); for (i = 0; i < TXE_NUM_DATA_VL; i++) - write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0); + write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); write_csr(dd, SEND_CM_CREDIT_VL15, 0); /* SEND_CM_CREDIT_USED_VL read-only */ /* SEND_CM_CREDIT_USED_VL15 read-only */ @@ -13403,7 +13403,7 @@ static void init_chip(struct hfi1_devdata *dd) write_csr(dd, RCV_CTXT_CTRL, 0); /* mask all interrupt sources */ for (i = 0; i < CCE_NUM_INT_CSRS; i++) - write_csr(dd, CCE_INT_MASK + (8*i), 0ull); + write_csr(dd, CCE_INT_MASK + (8 * i), 0ull); /* * DC Reset: do a full DC reset before the register clear. @@ -14404,7 +14404,7 @@ static void handle_temp_err(struct hfi1_devdata *dd) dd_dev_emerg(dd, "Critical temperature reached! Forcing device into freeze mode!\n"); dd->flags |= HFI1_FORCED_FREEZE; - start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT); + start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT); /* * Shut DC down as much and as quickly as possible. * diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index d6dc339fb2a3..0ee7217507d2 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -750,7 +750,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd) ppd, &port_cntr_ops[i].ops, port_cntr_ops[i].ops.write == NULL ? 
- S_IRUGO : S_IRUGO|S_IWUSR); + S_IRUGO : S_IRUGO | S_IWUSR); } } diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 5d012feaa4d4..dd5187f23786 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -702,7 +702,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) */ prefetch_range(packet->ebuf, packet->tlen - ((packet->rcd->rcvhdrqentsize - - (rhf_hdrq_offset(packet->rhf)+2)) * 4)); + (rhf_hdrq_offset(packet->rhf) + 2)) * 4)); } /* diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c index 29958aa4e4fd..9a0ddd719bf2 100644 --- a/drivers/staging/rdma/hfi1/eprom.c +++ b/drivers/staging/rdma/hfi1/eprom.c @@ -99,7 +99,7 @@ /* sleep length while waiting for controller */ #define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */ -#define COUNT_DELAY_SEC(n) ((n) * (1000000/WAIT_SLEEP_US)) +#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US)) /* GPIO pins */ #define EPROM_WP_N (1ull << 14) /* EPROM write line */ @@ -254,7 +254,7 @@ static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result) int i; write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset)); - for (i = 0; i < EP_PAGE_SIZE/sizeof(u32); i++) + for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++) result[i] = (u32)read_csr(dd, ASIC_EEP_DATA); write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */ } @@ -265,7 +265,7 @@ static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result) static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr) { u32 offset; - u32 buffer[EP_PAGE_SIZE/sizeof(u32)]; + u32 buffer[EP_PAGE_SIZE / sizeof(u32)]; int ret = 0; /* reject anything not on an EPROM page boundary */ @@ -296,7 +296,7 @@ static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data) write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE); write_csr(dd, ASIC_EEP_DATA, data[0]); write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset)); - for (i = 1; i < EP_PAGE_SIZE/sizeof(u32); i++) + for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++) write_csr(dd, ASIC_EEP_DATA, data[i]); /* will close the open page */ return wait_for_not_busy(dd); @@ -308,7 +308,7 @@ static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data) static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr) { u32 offset; - u32 buffer[EP_PAGE_SIZE/sizeof(u32)]; + u32 buffer[EP_PAGE_SIZE / sizeof(u32)]; int ret = 0; /* reject anything not on an EPROM page boundary */ diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 35084b754b7c..f87460d7c7f6 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -393,17 +393,17 @@ static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css) /* verify CSS header fields (most sizes are in DW, so add /4) */ if (invalid_header(dd, "module_type", css->module_type, CSS_MODULE_TYPE) || invalid_header(dd, "header_len", css->header_len, - (sizeof(struct firmware_file)/4)) + (sizeof(struct firmware_file) / 4)) || invalid_header(dd, "header_version", css->header_version, CSS_HEADER_VERSION) || invalid_header(dd, "module_vendor", css->module_vendor, CSS_MODULE_VENDOR) || invalid_header(dd, "key_size", - css->key_size, KEY_SIZE/4) + css->key_size, KEY_SIZE / 4) || invalid_header(dd, "modulus_size", - css->modulus_size, KEY_SIZE/4) + css->modulus_size, KEY_SIZE / 4) || invalid_header(dd, "exponent_size", - css->exponent_size, 
EXPONENT_SIZE/4)) { + css->exponent_size, EXPONENT_SIZE / 4)) { return -EINVAL; } return 0; @@ -488,7 +488,7 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name, ret = verify_css_header(dd, css); if (ret) { dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name); - } else if ((css->size*4) == fdet->fw->size) { + } else if ((css->size * 4) == fdet->fw->size) { /* non-augmented firmware file */ struct firmware_file *ff = (struct firmware_file *) fdet->fw->data; @@ -513,7 +513,7 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name, dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n"); ret = -EINVAL; } - } else if ((css->size*4) + AUGMENT_SIZE == fdet->fw->size) { + } else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) { /* augmented firmware file */ struct augmented_firmware_file *aff = (struct augmented_firmware_file *)fdet->fw->data; @@ -536,7 +536,7 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name, /* css->size check failed */ dd_dev_err(dd, "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n", - fdet->fw->size/4, (fdet->fw->size - AUGMENT_SIZE)/4, + fdet->fw->size / 4, (fdet->fw->size - AUGMENT_SIZE) / 4, css->size); ret = -EINVAL; @@ -780,7 +780,7 @@ static int retry_firmware(struct hfi1_devdata *dd, int load_result) static void write_rsa_data(struct hfi1_devdata *dd, int what, const u8 *data, int nbytes) { - int qw_size = nbytes/8; + int qw_size = nbytes / 8; int i; if (((unsigned long)data & 0x7) == 0) { @@ -788,14 +788,14 @@ static void write_rsa_data(struct hfi1_devdata *dd, int what, u64 *ptr = (u64 *)data; for (i = 0; i < qw_size; i++, ptr++) - write_csr(dd, what + (8*i), *ptr); + write_csr(dd, what + (8 * i), *ptr); } else { /* not aligned */ for (i = 0; i < qw_size; i++, data += 8) { u64 value; memcpy(&value, data, 8); - write_csr(dd, what + (8*i), value); + write_csr(dd, what + (8 * i), value); } } } @@ -808,7 +808,7 @@ static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what, const u8 *data, int nbytes) { u64 *ptr = (u64 *)data; - int qw_size = nbytes/8; + int qw_size = nbytes / 8; for (; qw_size > 0; qw_size--, ptr++) write_csr(dd, what, *ptr); @@ -1743,8 +1743,8 @@ int get_platform_config_field(struct hfi1_devdata *dd, if (len < field_len_bits) return -EINVAL; - seek = field_start_bits/8; - wlen = field_len_bits/8; + seek = field_start_bits / 8; + wlen = field_len_bits / 8; src_ptr = (u32 *)((u8 *)src_ptr + seek); @@ -1783,7 +1783,7 @@ int get_platform_config_field(struct hfi1_devdata *dd, if (!src_ptr || len < field_len_bits) return -EINVAL; - src_ptr += (field_start_bits/32); + src_ptr += (field_start_bits / 32); *data = (*src_ptr >> (field_start_bits % 32)) & ((1 << field_len_bits) - 1); diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index e8c4e56f68d6..70decdf41b21 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -718,7 +718,7 @@ struct hfi1_pportdata { /* begin congestion log related entries * cc_log_lock protects all congestion log related data */ spinlock_t cc_log_lock ____cacheline_aligned_in_smp; - u8 threshold_cong_event_map[OPA_MAX_SLS/8]; + u8 threshold_cong_event_map[OPA_MAX_SLS / 8]; u16 threshold_event_counter; struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS]; int cc_log_idx; /* index for logging events */ diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c index 
9adab8638f21..5e6d77de0707 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/staging/rdma/hfi1/intr.c @@ -166,7 +166,7 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) reset_link_credits(dd); /* freeze after a link down to guarantee a clean egress */ - start_freeze_handling(ppd, FREEZE_SELF|FREEZE_LINK_DOWN); + start_freeze_handling(ppd, FREEZE_SELF | FREEZE_LINK_DOWN); ev = IB_EVENT_PORT_ERR; diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index d9efe223328b..1a9eb502f288 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -534,7 +534,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, ppd = dd->pport + (port - 1); ibp = &ppd->ibport_data; - if (ppd->vls_supported/2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || + if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || ppd->vls_supported > ARRAY_SIZE(dd->vld)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); @@ -600,13 +600,13 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, for (i = 0; i < ppd->vls_supported; i++) { mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU); if ((i % 2) == 0) - pi->neigh_mtu.pvlx_to_mtu[i/2] |= (mtu << 4); + pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4); else - pi->neigh_mtu.pvlx_to_mtu[i/2] |= mtu; + pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu; } /* don't forget VL 15 */ mtu = mtu_to_enum(dd->vld[15].mtu, 2048); - pi->neigh_mtu.pvlx_to_mtu[15/2] |= mtu; + pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu; pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL; pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS); pi->partenforce_filterraw |= @@ -744,7 +744,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, return reply((struct ib_mad_hdr *)smp); } - n_blocks_avail = (u16) (npkeys/OPA_PARTITION_TABLE_BLK_SIZE) + 1; + n_blocks_avail = (u16) (npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16); @@ -1207,17 +1207,17 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT, ibp->rvp.vl_high_limit); - if (ppd->vls_supported/2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || + if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || ppd->vls_supported > ARRAY_SIZE(dd->vld)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < ppd->vls_supported; i++) { if ((i % 2) == 0) - mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i/2] >> 4) + mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >> 4) & 0xF); else - mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i/2] & 0xF); + mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] & 0xF); if (mtu == 0xffff) { pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n", mtu, @@ -1236,7 +1236,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, /* As per OPAV1 spec: VL15 must support and be configured * for operation with a 2048 or larger MTU. 
*/ - mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15/2] & 0xF); + mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF); if (mtu < 2048 || mtu == 0xffff) mtu = 2048; if (dd->vld[15].mtu != mtu) { @@ -1419,7 +1419,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, return reply((struct ib_mad_hdr *)smp); } - n_blocks_avail = (u16)(npkeys/OPA_PARTITION_TABLE_BLK_SIZE) + 1; + n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; if (start_block + n_blocks_sent > n_blocks_avail || n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) { @@ -3460,7 +3460,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, rcu_read_unlock(); if (resp_len) - *resp_len += sizeof(u16)*(IB_CCT_ENTRIES * n_blocks + 1); + *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1); return reply((struct ib_mad_hdr *)smp); } diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/staging/rdma/hfi1/mad.h index f0317750e2fc..b6c88be4ee4c 100644 --- a/drivers/staging/rdma/hfi1/mad.h +++ b/drivers/staging/rdma/hfi1/mad.h @@ -267,7 +267,7 @@ struct opa_hfi1_cong_log { u8 congestion_flags; __be16 threshold_event_counter; __be32 current_time_stamp; - u8 threshold_cong_event_map[OPA_MAX_SLS/8]; + u8 threshold_cong_event_map[OPA_MAX_SLS / 8]; struct opa_hfi1_cong_log_event events[OPA_CONG_LOG_ELEMS]; } __packed; diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 04f2d8a37f36..019b4f83d2bd 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -840,7 +840,7 @@ static void write_gasket_interrupt(struct hfi1_devdata *dd, int index, { write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8), (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) - |((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT))); + | ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT))); } /* diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 69bbe22aae55..9bafedfe48f8 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -101,7 +101,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op) /* Fall through */ case PSC_DATA_VL_ENABLE: /* Disallow sending on VLs not enabled */ - mask = (((~0ull)<halt_wait); } -#define BLOCK_DWORDS (PIO_BLOCK_SIZE/sizeof(u32)) +#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32)) #define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS) /* diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/staging/rdma/hfi1/pio_copy.c index ebb0bafc68cb..dc0c1783e10f 100644 --- a/drivers/staging/rdma/hfi1/pio_copy.c +++ b/drivers/staging/rdma/hfi1/pio_copy.c @@ -52,9 +52,9 @@ /* additive distance between non-SOP and SOP space */ #define SOP_DISTANCE (TXE_PIO_SIZE / 2) -#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE-1) +#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE - 1) /* number of QUADWORDs in a block */ -#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE/sizeof(u64)) +#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE / sizeof(u64)) /** * pio_copy - copy data block to MMIO space @@ -83,7 +83,7 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, dest += sizeof(u64); /* calculate where the QWORD data ends - in SOP=1 space */ - dend = dest + ((count>>1) * sizeof(u64)); + dend = dest + ((count >> 1) * sizeof(u64)); if (dend < send) { /* all QWORD data is within the SOP block, does *not* @@ -177,7 +177,7 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, * "zero" shift - bit shift used to zero out upper bytes. 
Input is * the count of LSB bytes to preserve. */ -#define zshift(x) (8 * (8-(x))) +#define zshift(x) (8 * (8 - (x))) /* * "merge" shift - bit shift used to merge with carry bytes. Input is @@ -244,7 +244,7 @@ static inline void read_extra_bytes(struct pio_buf *pbuf, pbuf->carry.val64 |= (((*(u64 *)from) >> mshift(off)) << zshift(xbytes)) - >> zshift(xbytes+pbuf->carry_bytes); + >> zshift(xbytes + pbuf->carry_bytes); off = 0; pbuf->carry_bytes += xbytes; nbytes -= xbytes; @@ -411,7 +411,7 @@ static inline void merge_write8( jcopy(&pbuf->carry.val8[pbuf->carry_bytes], src, remainder); writeq(pbuf->carry.val64, dest); - jcopy(&pbuf->carry.val8[0], src+remainder, pbuf->carry_bytes); + jcopy(&pbuf->carry.val8[0], src + remainder, pbuf->carry_bytes); } /* @@ -463,7 +463,7 @@ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc, dest += sizeof(u64); /* calculate where the QWORD data ends - in SOP=1 space */ - dend = dest + ((nbytes>>3) * sizeof(u64)); + dend = dest + ((nbytes >> 3) * sizeof(u64)); if (dend < send) { /* all QWORD data is within the SOP block, does *not* @@ -645,7 +645,7 @@ static void mid_copy_straight(struct pio_buf *pbuf, void __iomem *dend; /* 8-byte data end */ /* calculate 8-byte data end */ - dend = dest + ((nbytes>>3) * sizeof(u64)); + dend = dest + ((nbytes >> 3) * sizeof(u64)); if (pbuf->qw_written < PIO_BLOCK_QWS) { /* @@ -713,7 +713,7 @@ static void mid_copy_straight(struct pio_buf *pbuf, /* we know carry_bytes was zero on entry to this routine */ read_low_bytes(pbuf, from, nbytes & 0x7); - pbuf->qw_written += nbytes>>3; + pbuf->qw_written += nbytes >> 3; } /* diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index 42e5be494fca..a6d55a614160 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -339,7 +339,7 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) u8 *cache = &cp->cache[0]; /* ensure sane contents on invalid reads, for cable swaps */ - memset(cache, 0, (QSFP_MAX_NUM_PAGES*128)); + memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128)); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.cache_valid = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); @@ -420,7 +420,7 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) return 0; bail: - memset(cache, 0, (QSFP_MAX_NUM_PAGES*128)); + memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128)); return ret; } @@ -564,7 +564,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len) memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK); for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) { - sofar += scnprintf(buf + sofar, len-sofar, + sofar += scnprintf(buf + sofar, len - sofar, " %02X", bin_buff[iidx]); } sofar += scnprintf(buf + sofar, len - sofar, "\n"); diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h index af59a43b2d5f..9f6e2f301040 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/staging/rdma/hfi1/qsfp.h @@ -214,7 +214,7 @@ struct qsfp_data { /* Helps to find our way */ struct hfi1_pportdata *ppd; struct work_struct qsfp_work; - u8 cache[QSFP_MAX_NUM_PAGES*128]; + u8 cache[QSFP_MAX_NUM_PAGES * 128]; spinlock_t qsfp_lock; u8 check_interrupt_flags; u8 reset_needed; diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 443fda8df380..5cdf1d250807 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -1992,7 +1992,7 @@ static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, 
spin_lock_irqsave(&ppd->cc_log_lock, flags); - ppd->threshold_cong_event_map[sl/8] |= 1 << (sl % 8); + ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8); ppd->threshold_event_counter++; cc_event = &ppd->cc_events[ppd->cc_log_idx++]; diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index e79f931d06ce..395393550700 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -1020,7 +1020,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) dd->chip_sdma_mem_size); per_sdma_credits = - dd->chip_sdma_mem_size/(num_engines * SDMA_BLOCK_SIZE); + dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE); /* set up freeze waitqueue */ init_waitqueue_head(&dd->sdma_unfreeze_wq); @@ -1625,10 +1625,10 @@ static void sdma_setlengen(struct sdma_engine *sde) * generation counter. */ write_sde_csr(sde, SD(LEN_GEN), - (sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT) + (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT) ); write_sde_csr(sde, SD(LEN_GEN), - ((sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT)) + ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | (4ULL << SD(LEN_GEN_GENERATION_SHIFT)) ); } @@ -3057,5 +3057,5 @@ void _sdma_engine_progress_schedule( trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); /* assume we have selected a good cpu */ write_csr(sde->dd, - CCE_INT_FORCE + (8*(IS_SDMA_START/64)), sde->progress_mask); + CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), sde->progress_mask); } diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index f24b5a17322b..c106d3c5b06f 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -682,7 +682,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd, dd->default_desc1; if (tx->flags & SDMA_TXREQ_F_URGENT) tx->descp[tx->num_desc].qw[1] |= - (SDMA_DESC1_HEAD_TO_HOST_FLAG| + (SDMA_DESC1_HEAD_TO_HOST_FLAG | SDMA_DESC1_INT_REQ_FLAG); } diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index dfa9ef209793..9fe18b082fa9 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -1204,7 +1204,7 @@ static int set_txreq_header(struct user_sdma_request *req, /* Set ACK request on last packet */ if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) - hdr->bth[2] |= cpu_to_be32(1UL<<31); + hdr->bth[2] |= cpu_to_be32(1UL << 31); /* Set the new offset */ hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset); diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 10b14dabd23b..acf1132951e3 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -874,7 +874,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords); } else { if (ss) { - seg_pio_copy_start(pbuf, pbc, hdr, hdrwords*4); + seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4); while (len) { void *addr = ss->sge.vaddr; u32 slen = ss->sge.length; -- cgit v1.2.3-59-g8ed1b From 74182acd7f6b5782d72bf608db233348d2120af0 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:19:32 -0800 Subject: staging/rdma/hfi1: Remove multiple blank lines Remove multiple blank lines to fix checkpatch check: CHECK: Please don't use multiple blank lines Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 6 ------ drivers/staging/rdma/hfi1/debugfs.c | 3 --- 
drivers/staging/rdma/hfi1/file_ops.c | 2 -- drivers/staging/rdma/hfi1/hfi.h | 2 -- drivers/staging/rdma/hfi1/init.c | 1 - drivers/staging/rdma/hfi1/intr.c | 1 - drivers/staging/rdma/hfi1/mad.c | 1 - drivers/staging/rdma/hfi1/mad.h | 1 - drivers/staging/rdma/hfi1/pcie.c | 1 - drivers/staging/rdma/hfi1/pio.c | 1 - drivers/staging/rdma/hfi1/pio.h | 2 -- drivers/staging/rdma/hfi1/sdma.c | 2 -- drivers/staging/rdma/hfi1/sdma.h | 4 ---- drivers/staging/rdma/hfi1/sysfs.c | 6 ------ drivers/staging/rdma/hfi1/trace.h | 4 ---- drivers/staging/rdma/hfi1/twsi.h | 1 - drivers/staging/rdma/hfi1/ud.c | 1 - drivers/staging/rdma/hfi1/verbs.c | 2 -- 18 files changed, 41 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 79c215e4d2a0..0a593bdf1761 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -986,7 +986,6 @@ static struct flag_table dc8051_info_host_msg_flags[] = { FLAG_ENTRY0("Link going down", 0x0100), }; - static u32 encoded_size(u32 size); static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate); static int set_physical_link_state(struct hfi1_devdata *dd, u64 state); @@ -1285,7 +1284,6 @@ static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr, { u64 ret; - if (mode == CNTR_MODE_R) { ret = read_csr(dd, csr); } else if (mode == CNTR_MODE_W) { @@ -10748,7 +10746,6 @@ int set_buffer_control(struct hfi1_pportdata *ppd, #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15) #define NUM_USABLE_VLS 16 /* look at VL15 and less */ - /* find the new total credits, do sanity check on unused VLs */ for (i = 0; i < OPA_MAX_VLS; i++) { if (valid_vl(i)) { @@ -11891,7 +11888,6 @@ static int init_cntrs(struct hfi1_devdata *dd) if (!dd->scntrs) goto bail; - /* allocate space for the counter names */ dd->cntrnameslen = sz; dd->cntrnames = kmalloc(sz, GFP_KERNEL); @@ -12060,7 +12056,6 @@ bail: return -ENOMEM; } - static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) { switch (chip_lstate) { @@ -14282,7 +14277,6 @@ static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate, return (u16)delta_cycles; } - /** * create_pbc - build a pbc for transmission * @flags: special case flags or-ed in built pbc diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index 0ee7217507d2..7cb43488d796 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -102,7 +102,6 @@ do { \ pr_warn("create of %s failed\n", name); \ } while (0) - #define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \ DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO) @@ -127,7 +126,6 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos) return pos; } - static void _opcode_stats_seq_stop(struct seq_file *s, void *v) __releases(RCU) { @@ -308,7 +306,6 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos) return pos; } - static void _sdes_seq_stop(struct seq_file *s, void *v) __releases(RCU) { diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index cc681f7bc570..b52cb78c1f45 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -162,7 +162,6 @@ enum mmap_types { #define dbg(fmt, ...) 
\ pr_info(fmt, ##__VA_ARGS__) - static inline int is_valid_mmap(u64 token) { return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC); @@ -1589,7 +1588,6 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence) return filp->f_pos; } - /* NOTE: assumes unsigned long is 8 bytes */ static ssize_t ui_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 70decdf41b21..347ceca5f361 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -878,7 +878,6 @@ struct hfi1_devdata { wait_queue_head_t sdma_unfreeze_wq; atomic_t sdma_unfreeze_count; - /* hfi1_pportdata, points to array of (physical) port-specific * data structs, indexed by pidx (0..n-1) */ @@ -1598,7 +1597,6 @@ static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd) /* IB dword length mask in PBC (lower 11 bits); same for all chips */ #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1) - /* ctxt_flag bit offsets */ /* context has been setup */ #define HFI1_CTXT_SETUP_DONE 1 diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 423c6996e93c..3071fbce37f7 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -1723,7 +1723,6 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size, rcd->egrbufs.size); - /* * Set the contexts rcv array head update threshold to the closest * power of 2 (so we can use a mask instead of modulo) below half diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c index 5e6d77de0707..9a9b331cc469 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/staging/rdma/hfi1/intr.c @@ -179,7 +179,6 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) signal_ib_event(ppd, ev); } - } /* diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 1a9eb502f288..b687e3f2d963 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -2310,7 +2310,6 @@ static void a0_portstatus(struct hfi1_pportdata *ppd, } } - static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, struct ib_device *ibdev, u8 port, u32 *resp_len) { diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/staging/rdma/hfi1/mad.h index b6c88be4ee4c..f9e93c035d28 100644 --- a/drivers/staging/rdma/hfi1/mad.h +++ b/drivers/staging/rdma/hfi1/mad.h @@ -235,7 +235,6 @@ struct ib_pma_portcounters_cong { #define IB_CC_SVCTYPE_RD 0x2 #define IB_CC_SVCTYPE_UD 0x3 - /* * There should be an equivalent IB #define for the following, but * I cannot find it. diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 019b4f83d2bd..26eb610d73b4 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -447,7 +447,6 @@ void restore_pci_variables(struct hfi1_devdata *dd) pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2); } - /* * BIOS may not set PCIe bus-utilization parameters for best performance. * Check and optionally adjust them to maximize our throughput. 
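(For readers who do not run checkpatch themselves: every hunk in this patch is an instance of the same CHECK, which fires wherever two or more consecutive blank lines appear in a source file. A minimal before/after sketch of the rule follows; the struct and functions in it are invented for illustration and are not hfi1 code.

	struct widget {
		int state;
	};

	/* before: flagged with "CHECK: Please don't use multiple blank lines" */
	static int widget_reset_before(struct widget *w)
	{
		int old = w->state;


		w->state = 0;
		return old;
	}

	/* after: a single blank line expresses the same grouping */
	static int widget_reset_after(struct widget *w)
	{
		int old = w->state;

		w->state = 0;
		return old;
	}

The CHECK-level messages quoted in these logs are emitted when checkpatch.pl runs in strict mode, e.g. scripts/checkpatch.pl --strict --file <path>; checkpatch also turns strict mode on by default for files under drivers/staging.)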
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 9bafedfe48f8..f1f30b371425 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -852,7 +852,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, sc->credit_ctrl, thresh); - return sc; } diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h index 1dedeb250548..8d0cf1bf3f1b 100644 --- a/drivers/staging/rdma/hfi1/pio.h +++ b/drivers/staging/rdma/hfi1/pio.h @@ -50,7 +50,6 @@ * */ - /* send context types */ #define SC_KERNEL 0 #define SC_ACK 1 @@ -318,7 +317,6 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd); void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl); void pio_send_control(struct hfi1_devdata *dd, int op); - /* PIO copy routines */ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, const void *from, size_t count); diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index 395393550700..cc21272f878a 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -609,7 +609,6 @@ static void sdma_sw_clean_up_task(unsigned long opaque) * descq are ours to play with. */ - /* * In the error clean up sequence, software clean must be called * before the hardware clean so we can use the hardware head in @@ -1690,7 +1689,6 @@ static void set_sdma_integrity(struct sdma_engine *sde) write_sde_csr(sde, SD(CHECK_ENABLE), reg); } - static void init_sdma_regs( struct sdma_engine *sde, u32 credits, diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index c106d3c5b06f..cc01e818b1a9 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -65,7 +65,6 @@ /* Hardware limit for SDMA packet size */ #define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1) - #define SDMA_TXREQ_S_OK 0 #define SDMA_TXREQ_S_SENDERROR 1 #define SDMA_TXREQ_S_ABORTED 2 @@ -418,7 +417,6 @@ struct sdma_engine { struct list_head flushlist; }; - int sdma_init(struct hfi1_devdata *dd, u8 port); void sdma_start(struct hfi1_devdata *dd); void sdma_exit(struct hfi1_devdata *dd); @@ -464,7 +462,6 @@ static inline int __sdma_running(struct sdma_engine *engine) return engine->state.current_state == sdma_state_s99_running; } - /** * sdma_running() - state suitability test * @engine: sdma engine @@ -494,7 +491,6 @@ void _sdma_txreq_ahgadd( u32 *ahg, u8 ahg_hlen); - /** * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG * @tx: tx request to initialize diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c index f1d47e7f31d2..1f3a747ce666 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/staging/rdma/hfi1/sysfs.c @@ -53,7 +53,6 @@ #include "mad.h" #include "trace.h" - /* * Start of per-port congestion control structures and support code */ @@ -254,7 +253,6 @@ HFI1_SC2VL_ATTR(29); HFI1_SC2VL_ATTR(30); HFI1_SC2VL_ATTR(31); - static struct attribute *sc2vl_default_attributes[] = { &hfi1_sc2vl_attr_0.attr, &hfi1_sc2vl_attr_1.attr, @@ -360,7 +358,6 @@ HFI1_SL2SC_ATTR(29); HFI1_SL2SC_ATTR(30); HFI1_SL2SC_ATTR(31); - static struct attribute *sl2sc_default_attributes[] = { &hfi1_sl2sc_attr_0.attr, &hfi1_sl2sc_attr_1.attr, @@ -493,7 +490,6 @@ static struct kobj_type hfi1_vl2mtu_ktype = { .default_attrs = vl2mtu_default_attributes }; - /* end of per-port file structures and support code */ /* @@ -535,7 +531,6 @@ static ssize_t show_boardversion(struct device *device, return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion); 
} - static ssize_t show_nctxts(struct device *device, struct device_attribute *attr, char *buf) { @@ -702,7 +697,6 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, } kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD); - ret = kobject_init_and_add(&ppd->pport_cc_kobj, &port_cc_ktype, kobj, "CCMgtA"); if (ret) { diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h index fcae96e5b784..a13215ffdddd 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/staging/rdma/hfi1/trace.h @@ -380,7 +380,6 @@ const char *parse_sdma_flags( #define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1) - #define lrh_name(lrh) { HFI1_##lrh, #lrh } #define show_lnh(lrh) \ __print_symbolic(lrh, \ @@ -427,7 +426,6 @@ __print_symbolic(opcode, \ ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \ ib_opcode_name(CNP)) - #define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x" #define BTH_PRN \ "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \ @@ -563,7 +561,6 @@ DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr, #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_snoop - TRACE_EVENT(snoop_capture, TP_PROTO(struct hfi1_devdata *dd, int hdr_len, @@ -760,7 +757,6 @@ DECLARE_EVENT_CLASS(hfi1_bct_template, ) ); - DEFINE_EVENT(hfi1_bct_template, bct_set, TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc), TP_ARGS(dd, bc)); diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/staging/rdma/hfi1/twsi.h index 6cb30e59b00f..0722ac83e7dd 100644 --- a/drivers/staging/rdma/hfi1/twsi.h +++ b/drivers/staging/rdma/hfi1/twsi.h @@ -65,5 +65,4 @@ int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr, int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr, const void *buffer, int len); - #endif /* _TWSI_H */ diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index da4e465ae846..5779f3a820ab 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -647,7 +647,6 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5, return 0; } - /** * hfi1_ud_rcv - receive an incoming UD packet * @ibp: the port the packet came in on diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index acf1132951e3..466055bd5107 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -376,7 +376,6 @@ dropit: return 0; } - /** * hfi1_ib_rcv - process an incoming packet * @packet: data packet information @@ -965,7 +964,6 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd, if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK)) goto bad; - /* Is the pkey = 0x0, or 0x8000? 
*/ if ((pkey & PKEY_LOW_15_MASK) == 0) goto bad; -- cgit v1.2.3-59-g8ed1b From 50e5dcbed6b36212c40e8fee18a7f5c7bb0aca13 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:19:41 -0800 Subject: staging/rdma/hfi1: Remove space after cast Remove the space after a cast to fix checkpatch check: CHECK: No space is necessary after a cast Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 6 ++--- drivers/staging/rdma/hfi1/debugfs.c | 4 +-- drivers/staging/rdma/hfi1/dma.c | 12 ++++----- drivers/staging/rdma/hfi1/driver.c | 6 ++--- drivers/staging/rdma/hfi1/firmware.c | 2 +- drivers/staging/rdma/hfi1/hfi.h | 4 +-- drivers/staging/rdma/hfi1/mad.c | 48 +++++++++++++++++------------------ drivers/staging/rdma/hfi1/pcie.c | 4 +-- drivers/staging/rdma/hfi1/pio.c | 2 +- drivers/staging/rdma/hfi1/rc.c | 14 +++++----- drivers/staging/rdma/hfi1/ruc.c | 8 +++--- drivers/staging/rdma/hfi1/sdma.c | 10 ++++---- drivers/staging/rdma/hfi1/trace.c | 2 +- drivers/staging/rdma/hfi1/user_sdma.c | 2 +- drivers/staging/rdma/hfi1/verbs.h | 4 +-- 15 files changed, 64 insertions(+), 64 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 0a593bdf1761..cf578654e48b 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -5436,7 +5436,7 @@ static void update_rcverr_timer(unsigned long opaque) OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN); queue_work(ppd->hfi1_wq, &ppd->link_bounce_work); } - dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt; + dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); } @@ -6366,7 +6366,7 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort) reg | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT)); - (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ + (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ if (!abort) { udelay(1); /* must hold for the longer of 16cclks or 20ns */ write_csr(dd, DCC_CFG_RESET, reg); @@ -13407,7 +13407,7 @@ static void init_chip(struct hfi1_devdata *dd) * across the clear. 
*/ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); - (void) read_csr(dd, CCE_DC_CTRL); + (void)read_csr(dd, CCE_DC_CTRL); if (use_flr) { /* diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index 7cb43488d796..f309c5fd7b74 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -149,8 +149,8 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v) if (!n_packets && !n_bytes) return SEQ_SKIP; seq_printf(s, "%02llx %llu/%llu\n", i, - (unsigned long long) n_packets, - (unsigned long long) n_bytes); + (unsigned long long)n_packets, + (unsigned long long)n_bytes); return 0; } diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/staging/rdma/hfi1/dma.c index e03bd735173c..afe572dfeb10 100644 --- a/drivers/staging/rdma/hfi1/dma.c +++ b/drivers/staging/rdma/hfi1/dma.c @@ -52,7 +52,7 @@ #include "verbs.h" -#define BAD_DMA_ADDRESS ((u64) 0) +#define BAD_DMA_ADDRESS ((u64)0) /* * The following functions implement driver specific replacements @@ -74,7 +74,7 @@ static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr, if (WARN_ON(!valid_dma_direction(direction))) return BAD_DMA_ADDRESS; - return (u64) cpu_addr; + return (u64)cpu_addr; } static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, @@ -95,7 +95,7 @@ static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page, if (offset + size > PAGE_SIZE) return BAD_DMA_ADDRESS; - addr = (u64) page_address(page); + addr = (u64)page_address(page); if (addr) addr += offset; @@ -120,7 +120,7 @@ static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl, return BAD_DMA_ADDRESS; for_each_sg(sgl, sg, nents, i) { - addr = (u64) page_address(sg_page(sg)); + addr = (u64)page_address(sg_page(sg)); if (!addr) { ret = 0; break; @@ -161,14 +161,14 @@ static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size, if (p) addr = page_address(p); if (dma_handle) - *dma_handle = (u64) addr; + *dma_handle = (u64)addr; return addr; } static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, u64 dma_handle) { - free_pages((unsigned long) cpu_addr, get_order(size)); + free_pages((unsigned long)cpu_addr, get_order(size)); } struct ib_dma_mapping_ops hfi1_dma_mapping_ops = { diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index dd5187f23786..6082935bc435 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -594,7 +594,7 @@ static void __prescan_rxq(struct hfi1_packet *packet) while (1) { struct hfi1_devdata *dd = rcd->dd; struct hfi1_ibport *ibp = &rcd->ppd->ibport_data; - __le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head + + __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head + dd->rhf_offset; struct rvt_qp *qp; struct hfi1_ib_header *hdr; @@ -730,7 +730,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) } } - packet->rhf_addr = (__le32 *) packet->rcd->rcvhdrq + packet->rhqoff + + packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff + packet->rcd->dd->rhf_offset; packet->rhf = rhf_to_cpu(packet->rhf_addr); @@ -969,7 +969,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) /* On to the next packet */ packet.rhqoff += packet.rsize; - packet.rhf_addr = (__le32 *) rcd->rcvhdrq + + packet.rhf_addr = (__le32 *)rcd->rcvhdrq + packet.rhqoff + dd->rhf_offset; packet.rhf = rhf_to_cpu(packet.rhf_addr); diff --git a/drivers/staging/rdma/hfi1/firmware.c 
b/drivers/staging/rdma/hfi1/firmware.c index f87460d7c7f6..31550a377f17 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -1838,7 +1838,7 @@ void read_guid(struct hfi1_devdata *dd) { /* Take the DC out of reset to get a valid GUID value */ write_csr(dd, CCE_DC_CTRL, 0); - (void) read_csr(dd, CCE_DC_CTRL); + (void)read_csr(dd, CCE_DC_CTRL); dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID); dd_dev_info(dd, "GUID %llx", diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 347ceca5f361..de82f8e78914 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1668,7 +1668,7 @@ void hfi1_release_user_pages(struct page **, size_t, bool); static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd) { - *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL; + *((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL; } static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd) @@ -1677,7 +1677,7 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd) * volatile because it's a DMA target from the chip, routine is * inlined, and don't want register caching or reordering. */ - return (u32) le64_to_cpu(*rcd->rcvhdrtail_kvaddr); + return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr); } /* diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index b687e3f2d963..a56d7dc2d020 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -744,7 +744,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, return reply((struct ib_mad_hdr *)smp); } - n_blocks_avail = (u16) (npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; + n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1; size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16); @@ -758,7 +758,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, return reply((struct ib_mad_hdr *)smp); } - p = (__be16 *) data; + p = (__be16 *)data; q = (u16 *)data; /* get the real pkeys if we are requesting the first block */ if (start_block == 0) { @@ -1406,7 +1406,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, struct hfi1_devdata *dd = dd_from_ibdev(ibdev); u32 n_blocks_sent = OPA_AM_NBLK(am); u32 start_block = am & 0x7ff; - u16 *p = (u16 *) data; + u16 *p = (u16 *)data; __be16 *q = (__be16 *)data; int i; u16 n_blocks_avail; @@ -1586,7 +1586,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, { u32 n_blocks = OPA_AM_NBLK(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); - void *vp = (void *) data; + void *vp = (void *)data; size_t size = 4 * sizeof(u64); if (n_blocks != 1) { @@ -1609,7 +1609,7 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, u32 n_blocks = OPA_AM_NBLK(am); int async_update = OPA_AM_ASYNC(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); - void *vp = (void *) data; + void *vp = (void *)data; struct hfi1_pportdata *ppd; int lstate; @@ -1641,7 +1641,7 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, u32 n_blocks = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; - void *vp = (void *) data; + void *vp = (void *)data; int size; if (n_blocks != 1) { @@ -1666,7 +1666,7 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data, u32 n_blocks = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; - void *vp = (void *) 
data; + void *vp = (void *)data; int lstate; if (n_blocks != 1) { @@ -1699,7 +1699,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data, u32 lstate; struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; - struct opa_port_state_info *psi = (struct opa_port_state_info *) data; + struct opa_port_state_info *psi = (struct opa_port_state_info *)data; if (nports != 1) { smp->status |= IB_SMP_INVALID_FIELD; @@ -1748,7 +1748,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data, u8 ls_new, ps_new; struct hfi1_ibport *ibp; struct hfi1_pportdata *ppd; - struct opa_port_state_info *psi = (struct opa_port_state_info *) data; + struct opa_port_state_info *psi = (struct opa_port_state_info *)data; int ret, invalid = 0; if (nports != 1) { @@ -1834,7 +1834,7 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data, u32 num_ports = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; - struct buffer_control *p = (struct buffer_control *) data; + struct buffer_control *p = (struct buffer_control *)data; int size; if (num_ports != 1) { @@ -1857,7 +1857,7 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data, u32 num_ports = OPA_AM_NPORT(am); struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_pportdata *ppd; - struct buffer_control *p = (struct buffer_control *) data; + struct buffer_control *p = (struct buffer_control *)data; if (num_ports != 1) { smp->status |= IB_SMP_INVALID_FIELD; @@ -1930,10 +1930,10 @@ static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, switch (section) { case OPA_VLARB_LOW_ELEMENTS: - (void) fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p); + (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p); break; case OPA_VLARB_HIGH_ELEMENTS: - (void) fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p); + (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p); break; /* neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX * can be changed from the default values */ @@ -2522,7 +2522,7 @@ static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, idx_from_vl(vl)); if (tmp < sum_vl_xmit_wait) { /* we wrapped */ - sum_vl_xmit_wait = (u64) ~0; + sum_vl_xmit_wait = (u64)~0; break; } sum_vl_xmit_wait = tmp; @@ -3287,7 +3287,7 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, { int i; struct opa_congestion_setting_attr *p = - (struct opa_congestion_setting_attr *) data; + (struct opa_congestion_setting_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct opa_congestion_setting_entry_shadow *entries; @@ -3326,7 +3326,7 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data, u32 *resp_len) { struct opa_congestion_setting_attr *p = - (struct opa_congestion_setting_attr *) data; + (struct opa_congestion_setting_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct opa_congestion_setting_entry_shadow *entries; @@ -3418,7 +3418,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, u32 *resp_len) { struct ib_cc_table_attr *cc_table_attr = - (struct ib_cc_table_attr *) data; + (struct ib_cc_table_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 start_block = OPA_AM_START_BLK(am); @@ -3475,7 +3475,7 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, struct ib_device *ibdev, u8 port, u32 
*resp_len) { - struct ib_cc_table_attr *p = (struct ib_cc_table_attr *) data; + struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data; struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 start_block = OPA_AM_START_BLK(am); @@ -3559,7 +3559,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, u32 *resp_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); - struct opa_led_info *p = (struct opa_led_info *) data; + struct opa_led_info *p = (struct opa_led_info *)data; u32 nport = OPA_AM_NPORT(am); u64 reg; @@ -3584,7 +3584,7 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, u32 *resp_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); - struct opa_led_info *p = (struct opa_led_info *) data; + struct opa_led_info *p = (struct opa_led_info *)data; u32 nport = OPA_AM_NPORT(am); int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK); @@ -3800,7 +3800,7 @@ static int subn_get_opa_aggregate(struct opa_smp *smp, /* zero the payload for this segment */ memset(next_smp + sizeof(*agg), 0, agg_data_len); - (void) subn_get_opa_sma(agg->attr_id, smp, am, agg->data, + (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data, ibdev, port, NULL); if (smp->status & ~IB_SMP_DIRECTION) { set_aggr_error(agg); @@ -3844,7 +3844,7 @@ static int subn_set_opa_aggregate(struct opa_smp *smp, return reply((struct ib_mad_hdr *)smp); } - (void) subn_set_opa_sma(agg->attr_id, smp, am, agg->data, + (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data, ibdev, port, NULL); if (smp->status & ~IB_SMP_DIRECTION) { set_aggr_error(agg); @@ -3989,7 +3989,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags, smp->method == IB_MGMT_METHOD_SET) && port_num && port_num <= ibdev->phys_port_cnt && port != port_num) - (void) check_mkey(to_iport(ibdev, port_num), + (void)check_mkey(to_iport(ibdev, port_num), (struct ib_mad_hdr *)smp, 0, smp->mkey, smp->route.dr.dr_slid, smp->route.dr.return_path, @@ -4079,7 +4079,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, smp->method == IB_MGMT_METHOD_SET) && port_num && port_num <= ibdev->phys_port_cnt && port != port_num) - (void) check_mkey(to_iport(ibdev, port_num), + (void)check_mkey(to_iport(ibdev, port_num), (struct ib_mad_hdr *)smp, 0, smp->mkey, (__force __be32)smp->dr_slid, diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 26eb610d73b4..0368516fd0bc 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -233,7 +233,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, */ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd) { - u64 __iomem *base = (void __iomem *) dd->kregbase; + u64 __iomem *base = (void __iomem *)dd->kregbase; dd->flags &= ~HFI1_PRESENT; dd->kregbase = NULL; @@ -1188,7 +1188,7 @@ retry: /* step 5h: arm gasket logic */ /* hold DC in reset across the SBR */ write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); - (void) read_csr(dd, CCE_DC_CTRL); /* DC reset hold */ + (void)read_csr(dd, CCE_DC_CTRL); /* DC reset hold */ /* save firmware control across the SBR */ fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL); diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index f1f30b371425..3817731832ec 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -130,7 +130,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op) if (write) { write_csr(dd, SEND_CTRL, reg); if (flush) - (void) 
read_csr(dd, SEND_CTRL); /* flush write */ + (void)read_csr(dd, SEND_CTRL); /* flush write */ } spin_unlock_irqrestore(&dd->sendctrl_lock, flags); diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 5cdf1d250807..24f2b6562c54 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -1610,7 +1610,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, if (opcode == OP(ATOMIC_ACKNOWLEDGE)) { __be32 *p = ohdr->u.at.atomic_ack_eth; - val = ((u64) be32_to_cpu(p[0]) << 32) | + val = ((u64)be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]); } else val = 0; @@ -1708,7 +1708,7 @@ read_last: aeth = be32_to_cpu(ohdr->u.aeth); hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0); WARN_ON(qp->s_rdma_read_sge.num_sge); - (void) do_rc_ack(qp, aeth, psn, + (void)do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST), 0, rcd); goto ack_done; } @@ -1906,7 +1906,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, * or the send tasklet is already backed up to send an * earlier entry, we can ignore this request. */ - if (!e || e->opcode != (u8) opcode || old_req) + if (!e || e->opcode != (u8)opcode || old_req) goto unlock_done; qp->s_tail_ack_queue = prev; break; @@ -2430,7 +2430,7 @@ send_last: e->rdma_sge.mr = NULL; } ateth = &ohdr->u.atomic_eth; - vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) | + vaddr = ((u64)be32_to_cpu(ateth->vaddr[0]) << 32) | be32_to_cpu(ateth->vaddr[1]); if (unlikely(vaddr & (sizeof(u64) - 1))) goto nack_inv_unlck; @@ -2441,11 +2441,11 @@ send_last: IB_ACCESS_REMOTE_ATOMIC))) goto nack_acc_unlck; /* Perform atomic OP and save result. */ - maddr = (atomic64_t *) qp->r_sge.sge.vaddr; + maddr = (atomic64_t *)qp->r_sge.sge.vaddr; sdata = be64_to_cpu(ateth->swap_data); e->atomic_data = (opcode == OP(FETCH_ADD)) ? - (u64) atomic64_add_return(sdata, maddr) - sdata : - (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, + (u64)atomic64_add_return(sdata, maddr) - sdata : + (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, be64_to_cpu(ateth->compare_data), sdata); rvt_put_mr(qp->r_sge.sge.mr); diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 70f42c93210c..6f0005a93c44 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -508,12 +508,12 @@ do_write: IB_ACCESS_REMOTE_ATOMIC))) goto acc_err; /* Perform atomic OP and save result. */ - maddr = (atomic64_t *) qp->r_sge.sge.vaddr; + maddr = (atomic64_t *)qp->r_sge.sge.vaddr; sdata = wqe->atomic_wr.compare_add; - *(u64 *) sqp->s_sge.sge.vaddr = + *(u64 *)sqp->s_sge.sge.vaddr = (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 
- (u64) atomic64_add_return(sdata, maddr) - sdata : - (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, + (u64)atomic64_add_return(sdata, maddr) - sdata : + (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr, sdata, wqe->atomic_wr.swap); rvt_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index cc21272f878a..9379419598fc 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -534,7 +534,7 @@ static void sdma_err_progress_check(unsigned long data) static void sdma_hw_clean_up_task(unsigned long opaque) { - struct sdma_engine *sde = (struct sdma_engine *) opaque; + struct sdma_engine *sde = (struct sdma_engine *)opaque; u64 statuscsr; while (1) { @@ -594,7 +594,7 @@ static void sdma_flush_descq(struct sdma_engine *sde) static void sdma_sw_clean_up_task(unsigned long opaque) { - struct sdma_engine *sde = (struct sdma_engine *) opaque; + struct sdma_engine *sde = (struct sdma_engine *)opaque; unsigned long flags; spin_lock_irqsave(&sde->tail_lock, flags); @@ -1345,8 +1345,8 @@ retry: use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && (dd->flags & HFI1_HAS_SDMA_TIMEOUT); hwhead = use_dmahead ? - (u16) le64_to_cpu(*sde->head_dma) : - (u16) read_sde_csr(sde, SD(HEAD)); + (u16)le64_to_cpu(*sde->head_dma) : + (u16)read_sde_csr(sde, SD(HEAD)); if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) { u16 cnt; @@ -3021,7 +3021,7 @@ void sdma_freeze(struct hfi1_devdata *dd) * software clean will read engine CSRs, so must be completed before * the next step, which will clear the engine CSRs. */ - (void) wait_event_interruptible(dd->sdma_unfreeze_wq, + (void)wait_event_interruptible(dd->sdma_unfreeze_wq, atomic_read(&dd->sdma_unfreeze_count) <= 0); /* no need to check results - done no matter what */ } diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c index 9eadec5be3b0..923ca550318a 100644 --- a/drivers/staging/rdma/hfi1/trace.c +++ b/drivers/staging/rdma/hfi1/trace.c @@ -158,7 +158,7 @@ const char *parse_everbs_hdrs( eh->atomic_eth.rkey, (unsigned long long)ib_u64_get( (__be32 *)&eh->atomic_eth.swap_data), - (unsigned long long) ib_u64_get( + (unsigned long long)ib_u64_get( (__be32 *)&eh->atomic_eth.compare_data)); break; /* deth */ diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index 9fe18b082fa9..03a10c880315 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -1030,7 +1030,7 @@ free_tx: */ static inline int num_user_pages(const struct iovec *iov) { - const unsigned long addr = (unsigned long) iov->iov_base; + const unsigned long addr = (unsigned long)iov->iov_base; const unsigned long len = iov->iov_len; const unsigned long spage = addr & PAGE_MASK; const unsigned long epage = (addr + len - 1) & PAGE_MASK; diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index c736015b18df..dc623c6e902d 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -346,7 +346,7 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, */ static inline int cmp_msn(u32 a, u32 b) { - return (((int) a) - ((int) b)) << 8; + return (((int)a) - ((int)b)) << 8; } /* @@ -355,7 +355,7 @@ static inline int cmp_msn(u32 a, u32 b) */ static inline int cmp_psn(u32 a, u32 b) { - return (((int) a) - ((int) b)) << PSN_SHIFT; + return (((int)a) - ((int)b)) << PSN_SHIFT; } /* -- cgit v1.2.3-59-g8ed1b From 
d125a6c66b972e8c6768707c2814107df5963f5f Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:19:49 -0800 Subject: staging/rdma/hfi1: Fix comparison to NULL Convert "pointer == NULL" comparisons to "!pointer" to fix checkpatch check: CHECK: Comparison to NULL could be written "!pointer" Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 6 +++--- drivers/staging/rdma/hfi1/debugfs.c | 2 +- drivers/staging/rdma/hfi1/driver.c | 6 +++--- drivers/staging/rdma/hfi1/init.c | 4 ++-- drivers/staging/rdma/hfi1/mad.c | 8 ++++---- drivers/staging/rdma/hfi1/pio.c | 2 +- drivers/staging/rdma/hfi1/rc.c | 2 +- drivers/staging/rdma/hfi1/sysfs.c | 4 ++-- drivers/staging/rdma/hfi1/verbs.c | 4 ++-- 9 files changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index cf578654e48b..ea0ffd467cdf 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -12260,7 +12260,7 @@ u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir, int hfi1_init_ctxt(struct send_context *sc) { - if (sc != NULL) { + if (sc) { struct hfi1_devdata *dd = sc->dd; u64 reg; u8 set = (sc->type == SC_USER ? @@ -12371,7 +12371,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd) struct hfi1_msix_entry *me = dd->msix_entries; for (i = 0; i < dd->num_msix_entries; i++, me++) { - if (me->arg == NULL) /* => no irq, no affinity */ + if (!me->arg) /* => no irq, no affinity */ continue; hfi1_put_irq_affinity(dd, &dd->msix_entries[i]); free_irq(me->msix.vector, me->arg); @@ -12534,7 +12534,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) continue; } /* no argument, no interrupt */ - if (arg == NULL) + if (!arg) continue; /* make sure the name is terminated */ me->name[sizeof(me->name) - 1] = 0; diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index f309c5fd7b74..fa3df1f75f5d 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -746,7 +746,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd) ibd->hfi1_ibdev_dbg, ppd, &port_cntr_ops[i].ops, - port_cntr_ops[i].ops.write == NULL ? + !port_cntr_ops[i].ops.write ?
S_IRUGO : S_IRUGO | S_IWUSR); } } diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 6082935bc435..0c8bd9174245 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -371,7 +371,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, if (rhf_use_egr_bfr(packet->rhf)) ebuf = packet->ebuf; - if (ebuf == NULL) + if (!ebuf) goto drop; /* this should never happen */ if (lnh == HFI1_LRH_BTH) @@ -402,7 +402,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK; rcu_read_lock(); qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn); - if (qp == NULL) { + if (!qp) { rcu_read_unlock(); goto drop; } @@ -637,7 +637,7 @@ static void __prescan_rxq(struct hfi1_packet *packet) rcu_read_lock(); qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn); - if (qp == NULL) { + if (!qp) { rcu_read_unlock(); goto next; } diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 3071fbce37f7..aabdc3d9d508 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -386,7 +386,7 @@ void set_link_ipg(struct hfi1_pportdata *ppd) cc_state = get_cc_state(ppd); - if (cc_state == NULL) + if (!cc_state) /* * This should _never_ happen - rcu_read_lock() is held, * and set_link_ipg() should not be called if cc_state @@ -438,7 +438,7 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t) cc_state = get_cc_state(ppd); - if (cc_state == NULL) { + if (!cc_state) { rcu_read_unlock(); return HRTIMER_NORESTART; } diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index a56d7dc2d020..44e7fbd37646 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -3297,7 +3297,7 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, cc_state = get_cc_state(ppd); - if (cc_state == NULL) { + if (!cc_state) { rcu_read_unlock(); return reply((struct ib_mad_hdr *)smp); } @@ -3439,7 +3439,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, cc_state = get_cc_state(ppd); - if (cc_state == NULL) { + if (!cc_state) { rcu_read_unlock(); return reply((struct ib_mad_hdr *)smp); } @@ -3505,14 +3505,14 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data, } new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL); - if (new_cc_state == NULL) + if (!new_cc_state) goto getit; spin_lock(&ppd->cc_state_lock); old_cc_state = get_cc_state(ppd); - if (old_cc_state == NULL) { + if (!old_cc_state) { spin_unlock(&ppd->cc_state_lock); kfree(new_cc_state); return reply((struct ib_mad_hdr *)smp); diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 3817731832ec..7907e4c268d8 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -2002,7 +2002,7 @@ int init_credit_return(struct hfi1_devdata *dd) bytes, &dd->cr_base[i].pa, GFP_KERNEL); - if (dd->cr_base[i].va == NULL) { + if (!dd->cr_base[i].va) { set_dev_node(&dd->pcidev->dev, dd->node); dd_dev_err(dd, "Unable to allocate credit return DMA range for NUMA %d\n", diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 24f2b6562c54..99584f7f5052 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -2025,7 +2025,7 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn, cc_state = get_cc_state(ppd); - if (cc_state == NULL) + if (!cc_state) return; /* diff 
--git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c index 1f3a747ce666..3c34f7788873 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/staging/rdma/hfi1/sysfs.c @@ -83,7 +83,7 @@ static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj, rcu_read_lock(); cc_state = get_cc_state(ppd); - if (cc_state == NULL) { + if (!cc_state) { rcu_read_unlock(); return -EINVAL; } @@ -130,7 +130,7 @@ static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj, rcu_read_lock(); cc_state = get_cc_state(ppd); - if (cc_state == NULL) { + if (!cc_state) { rcu_read_unlock(); return -EINVAL; } diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 466055bd5107..c412f1c6637c 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -431,7 +431,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) if (lnh != HFI1_LRH_GRH) goto drop; mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid); - if (mcast == NULL) + if (!mcast) goto drop; list_for_each_entry_rcu(p, &mcast->qp_list, list) { packet->qp = p->qp; @@ -838,7 +838,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, if (cb) iowait_pio_inc(&priv->s_iowait); pbuf = sc_buffer_alloc(sc, plen, cb, qp); - if (unlikely(pbuf == NULL)) { + if (unlikely(!pbuf)) { if (cb) verbs_pio_complete(qp, 0); if (ppd->host_link_state != HLS_UP_ACTIVE) { -- cgit v1.2.3-59-g8ed1b From 458e86ab471b44a28a736cb8b0d364f3ec0d3e3e Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:19:58 -0800 Subject: staging/rdma/hfi1: Remove blank line after an open brace Remove blank line after an open brace to fix checkpatch check: CHECK: Blank lines aren't necessary after an open brace '{' Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 5 ----- drivers/staging/rdma/hfi1/driver.c | 3 --- drivers/staging/rdma/hfi1/mad.c | 2 -- drivers/staging/rdma/hfi1/qp.c | 1 - drivers/staging/rdma/hfi1/qsfp.c | 2 -- drivers/staging/rdma/hfi1/sdma.c | 2 -- 6 files changed, 15 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index ea0ffd467cdf..05e4f07e137d 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -1538,7 +1538,6 @@ static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val, u64 __percpu *cntr, int vl, int mode, u64 data) { - u64 ret = 0; if (vl != CNTR_INVALID_VL) @@ -5931,7 +5930,6 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); if (reg & QSFP_HFI0_MODPRST_N) { - dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n", __func__); @@ -5995,7 +5993,6 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) } if (reg & QSFP_HFI0_INT_N) { - dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n", __func__); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); @@ -9266,7 +9263,6 @@ void qsfp_event(struct work_struct *work) dc_start(dd); if (qd->cache_refresh_required) { - set_qsfp_int_n(ppd, 0); wait_for_qsfp_init(ppd); @@ -10122,7 +10118,6 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) "%s: logical state did not change to ACTIVE\n", __func__); } else { - /* tell all engines to go running */ sdma_all_running(dd); diff --git a/drivers/staging/rdma/hfi1/driver.c 
b/drivers/staging/rdma/hfi1/driver.c index 0c8bd9174245..b5dfdb658ec7 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -440,7 +440,6 @@ drop: static inline void init_packet(struct hfi1_ctxtdata *rcd, struct hfi1_packet *packet) { - packet->rsize = rcd->rcvhdrqentsize; /* words */ packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */ packet->rcd = rcd; @@ -755,7 +754,6 @@ static inline void process_rcv_update(int last, struct hfi1_packet *packet) static inline void finish_packet(struct hfi1_packet *packet) { - /* * Nothing we need to free for the packet. * @@ -769,7 +767,6 @@ static inline void finish_packet(struct hfi1_packet *packet) static inline void process_rcv_qp_work(struct hfi1_packet *packet) { - struct hfi1_ctxtdata *rcd; struct rvt_qp *qp, *nqp; diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 44e7fbd37646..a7e5f9288d1a 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -2183,7 +2183,6 @@ struct opa_port_error_info_msg { __be32 error_info_select_mask; __be32 reserved1; struct _port_ei { - u8 port_number; u8 reserved2[7]; @@ -3140,7 +3139,6 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), 8 * sizeof(vl_select_mask)) { - if (counter_select & CS_PORT_XMIT_DATA) write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index df905791ccfc..90246737a3a1 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -464,7 +464,6 @@ static int iowait_sleep( spin_lock_irqsave(&qp->s_lock, flags); if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { - /* * If we couldn't queue the DMA request, save the info * and try again later rather than destroying the diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index a6d55a614160..aa9c62b0d2af 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -359,7 +359,6 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) /* Is paging enabled? 
*/ if (!(cache[2] & 4)) { - /* Paging enabled, page 03 required */ if ((cache[195] & 0xC0) == 0xC0) { /* all */ @@ -520,7 +519,6 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len) lenstr[1] = '\0'; if (ppd->qsfp_info.cache_valid) { - if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]); diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index 9379419598fc..c0ff07943936 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -475,7 +475,6 @@ static void sdma_err_halt_wait(struct work_struct *work) static void sdma_err_progress_check_schedule(struct sdma_engine *sde) { if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) { - unsigned index; struct hfi1_devdata *dd = sde->dd; @@ -1238,7 +1237,6 @@ void sdma_exit(struct hfi1_devdata *dd) for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma; ++this_idx) { - sde = &dd->per_sdma[this_idx]; if (!list_empty(&sde->dmawait)) dd_dev_err(dd, "sde %u: dmawait list not empty!\n", -- cgit v1.2.3-59-g8ed1b From 5161fc3ef60260343c2ffc1b42c9a92ba954d846 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:20:06 -0800 Subject: staging/rdma/hfi1: Remove blank line before close brace Remove extra blank line before close brace to fix checkpatch check: CHECK: Blank lines aren't necessary before a close brace '}' Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 1 - drivers/staging/rdma/hfi1/driver.c | 1 - drivers/staging/rdma/hfi1/intr.c | 1 - drivers/staging/rdma/hfi1/mad.c | 2 -- drivers/staging/rdma/hfi1/pcie.c | 1 - drivers/staging/rdma/hfi1/sysfs.c | 1 - drivers/staging/rdma/hfi1/uc.c | 1 - drivers/staging/rdma/hfi1/ud.c | 2 -- drivers/staging/rdma/hfi1/user_sdma.c | 2 -- 9 files changed, 12 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 05e4f07e137d..51256ba9d293 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13710,7 +13710,6 @@ static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu, SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT | 64ull * cu << SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT); - } static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu) diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index b5dfdb658ec7..fee5e395608a 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -762,7 +762,6 @@ static inline void finish_packet(struct hfi1_packet *packet) */ update_usrhead(packet->rcd, packet->rcd->head, packet->updegr, packet->etail, rcv_intr_dynamic, packet->numpkt); - } static inline void process_rcv_qp_work(struct hfi1_packet *packet) diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c index 9a9b331cc469..685fb4d9c924 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/staging/rdma/hfi1/intr.c @@ -178,7 +178,6 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) /* notify IB of the link change */ signal_ib_event(ppd, ev); } - } /* diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index a7e5f9288d1a..adfd0a9cead7 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -3805,7 +3805,6 @@ static int subn_get_opa_aggregate(struct opa_smp *smp, return 
reply((struct ib_mad_hdr *)smp); } next_smp += agg_size; - } return reply((struct ib_mad_hdr *)smp); @@ -3849,7 +3848,6 @@ static int subn_set_opa_aggregate(struct opa_smp *smp, return reply((struct ib_mad_hdr *)smp); } next_smp += agg_size; - } return reply((struct ib_mad_hdr *)smp); diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 0368516fd0bc..725e2829a510 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -316,7 +316,6 @@ do_intx: nvec, ret); *msixcnt = 0; hfi1_enable_intx(dd->pcidev); - } /* return the PCIe link speed from the given link status */ diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c index 3c34f7788873..fe232c105742 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/staging/rdma/hfi1/sysfs.c @@ -568,7 +568,6 @@ static ssize_t show_serial(struct device *device, struct hfi1_devdata *dd = dd_from_dev(dev); return scnprintf(buf, PAGE_SIZE, "%s", dd->serial); - } static ssize_t store_chip_reset(struct device *device, diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index e58ec15dd892..afdf53958ab4 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -604,5 +604,4 @@ drop: op_err: hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); return; - } diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index 5779f3a820ab..c3f069725be6 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -772,7 +772,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey); if (mgmt_pkey_idx < 0) goto drop; - } if (unlikely(qkey != qp->qkey)) { hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey, @@ -810,7 +809,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey); if (mgmt_pkey_idx < 0) goto drop; - } if (qp->ibqp.qp_num > 1 && diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index 03a10c880315..097d2789f120 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -713,7 +713,6 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, msecs_to_jiffies( SDMA_IOWAIT_TIMEOUT)); } - } *count += idx; return 0; @@ -1194,7 +1193,6 @@ static int set_txreq_header(struct user_sdma_request *req, if (ret) return ret; goto done; - } hdr->bth[2] = cpu_to_be32( -- cgit v1.2.3-59-g8ed1b From d0d236ea34e6ce2d9106a8f61f92b6af3995d6ad Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:20:15 -0800 Subject: staging/rdma/hfi1: Fix logical continuations Move logical continuations to previous line to fix checkpatch check: CHECK: Logical continuations should be on the previous line Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 55 +++++++++++++++++------------------- drivers/staging/rdma/hfi1/driver.c | 6 ++-- drivers/staging/rdma/hfi1/file_ops.c | 12 ++++---- drivers/staging/rdma/hfi1/firmware.c | 20 +++++-------- drivers/staging/rdma/hfi1/intr.c | 3 +- drivers/staging/rdma/hfi1/mad.c | 12 ++++---- drivers/staging/rdma/hfi1/sdma.c | 4 +-- 7 files changed, 51 insertions(+), 61 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 51256ba9d293..b4c017abab9f 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ 
b/drivers/staging/rdma/hfi1/chip.c @@ -6552,8 +6552,8 @@ void handle_sma_message(struct work_struct *work) * * Can activate the node. Discard otherwise. */ - if (ppd->host_link_state == HLS_UP_ARMED - && ppd->is_active_optimize_enabled) { + if (ppd->host_link_state == HLS_UP_ARMED && + ppd->is_active_optimize_enabled) { ppd->neighbor_normal = 1; ret = set_link_state(ppd, HLS_UP_ACTIVE); if (ret) @@ -7032,8 +7032,8 @@ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, * handle_verify_cap(). The ASIC 8051 firmware does not correctly * set the max_rate field in handle_verify_cap until v0.19. */ - if ((dd->icode == ICODE_RTL_SILICON) - && (dd->dc8051_ver < dc8051_ver(0, 19))) { + if ((dd->icode == ICODE_RTL_SILICON) && + (dd->dc8051_ver < dc8051_ver(0, 19))) { /* max_rate: 0 = 12.5G, 1 = 25G */ switch (max_rate) { case 0: @@ -7358,10 +7358,8 @@ retry: /* downgrade is disabled */ /* bounce if not at starting active width */ - if ((ppd->link_width_active != - ppd->link_width_downgrade_tx_active) - || (ppd->link_width_active != - ppd->link_width_downgrade_rx_active)) { + if ((ppd->link_width_active != ppd->link_width_downgrade_tx_active) || + (ppd->link_width_active != ppd->link_width_downgrade_rx_active)) { dd_dev_err(ppd->dd, "Link downgrade is disabled and link has downgraded, downing link\n"); dd_dev_err(ppd->dd, @@ -7371,8 +7369,8 @@ retry: ppd->link_width_downgrade_rx_active); do_bounce = 1; } - } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 - || (lwde & ppd->link_width_downgrade_rx_active) == 0) { + } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || + (lwde & ppd->link_width_downgrade_rx_active) == 0) { /* Tx or Rx is outside the enabled policy */ dd_dev_err(ppd->dd, "Link is outside of downgrade allowed, downing link\n"); @@ -7567,9 +7565,9 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) if (queue_link_down) { /* if the link is already going down or disabled, do not * queue another */ - if ((ppd->host_link_state - & (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) - || ppd->link_enabled == 0) { + if ((ppd->host_link_state & + (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) || + ppd->link_enabled == 0) { dd_dev_info(dd, "%s: not queuing link down\n", __func__); } else { @@ -8888,10 +8886,9 @@ static int init_loopback(struct hfi1_devdata *dd) * * Accept all valid loopback values. */ - if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) - && (loopback == LOOPBACK_SERDES - || loopback == LOOPBACK_LCB - || loopback == LOOPBACK_CABLE)) { + if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && + (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB || + loopback == LOOPBACK_CABLE)) { loopback = LOOPBACK_LCB; quick_linkup = 1; return 0; @@ -10020,8 +10017,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) state = dd->link_default; /* interpret poll -> poll as a link bounce */ - poll_bounce = ppd->host_link_state == HLS_DN_POLL - && state == HLS_DN_POLL; + poll_bounce = ppd->host_link_state == HLS_DN_POLL && + state == HLS_DN_POLL; dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__, link_state_name(ppd->host_link_state), @@ -10048,8 +10045,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) switch (state) { case HLS_UP_INIT: - if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup - || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { + if (ppd->host_link_state == HLS_DN_POLL && + (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { /* * Quick link up jumps from polling to here. 
* @@ -10779,8 +10776,8 @@ int set_buffer_control(struct hfi1_pportdata *ppd, != cur_bc.vl[i].shared; if (this_shared_changing) any_shared_limit_changing = 1; - if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated - || this_shared_changing) { + if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || + this_shared_changing) { changing[i] = 1; changing_mask |= stat_mask; change_count++; @@ -11227,8 +11224,8 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL); /* if the context already enabled, don't do the extra steps */ - if ((op & HFI1_RCVCTRL_CTXT_ENB) - && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) { + if ((op & HFI1_RCVCTRL_CTXT_ENB) && + !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) { /* reset the tail and hdr addresses, and sequence count */ write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, rcd->rcvhdrq_phys); @@ -11344,8 +11341,8 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl); /* work around sticky RcvCtxtStatus.BlockedRHQFull */ - if (did_enable - && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) { + if (did_enable && + (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) { reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); if (reg != 0) { dd_dev_info(dd, "ctxt %d status %lld (blocked)\n", @@ -13989,8 +13986,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, /* link width active is 0 when link is down */ /* link width downgrade active is 0 when link is down */ - if (num_vls < HFI1_MIN_VLS_SUPPORTED - || num_vls > HFI1_MAX_VLS_SUPPORTED) { + if (num_vls < HFI1_MIN_VLS_SUPPORTED || + num_vls > HFI1_MAX_VLS_SUPPORTED) { hfi1_early_err(&pdev->dev, "Invalid num_vls %u, using %u VLs\n", num_vls, HFI1_MAX_VLS_SUPPORTED); diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index fee5e395608a..3ef297ecdd60 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -1123,9 +1123,9 @@ int set_mtu(struct hfi1_pportdata *ppd) ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd); mutex_lock(&ppd->hls_lock); - if (ppd->host_link_state == HLS_UP_INIT - || ppd->host_link_state == HLS_UP_ARMED - || ppd->host_link_state == HLS_UP_ACTIVE) + if (ppd->host_link_state == HLS_UP_INIT || + ppd->host_link_state == HLS_UP_ARMED || + ppd->host_link_state == HLS_UP_ACTIVE) is_up = 1; drain = !is_ax(dd) && is_up; diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index b52cb78c1f45..5077ee069154 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -1626,12 +1626,12 @@ static ssize_t ui_read(struct file *filp, char __user *buf, size_t count, * them. These registers are defined as having a read value * of 0. 
*/ - else if (csr_off == ASIC_GPIO_CLEAR - || csr_off == ASIC_GPIO_FORCE - || csr_off == ASIC_QSFP1_CLEAR - || csr_off == ASIC_QSFP1_FORCE - || csr_off == ASIC_QSFP2_CLEAR - || csr_off == ASIC_QSFP2_FORCE) + else if (csr_off == ASIC_GPIO_CLEAR || + csr_off == ASIC_GPIO_FORCE || + csr_off == ASIC_QSFP1_CLEAR || + csr_off == ASIC_QSFP1_FORCE || + csr_off == ASIC_QSFP2_CLEAR || + csr_off == ASIC_QSFP2_FORCE) data = 0; else if (csr_off >= barlen) { /* diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 31550a377f17..1af5e3406f04 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -391,19 +391,13 @@ static int invalid_header(struct hfi1_devdata *dd, const char *what, static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css) { /* verify CSS header fields (most sizes are in DW, so add /4) */ - if (invalid_header(dd, "module_type", css->module_type, CSS_MODULE_TYPE) - || invalid_header(dd, "header_len", css->header_len, - (sizeof(struct firmware_file) / 4)) - || invalid_header(dd, "header_version", - css->header_version, CSS_HEADER_VERSION) - || invalid_header(dd, "module_vendor", - css->module_vendor, CSS_MODULE_VENDOR) - || invalid_header(dd, "key_size", - css->key_size, KEY_SIZE / 4) - || invalid_header(dd, "modulus_size", - css->modulus_size, KEY_SIZE / 4) - || invalid_header(dd, "exponent_size", - css->exponent_size, EXPONENT_SIZE / 4)) { + if (invalid_header(dd, "module_type", css->module_type, CSS_MODULE_TYPE) || + invalid_header(dd, "header_len", css->header_len, (sizeof(struct firmware_file) / 4)) || + invalid_header(dd, "header_version", css->header_version, CSS_HEADER_VERSION) || + invalid_header(dd, "module_vendor", css->module_vendor, CSS_MODULE_VENDOR) || + invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) || + invalid_header(dd, "modulus_size", css->modulus_size, KEY_SIZE / 4) || + invalid_header(dd, "exponent_size", css->exponent_size, EXPONENT_SIZE / 4)) { return -EINVAL; } return 0; diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c index 685fb4d9c924..03cebae672a3 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/staging/rdma/hfi1/intr.c @@ -131,8 +131,7 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) * NOTE: This uses this device's vAU, vCU, and vl15_init for * the remote values. Both sides must be using the values. 
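
The next patch in the series enforces a different checkpatch rule: leave one blank line after a function, struct, union, or enum declaration before unrelated code begins. A small sketch with hypothetical declarations (neither name exists in the driver):

	enum widget_state {
		WIDGET_IDLE,
		WIDGET_BUSY
	};

	/* the blank line above is what the check asks for */
	static enum widget_state cur_state = WIDGET_IDLE;
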
*/ - if (quick_linkup - || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { + if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { set_up_vl15(dd, dd->vau, dd->vl15_init); assign_remote_cm_au_table(dd, dd->vcu); ppd->neighbor_guid = diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index adfd0a9cead7..ae594f49c78d 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -1170,8 +1170,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, ppd->port_error_action = be32_to_cpu(pi->port_error_action); lwe = be16_to_cpu(pi->link_width.enabled); if (lwe) { - if (lwe == OPA_LINK_WIDTH_RESET - || lwe == OPA_LINK_WIDTH_RESET_OLD) + if (lwe == OPA_LINK_WIDTH_RESET || + lwe == OPA_LINK_WIDTH_RESET_OLD) set_link_width_enabled(ppd, ppd->link_width_supported); else if ((lwe & ~ppd->link_width_supported) == 0) set_link_width_enabled(ppd, lwe); @@ -1180,8 +1180,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, } lwe = be16_to_cpu(pi->link_width_downgrade.enabled); /* LWD.E is always applied - 0 means "disabled" */ - if (lwe == OPA_LINK_WIDTH_RESET - || lwe == OPA_LINK_WIDTH_RESET_OLD) { + if (lwe == OPA_LINK_WIDTH_RESET || + lwe == OPA_LINK_WIDTH_RESET_OLD) { set_link_width_downgrade_enabled(ppd, ppd->link_width_downgrade_supported); } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) { @@ -2335,8 +2335,8 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } - if (nports != 1 || (port_num && port_num != port) - || num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) { + if (nports != 1 || (port_num && port_num != port) || + num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)pmp); } diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index c0ff07943936..c8c0aace70ff 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -672,8 +672,8 @@ static void sdma_set_state(struct sdma_engine *sde, ss->previous_op = ss->current_op; ss->current_state = next_state; - if (ss->previous_state != sdma_state_s99_running - && next_state == sdma_state_s99_running) + if (ss->previous_state != sdma_state_s99_running && + next_state == sdma_state_s99_running) sdma_flush(sde); if (action[next_state].op_enable) -- cgit v1.2.3-59-g8ed1b From f4d507cdccd708a873dc4d6268a09475779af82d Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:20:25 -0800 Subject: staging/rdma/hfi1: Add blank line after declarations Add blank line after declarations to fix checkpatch check: CHECK: Please use a blank line after function/struct/union/enum declarations Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 2 ++ drivers/staging/rdma/hfi1/debugfs.c | 1 + drivers/staging/rdma/hfi1/firmware.c | 2 ++ drivers/staging/rdma/hfi1/hfi.h | 1 + drivers/staging/rdma/hfi1/pcie.c | 1 + drivers/staging/rdma/hfi1/pio.h | 1 + 6 files changed, 8 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index b4c017abab9f..bdc561087a51 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -6995,6 +6995,7 @@ static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) static const u8 bit_counts[16] =
{ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 }; + static inline u8 nibble_to_count(u8 nibble) { return bit_counts[nibble & 0xf]; @@ -10410,6 +10411,7 @@ static int vl_arb_match_cache(struct vl_arb_cache *cache, { return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); } + /* end functions related to vl arbitration table caching */ static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target, diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index fa3df1f75f5d..e02c5278d77a 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -71,6 +71,7 @@ static const struct seq_operations _##name##_seq_ops = { \ .stop = _##name##_seq_stop, \ .show = _##name##_seq_show \ } + #define DEBUGFS_SEQ_FILE_OPEN(name) \ static int _##name##_open(struct inode *inode, struct file *s) \ { \ diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 1af5e3406f04..16c9dc7917c7 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -113,6 +113,7 @@ struct css_header { u32 exponent_size; /* in DWORDs */ u32 reserved[22]; }; + /* expected field values */ #define CSS_MODULE_TYPE 0x00000006 #define CSS_HEADER_LEN 0x000000a1 @@ -172,6 +173,7 @@ enum fw_state { FW_FINAL, FW_ERR }; + static enum fw_state fw_state = FW_EMPTY; static int fw_err; static struct firmware_details fw_8051; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index de82f8e78914..805535eca040 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1495,6 +1495,7 @@ static inline int valid_ib_mtu(unsigned int mtu) mtu == 1024 || mtu == 2048 || mtu == 4096; } + static inline int valid_opa_max_mtu(unsigned int mtu) { return mtu >= 2048 && diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 725e2829a510..b169166d48b6 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -529,6 +529,7 @@ static void tune_pcie_caps(struct hfi1_devdata *dd) pcie_set_readrq(dd->pcidev, ep_mrrs); } } + /* End of PCIe capability tuning */ /* diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h index 8d0cf1bf3f1b..09a5eebf4b56 100644 --- a/drivers/staging/rdma/hfi1/pio.h +++ b/drivers/staging/rdma/hfi1/pio.h @@ -105,6 +105,7 @@ struct send_context { struct hfi1_devdata *dd; /* device */ void __iomem *base_addr; /* start of PIO memory */ union pio_shadow_ring *sr; /* shadow ring */ + volatile __le64 *hw_free; /* HW free counter */ struct work_struct halt_work; /* halted context work queue entry */ unsigned long flags; /* flags */ -- cgit v1.2.3-59-g8ed1b From 58721b8f8c71a643edf9d51be159c5db39d843c6 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:20:33 -0800 Subject: staging/rdma/hfi1: Remove unnecessary parentheses Remove unnecessary parentheses around address-of expressions on single lvalues to fix checkpatch check: CHECK: Unnecessary parentheses around $var Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mad.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index ae594f49c78d..36bd6faeadf4 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -2416,7 +2416,7 @@ static int
pma_get_opa_portstatus(struct opa_pma_mad *pmp, tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL); rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff; - vlinfo = &(rsp->vls[0]); + vlinfo = &rsp->vls[0]; vfi = 0; /* The vl_select_mask has been checked above, and we know * that it contains only entries which represent valid VLs. @@ -2609,7 +2609,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } - rsp = (struct _port_dctrs *)&(req->port[0]); + rsp = (struct _port_dctrs *)&req->port[0]; memset(rsp, 0, sizeof(*rsp)); rsp->port_number = port; @@ -2632,7 +2632,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, cpu_to_be64(get_error_counter_summary(ibdev, port, res_lli, res_ler)); - vlinfo = &(rsp->vls[0]); + vlinfo = &rsp->vls[0]; vfi = 0; /* The vl_select_mask has been checked above, and we know * that it contains only entries which represent valid VLs. @@ -2816,7 +2816,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } - rsp = (struct _port_ectrs *)&(req->port[0]); + rsp = (struct _port_ectrs *)&req->port[0]; ibp = to_iport(ibdev, port_num); ppd = ppd_from_ibp(ibp); @@ -2836,7 +2836,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff; - vlinfo = (struct _vls_ectrs *)&(rsp->vls[0]); + vlinfo = (struct _vls_ectrs *)&rsp->vls[0]; vfi = 0; vl_select_mask = be32_to_cpu(req->vl_select_mask); for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), @@ -2952,7 +2952,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, u64 reg; req = (struct opa_port_error_info_msg *)pmp->data; - rsp = (struct _port_ei *)&(req->port[0]); + rsp = (struct _port_ei *)&req->port[0]; num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); @@ -3192,7 +3192,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, u32 error_info_select; req = (struct opa_port_error_info_msg *)pmp->data; - rsp = (struct _port_ei *)&(req->port[0]); + rsp = (struct _port_ei *)&req->port[0]; num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); -- cgit v1.2.3-59-g8ed1b From 3f34d9588ff3b8adc4b5828327554fce98a94204 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:20:42 -0800 Subject: staging/rdma/hfi1: Use BIT_ULL macro Use BIT_ULL macro to fix checkpatch check: CHECK: Prefer using the BIT_ULL macro Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.h | 16 ++++++++-------- drivers/staging/rdma/hfi1/eprom.c | 2 +- drivers/staging/rdma/hfi1/sdma.h | 8 ++++---- 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 6c581e0bd65f..2b30aaa08055 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -93,15 +93,15 @@ #define TXE_PIO_SEND (TXE + TXE_PIO_SEND_OFFSET) /* PBC flags */ -#define PBC_INTR (1ull << 31) +#define PBC_INTR BIT_ULL(31) #define PBC_DC_INFO_SHIFT (30) -#define PBC_DC_INFO (1ull << PBC_DC_INFO_SHIFT) -#define PBC_TEST_EBP (1ull << 29) -#define PBC_PACKET_BYPASS (1ull << 28) -#define PBC_CREDIT_RETURN (1ull << 25) -#define PBC_INSERT_BYPASS_ICRC (1ull << 24) -#define PBC_TEST_BAD_ICRC (1ull << 
23) -#define PBC_FECN (1ull << 22) +#define PBC_DC_INFO BIT_ULL(PBC_DC_INFO_SHIFT) +#define PBC_TEST_EBP BIT_ULL(29) +#define PBC_PACKET_BYPASS BIT_ULL(28) +#define PBC_CREDIT_RETURN BIT_ULL(25) +#define PBC_INSERT_BYPASS_ICRC BIT_ULL(24) +#define PBC_TEST_BAD_ICRC BIT_ULL(23) +#define PBC_FECN BIT_ULL(22) /* PbcInsertHcrc field settings */ #define PBC_IHCRC_LKDETH 0x0 /* insert @ local KDETH offset */ diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c index 9a0ddd719bf2..d7250af1d08b 100644 --- a/drivers/staging/rdma/hfi1/eprom.c +++ b/drivers/staging/rdma/hfi1/eprom.c @@ -102,7 +102,7 @@ #define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US)) /* GPIO pins */ -#define EPROM_WP_N (1ull << 14) /* EPROM write line */ +#define EPROM_WP_N BIT_ULL(14) /* EPROM write line */ /* * Use the EP mutex to guard against other callers from within the driver. diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index cc01e818b1a9..0ee22c4c5ce1 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -107,8 +107,8 @@ /* * Bits defined in the send DMA descriptor. */ -#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL << 63) -#define SDMA_DESC0_LAST_DESC_FLAG (1ULL << 62) +#define SDMA_DESC0_FIRST_DESC_FLAG BIT_ULL(63) +#define SDMA_DESC0_LAST_DESC_FLAG BIT_ULL(62) #define SDMA_DESC0_BYTE_COUNT_SHIFT 48 #define SDMA_DESC0_BYTE_COUNT_WIDTH 14 #define SDMA_DESC0_BYTE_COUNT_MASK \ @@ -152,8 +152,8 @@ ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1) #define SDMA_DESC1_GENERATION_SMASK \ (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT) -#define SDMA_DESC1_INT_REQ_FLAG (1ULL << 1) -#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL << 0) +#define SDMA_DESC1_INT_REQ_FLAG BIT_ULL(1) +#define SDMA_DESC1_HEAD_TO_HOST_FLAG BIT_ULL(0) enum sdma_states { sdma_state_s00_hw_down, -- cgit v1.2.3-59-g8ed1b From f3ff8189419e34b61c0e1040174dbd6701bf3428 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:20:50 -0800 Subject: staging/rdma/hfi1: Split multiple assignments Split multiple assignments into individual assignments to fix checkpatch check: CHECK: multiple assignments should be avoided Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 6 ++++-- drivers/staging/rdma/hfi1/mad.c | 8 ++++---- drivers/staging/rdma/hfi1/sdma.h | 3 ++- 3 files changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index bdc561087a51..233958d8061d 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -12459,8 +12459,10 @@ static int request_msix_irqs(struct hfi1_devdata *dd) /* calculate the ranges we are going to use */ first_general = 0; - first_sdma = last_general = first_general + 1; - first_rx = last_sdma = first_sdma + dd->num_sdma; + last_general = first_general + 1; + first_sdma = last_general; + last_sdma = first_sdma + dd->num_sdma; + first_rx = last_sdma; last_rx = first_rx + dd->n_krcv_queues; /* diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 36bd6faeadf4..118a09e6ff7e 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -896,8 +896,8 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd, u32 logical_old = driver_logical_state(ppd); int ret, logical_allowed, physical_allowed; - 
logical_allowed = ret = - logical_transition_allowed(logical_old, logical_new); + ret = logical_transition_allowed(logical_old, logical_new); + logical_allowed = ret; if (ret == HFI_TRANSITION_DISALLOWED || ret == HFI_TRANSITION_UNDEFINED) { @@ -907,8 +907,8 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd, return ret; } - physical_allowed = ret = - physical_transition_allowed(physical_old, physical_new); + ret = physical_transition_allowed(physical_old, physical_new); + physical_allowed = ret; if (ret == HFI_TRANSITION_DISALLOWED || ret == HFI_TRANSITION_UNDEFINED) { diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 0ee22c4c5ce1..0c5f501ee937 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -565,7 +565,8 @@ static inline int sdma_txinit_ahg( tx->complete = cb; tx->coalesce_buf = NULL; tx->wait = NULL; - tx->tlen = tx->packet_len = tlen; + tx->packet_len = tlen; + tx->tlen = tx->packet_len; tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG; tx->descs[0].qw[1] = 0; if (flags & SDMA_TXREQ_F_AHG_COPY) -- cgit v1.2.3-59-g8ed1b From 16733b8822017b84d2abdb8ae2b6c7d554a4e0d0 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:20:58 -0800 Subject: staging/rdma/hfi1: Fix misspellings Fix misspelled word based on checkpatch check: CHECK: 'ffoo' may be misspelled - perhaps 'foo'? Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 2 +- drivers/staging/rdma/hfi1/qsfp.c | 2 +- drivers/staging/rdma/hfi1/sdma.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 233958d8061d..53e3273fdf73 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -8080,7 +8080,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd) * Receive packet IRQ handler. This routine expects to be on its own IRQ. * This routine will try to handle packets immediately (latency), but if * it finds too many, it will invoke the thread handler (bandwitdh). The - * chip receive interupt is *not* cleared down until this or the thread (if + * chip receive interrupt is *not* cleared down until this or the thread (if * invoked) is finished. The intent is to avoid extra interrupts while we * are processing packets anyway. */ diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index aa9c62b0d2af..bdb1504b2ade 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -216,7 +216,7 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, while (count < len) { /* - * Set the qsfp page based on a zero-based addresss + * Set the qsfp page based on a zero-based address * and a page size of QSFP_PAGESIZE bytes. */ page = (u8)(addr / QSFP_PAGESIZE); diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index c8c0aace70ff..cd818de47c66 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -2739,7 +2739,7 @@ enomem: * This function calls _extend_sdma_tx_descs to extend or allocate * coalesce buffer. If there is a allocated coalesce buffer, it will * copy the input packet data into the coalesce buffer. It also adds - * coalesce buffer descriptor once whe whole packet is received. 
+ * coalesce buffer descriptor once when whole packet is received. * * Return: * <0 - error -- cgit v1.2.3-59-g8ed1b From 3db68f4672be95d6f8b0482f1e14c4257b1ee45e Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:21:07 -0800 Subject: staging/rdma/hfi1: Remove CamelCase Remove CamelCase to fix checkpatch check: CHECK: Avoid CamelCase: Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 2b30aaa08055..32e91e7ef20b 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -212,7 +212,7 @@ #define PLS_CONFIGPHY_DEBOUCE 0x40 #define PLS_CONFIGPHY_ESTCOMM 0x41 #define PLS_CONFIGPHY_ESTCOMM_TXRX_HUNT 0x42 -#define PLS_CONFIGPHY_ESTcOMM_LOCAL_COMPLETE 0x43 +#define PLS_CONFIGPHY_ESTCOMM_LOCAL_COMPLETE 0x43 #define PLS_CONFIGPHY_OPTEQ 0x44 #define PLS_CONFIGPHY_OPTEQ_OPTIMIZING 0x44 #define PLS_CONFIGPHY_OPTEQ_LOCAL_COMPLETE 0x45 -- cgit v1.2.3-59-g8ed1b From fcdd76df519e7be5e1094a4bf995374398c44efc Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:21:16 -0800 Subject: staging/rdma/hfi1: Use pointer instead of struct name Use sizeof(*p) instead of sizeof(struct foo) to fix checkpatch check: CHECK: Prefer alloc(sizeof(*p)...) over alloc(sizeof(struct foo)...) Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/pio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 7907e4c268d8..b0a2a4526a7c 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -700,7 +700,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, if (dd->flags & HFI1_FROZEN) return NULL; - sc = kzalloc_node(sizeof(struct send_context), GFP_KERNEL, numa); + sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa); if (!sc) return NULL; -- cgit v1.2.3-59-g8ed1b From d17c0cada704d6d5a291425192fb5148fb99cca1 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:21:26 -0800 Subject: staging/rdma/hfi1: Remove void function return statement Remove return statement at the end of a void function to fix checkpatch warning: WARNING: void function return statements are not generally useful Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/uc.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index afdf53958ab4..89154014e8ae 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -603,5 +603,4 @@ drop: op_err: hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); - return; } -- cgit v1.2.3-59-g8ed1b From 6a14c5ea380c1260772c70b9fd0a1492131f6116 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:21:34 -0800 Subject: staging/rdma/hfi1: Add comment for spinlock_t definition Add comments describing the spinlock for spinlock_t definitions to fix checkpatch check: CHECK: spinlock_t definition without comment Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: 
Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 3 +++ drivers/staging/rdma/hfi1/qsfp.h | 1 + drivers/staging/rdma/hfi1/sdma.h | 1 + 3 files changed, 5 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 805535eca040..774d8ffa94ef 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -313,6 +313,7 @@ struct hfi1_ctxtdata { */ struct task_struct *progress; struct list_head sdma_queues; + /* protect sdma queues */ spinlock_t sdma_qlock; /* Is ASPM interrupt supported for this context */ @@ -380,6 +381,7 @@ struct hfi1_snoop_data { int mode_flag; struct cdev cdev; struct device *class_dev; + /* protect snoop data */ spinlock_t snoop_lock; struct list_head queue; wait_queue_head_t waitq; @@ -561,6 +563,7 @@ enum { }; struct vl_arb_cache { + /* protect vl arb cache */ spinlock_t lock; struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE]; }; diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h index 9f6e2f301040..c391750bf9d2 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/staging/rdma/hfi1/qsfp.h @@ -215,6 +215,7 @@ struct qsfp_data { struct hfi1_pportdata *ppd; struct work_struct qsfp_work; u8 cache[QSFP_MAX_NUM_PAGES * 128]; + /* protect qsfp data */ spinlock_t qsfp_lock; u8 check_interrupt_flags; u8 reset_needed; diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 0c5f501ee937..5aec18b58189 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -412,6 +412,7 @@ struct sdma_engine { u32 progress_check_head; /* private: */ struct work_struct flush_worker; + /* protect flush list */ spinlock_t flushlist_lock; /* private: */ struct list_head flushlist; -- cgit v1.2.3-59-g8ed1b From 4d114fdd90ab4152a1477593edd9375be71d282d Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:21:43 -0800 Subject: staging/rdma/hfi1: Fix block comments Fix block comments with proper formatting to fix checkpatch warnings: WARNING: Block comments use * on subsequent lines WARNING: Block comments use a trailing */ on a separate line Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 87 +++++++++++++++++++++++------------ drivers/staging/rdma/hfi1/chip.h | 12 +++-- drivers/staging/rdma/hfi1/file_ops.c | 6 ++- drivers/staging/rdma/hfi1/firmware.c | 18 +++++--- drivers/staging/rdma/hfi1/hfi.h | 6 ++- drivers/staging/rdma/hfi1/init.c | 12 +++-- drivers/staging/rdma/hfi1/mad.c | 62 ++++++++++++++++--------- drivers/staging/rdma/hfi1/mad.h | 6 ++- drivers/staging/rdma/hfi1/pcie.c | 6 ++- drivers/staging/rdma/hfi1/pio.c | 12 +++-- drivers/staging/rdma/hfi1/pio_copy.c | 30 ++++++++---- drivers/staging/rdma/hfi1/platform.h | 8 ++-- drivers/staging/rdma/hfi1/sdma.c | 3 +- drivers/staging/rdma/hfi1/user_sdma.c | 42 +++++++++++------ 14 files changed, 203 insertions(+), 107 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 53e3273fdf73..8e84060a8efd 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -6392,14 +6392,18 @@ static void dc_shutdown(struct hfi1_devdata *dd) spin_unlock_irqrestore(&dd->dc8051_lock, flags); /* Shutdown the LCB */ lcb_shutdown(dd, 1); - /* Going to OFFLINE would have causes the 
8051 to put the + /* + * Going to OFFLINE would have causes the 8051 to put the * SerDes into reset already. Just need to shut down the 8051, - * itself. */ + * itself. + */ write_csr(dd, DC_DC8051_CFG_RST, 0x1); } -/* Calling this after the DC has been brought out of reset should not - * do any damage. */ +/* + * Calling this after the DC has been brought out of reset should not + * do any damage. + */ static void dc_start(struct hfi1_devdata *dd) { unsigned long flags; @@ -6525,8 +6529,10 @@ void handle_sma_message(struct work_struct *work) u64 msg; int ret; - /* msg is bytes 1-4 of the 40-bit idle message - the command code - is stripped off */ + /* + * msg is bytes 1-4 of the 40-bit idle message - the command code + * is stripped off + */ ret = read_idle_sma(dd, &msg); if (ret) return; @@ -6815,8 +6821,10 @@ void handle_link_up(struct work_struct *work) } } -/* Several pieces of LNI information were cached for SMA in ppd. - * Reset these on link down */ +/* + * Several pieces of LNI information were cached for SMA in ppd. + * Reset these on link down + */ static void reset_neighbor_info(struct hfi1_pportdata *ppd) { ppd->neighbor_guid = 0; @@ -6862,8 +6870,10 @@ void handle_link_down(struct work_struct *work) /* disable the port */ clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); - /* If there is no cable attached, turn the DC off. Otherwise, - * start the link bring up. */ + /* + * If there is no cable attached, turn the DC off. Otherwise, + * start the link bring up. + */ if (!qsfp_mod_present(ppd)) { dc_shutdown(ppd->dd); } else { @@ -7564,8 +7574,10 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) } if (queue_link_down) { - /* if the link is already going down or disabled, do not - * queue another */ + /* + * if the link is already going down or disabled, do not + * queue another + */ if ((ppd->host_link_state & (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) || ppd->link_enabled == 0) { @@ -7712,8 +7724,10 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) /* set status bit */ dd->err_info_rcvport.status_and_code |= OPA_EI_STATUS_SMASK; - /* save first 2 flits in the packet that caused - * the error */ + /* + * save first 2 flits in the packet that caused + * the error + */ dd->err_info_rcvport.packet_flit1 = hdr0; dd->err_info_rcvport.packet_flit2 = hdr1; } @@ -7913,8 +7927,10 @@ static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) } static const struct is_table is_table[] = { -/* start end - name func interrupt func */ +/* + * start end + * name func interrupt func + */ { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END, is_misc_err_name, is_misc_err_int }, { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END, @@ -10763,8 +10779,10 @@ int set_buffer_control(struct hfi1_pportdata *ppd, */ memset(changing, 0, sizeof(changing)); memset(lowering_dedicated, 0, sizeof(lowering_dedicated)); - /* NOTE: Assumes that the individual VL bits are adjacent and in - increasing order */ + /* + * NOTE: Assumes that the individual VL bits are adjacent and in + * increasing order + */ stat_mask = SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK; changing_mask = 0; @@ -11129,8 +11147,10 @@ static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts) } rcd->rcvavail_timeout = timeout; - /* timeout cannot be larger than rcv_intr_timeout_csr which has already - been verified to be in range */ + /* + * timeout cannot be larger than rcv_intr_timeout_csr which has already + * been verified to be in range + */ 
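
Every rewrite in this patch converges on the kernel's canonical block-comment layout: the opening "/*" stands alone, each continuation line begins with an aligned " * ", and the closing "*/" sits by itself. A short sketch of the target shape (the comment text is illustrative):

	/*
	 * Multi-line comments open with a bare slash-star, prefix each
	 * following line with an aligned star, and close with a lone
	 * star-slash on the final line.
	 */

One-line comments such as /* like this */ are unaffected; only wrapped comments trigger the two checkpatch warnings quoted in the commit message.
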
write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); } @@ -11323,8 +11343,10 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) if (op & HFI1_RCVCTRL_TIDFLOW_DIS) rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) { - /* In one-packet-per-eager mode, the size comes from - the RcvArray entry. */ + /* + * In one-packet-per-eager mode, the size comes from + * the RcvArray entry. + */ rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; } @@ -12524,7 +12546,8 @@ static int request_msix_irqs(struct hfi1_devdata *dd) me->type = IRQ_RCVCTXT; } else { /* not in our expected range - complain, then - ignore it */ + * ignore it + */ dd_dev_err(dd, "Unexpected extra MSI-X interrupt %d\n", i); continue; @@ -12830,8 +12853,10 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) /* PIO Send buffers */ /* SDMA Send buffers */ - /* These are not normally read, and (presently) have no method - to be read, so are not pre-initialized */ + /* + * These are not normally read, and (presently) have no method + * to be read, so are not pre-initialized + */ /* RcvHdrAddr */ /* RcvHdrTailAddr */ @@ -13026,8 +13051,10 @@ static void reset_misc_csrs(struct hfi1_devdata *dd) write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0); write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0); } - /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can - only be written 128-byte chunks */ + /* + * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can + * only be written 128-byte chunks + */ /* init RSA engine to clear lingering errors */ write_csr(dd, MISC_CFG_RSA_CMD, 1); write_csr(dd, MISC_CFG_RSA_MU, 0); @@ -14045,8 +14072,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) & CCE_REVISION_CHIP_REV_MINOR_MASK; - /* obtain the hardware ID - NOT related to unit, which is a - software enumeration */ + /* + * obtain the hardware ID - NOT related to unit, which is a + * software enumeration + */ reg = read_csr(dd, CCE_REVISION2); dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) & CCE_REVISION2_HFI_ID_MASK; diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 32e91e7ef20b..0b7055b14d17 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -79,8 +79,10 @@ #define PIO_CMASK 0x7ff /* counter mask for free and fill counters */ #define MAX_EAGER_ENTRIES 2048 /* max receive eager entries */ #define MAX_TID_PAIR_ENTRIES 1024 /* max receive expected pairs */ -/* Virtual? Allocation Unit, defined as AU = 8*2^vAU, 64 bytes, AU is fixed - at 64 bytes for all generation one devices */ +/* + * Virtual? 
Allocation Unit, defined as AU = 8*2^vAU, 64 bytes, AU is fixed + * at 64 bytes for all generation one devices + */ #define CM_VAU 3 /* HFI link credit count, AKA receive buffer depth (RBUF_DEPTH) */ #define CM_GLOBAL_CREDITS 0x940 @@ -518,8 +520,10 @@ enum { #define LCB_CRC_48B 0x2 /* 48b CRC */ #define LCB_CRC_12B_16B_PER_LANE 0x3 /* 12b-16b per lane CRC */ -/* the following enum is (almost) a copy/paste of the definition - * in the OPA spec, section 20.2.2.6.8 (PortInfo) */ +/* + * the following enum is (almost) a copy/paste of the definition + * in the OPA spec, section 20.2.2.6.8 (PortInfo) + */ enum { PORT_LTP_CRC_MODE_NONE = 0, PORT_LTP_CRC_MODE_14 = 1, /* 14-bit LTP CRC mode (optional) */ diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 5077ee069154..c4b9dd49dfa7 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -388,8 +388,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, break; } if (dd->flags & HFI1_FORCED_FREEZE) { - /* Don't allow context reset if we are into - * forced freeze */ + /* + * Don't allow context reset if we are into + * forced freeze + */ ret = -ENODEV; break; } diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 16c9dc7917c7..3a7163dab39e 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -1294,8 +1294,10 @@ static int load_pcie_serdes_firmware(struct hfi1_devdata *dd, /* step 3: enable XDMEM access */ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40); /* step 4: load firmware into SBus Master XDMEM */ - /* NOTE: the dmem address, write_en, and wdata are all pre-packed, - we only need to pick up the bytes and write them */ + /* + * NOTE: the dmem address, write_en, and wdata are all pre-packed, + * we only need to pick up the bytes and write them + */ for (i = 0; i < fdet->firmware_len; i += 4) { sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER, *(u32 *)&fdet->firmware_ptr[i]); @@ -1305,8 +1307,10 @@ static int load_pcie_serdes_firmware(struct hfi1_devdata *dd, /* step 6: allow SBus Spico to run */ sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000); - /* steps 7-11: run RSA, if it succeeds, firmware is available to - be swapped */ + /* + * steps 7-11: run RSA, if it succeeds, firmware is available to + * be swapped + */ return run_rsa(dd, "PCIe serdes", fdet->signature); } @@ -1744,8 +1748,10 @@ int get_platform_config_field(struct hfi1_devdata *dd, src_ptr = (u32 *)((u8 *)src_ptr + seek); - /* We expect the field to be byte aligned and whole byte - * lengths if we are here */ + /* + * We expect the field to be byte aligned and whole byte + * lengths if we are here + */ memcpy(data, src_ptr, wlen); return 0; } diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 774d8ffa94ef..4db5ad9921a9 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -718,8 +718,10 @@ struct hfi1_pportdata { /* CA's max number of 64 entry units in the congestion control table */ u8 cc_max_table_entries; - /* begin congestion log related entries - * cc_log_lock protects all congestion log related data */ + /* + * begin congestion log related entries + * cc_log_lock protects all congestion log related data + */ spinlock_t cc_log_lock ____cacheline_aligned_in_smp; u8 threshold_cong_event_map[OPA_MAX_SLS / 8]; u16 threshold_event_counter; diff --git a/drivers/staging/rdma/hfi1/init.c 
b/drivers/staging/rdma/hfi1/init.c index aabdc3d9d508..f794604bea2a 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -790,8 +790,10 @@ done: for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; - /* start the serdes - must be after interrupts are - enabled so we are notified when the link goes up */ + /* + * start the serdes - must be after interrupts are + * enabled so we are notified when the link goes up + */ lastfail = bringup_serdes(ppd); if (lastfail) dd_dev_info(dd, @@ -1188,8 +1190,10 @@ static int __init hfi1_mod_init(void) user_credit_return_threshold = 100; compute_krcvqs(); - /* sanitize receive interrupt count, time must wait until after - the hardware type is known */ + /* + * sanitize receive interrupt count, time must wait until after + * the hardware type is known + */ if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK) rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK; /* reject invalid combinations */ diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 118a09e6ff7e..13cf66fe2aca 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -696,8 +696,10 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp); - /* this counter is 16 bits wide, but the replay_depth.wire - * variable is only 8 bits */ + /* + * this counter is 16 bits wide, but the replay_depth.wire + * variable is only 8 bits + */ if (tmp > 0xff) tmp = 0xff; pi->replay_depth.wire = tmp; @@ -1621,8 +1623,10 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data, /* IB numbers ports from 1, hw from 0 */ ppd = dd->pport + (port - 1); lstate = driver_lstate(ppd); - /* it's known that async_update is 0 by this point, but include - * the explicit check for clarity */ + /* + * it's known that async_update is 0 by this point, but include + * the explicit check for clarity + */ if (!async_update && (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) { smp->status |= IB_SMP_INVALID_FIELD; @@ -1797,8 +1801,10 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1) #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK) - /* check that addr is within spec, and - * addr and (addr + len - 1) are on the same "page" */ + /* + * check that addr is within spec, and + * addr and (addr + len - 1) are on the same "page" + */ if (addr >= 4096 || (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) { smp->status |= IB_SMP_INVALID_FIELD; @@ -1935,8 +1941,10 @@ static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data, case OPA_VLARB_HIGH_ELEMENTS: (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p); break; - /* neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX - * can be changed from the default values */ + /* + * neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX + * can be changed from the default values + */ case OPA_VLARB_PREEMPT_ELEMENTS: /* FALLTHROUGH */ case OPA_VLARB_PREEMPT_MATRIX: @@ -2148,8 +2156,10 @@ struct opa_port_data_counters_msg { }; struct opa_port_error_counters64_msg { - /* Request contains first two fields, response contains the - * whole magilla */ + /* + * Request contains first two fields, response contains the + * whole magilla + */ __be64 port_select_mask[4]; __be32 vl_select_mask; @@ -2673,11 +2683,12 @@ static int pma_get_opa_datacounters(struct 
opa_pma_mad *pmp, /* rsp->port_vl_xmit_time_cong is 0 for HFIs */ /* rsp->port_vl_xmit_wasted_bw ??? */ /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? - * does this differ from rsp->vls[vfi].port_vl_xmit_wait */ + * does this differ from rsp->vls[vfi].port_vl_xmit_wait + */ /*rsp->vls[vfi].port_vl_mark_fecn = - cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT - + offset)); - */ + * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + * + offset)); + */ vlinfo++; vfi++; } @@ -2996,8 +3007,10 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, /* ExcessiverBufferOverrunInfo */ reg = read_csr(dd, RCV_ERR_INFO); if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) { - /* if the RcvExcessBufferOverrun bit is set, save SC of - * first pkt that encountered an excess buffer overrun */ + /* + * if the RcvExcessBufferOverrun bit is set, save SC of + * first pkt that encountered an excess buffer overrun + */ u8 tmp = (u8)reg; tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK; @@ -3093,8 +3106,9 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0); /* Only applicable for switch */ - /*if (counter_select & CS_PORT_MARK_FECN) - write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);*/ + /* if (counter_select & CS_PORT_MARK_FECN) + * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0); + */ if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS) write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0); @@ -3167,9 +3181,9 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, if (counter_select & CS_PORT_RCV_BUBBLE) write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0); - /*if (counter_select & CS_PORT_MARK_FECN) - write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0); - */ + /* if (counter_select & CS_PORT_MARK_FECN) + * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0); + */ /* port_vl_xmit_discards ??? */ } @@ -3226,8 +3240,10 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, /* ExcessiverBufferOverrunInfo */ if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO) - /* status bit is essentially kept in the h/w - bit 5 of - * RCV_ERR_INFO */ + /* + * status bit is essentially kept in the h/w - bit 5 of + * RCV_ERR_INFO + */ write_csr(dd, RCV_ERR_INFO, RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK); diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/staging/rdma/hfi1/mad.h index f9e93c035d28..9ebaaf939d34 100644 --- a/drivers/staging/rdma/hfi1/mad.h +++ b/drivers/staging/rdma/hfi1/mad.h @@ -51,8 +51,10 @@ #define _HFI1_MAD_H #include -#define USE_PI_LED_ENABLE 1 /* use led enabled bit in struct - * opa_port_states, if available */ +#define USE_PI_LED_ENABLE 1 /* + * use led enabled bit in struct + * opa_port_states, if available + */ #include #include #ifndef PI_LED_ENABLE_SUP diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index b169166d48b6..4d9fd3b5ef1e 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -284,9 +284,11 @@ static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt, struct msix_entry *msix_entry; int i; - /* We can't pass hfi1_msix_entry array to msix_setup + /* + * We can't pass hfi1_msix_entry array to msix_setup * so use a dummy msix_entry array and copy the allocated - * irq back to the hfi1_msix_entry array. */ + * irq back to the hfi1_msix_entry array. 
+ */ msix_entry = kmalloc_array(nvec, sizeof(*msix_entry), GFP_KERNEL); if (!msix_entry) { ret = -ENOMEM; diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index b0a2a4526a7c..191b260d173d 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -177,8 +177,10 @@ static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = { /* memory pool information, used when calculating final sizes */ struct mem_pool_info { - int centipercent; /* 100th of 1% of memory to use, -1 if blocks - already set */ + int centipercent; /* + * 100th of 1% of memory to use, -1 if blocks + * already set + */ int count; /* count of contexts in the pool */ int blocks; /* block size of the pool */ int size; /* context size, in blocks */ @@ -1429,8 +1431,10 @@ retry: next = head + 1; if (next >= sc->sr_size) next = 0; - /* update the head - must be last! - the releaser can look at fields - in pbuf once we move the head */ + /* + * update the head - must be last! - the releaser can look at fields + * in pbuf once we move the head + */ smp_wmb(); sc->sr_head = next; spin_unlock_irqrestore(&sc->alloc_lock, flags); diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/staging/rdma/hfi1/pio_copy.c index dc0c1783e10f..6f97d228563b 100644 --- a/drivers/staging/rdma/hfi1/pio_copy.c +++ b/drivers/staging/rdma/hfi1/pio_copy.c @@ -86,8 +86,10 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, dend = dest + ((count >> 1) * sizeof(u64)); if (dend < send) { - /* all QWORD data is within the SOP block, does *not* - reach the end of the SOP block */ + /* + * all QWORD data is within the SOP block, does *not* + * reach the end of the SOP block + */ while (dest < dend) { writeq(*(u64 *)from, dest); @@ -152,8 +154,10 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, writeq(val.val64, dest); dest += sizeof(u64); } - /* fill in rest of block, no need to check pbuf->end - as we only wrap on a block boundary */ + /* + * fill in rest of block, no need to check pbuf->end + * as we only wrap on a block boundary + */ while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) { writeq(0, dest); dest += sizeof(u64); @@ -466,8 +470,10 @@ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc, dend = dest + ((nbytes >> 3) * sizeof(u64)); if (dend < send) { - /* all QWORD data is within the SOP block, does *not* - reach the end of the SOP block */ + /* + * all QWORD data is within the SOP block, does *not* + * reach the end of the SOP block + */ while (dest < dend) { writeq(*(u64 *)from, dest); @@ -562,8 +568,10 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes) void __iomem *send; /* SOP end */ void __iomem *xend; - /* calculate the end of data or end of block, whichever - comes first */ + /* + * calculate the end of data or end of block, whichever + * comes first + */ send = pbuf->start + PIO_BLOCK_SIZE; xend = send < dend ? send : dend; @@ -656,8 +664,10 @@ static void mid_copy_straight(struct pio_buf *pbuf, void __iomem *send; /* SOP end */ void __iomem *xend; - /* calculate the end of data or end of block, whichever - comes first */ + /* + * calculate the end of data or end of block, whichever + * comes first + */ send = pbuf->start + PIO_BLOCK_SIZE; xend = send < dend ? 
send : dend; diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/staging/rdma/hfi1/platform.h index cc280cca9b9c..1f41bdc61235 100644 --- a/drivers/staging/rdma/hfi1/platform.h +++ b/drivers/staging/rdma/hfi1/platform.h @@ -186,9 +186,9 @@ static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = { */ /* - *===================================================== + * ===================================================== * System table encodings - *==================================================== + * ===================================================== */ #define PLATFORM_CONFIG_MAGIC_NUM 0x3d4f5041 #define PLATFORM_CONFIG_MAGIC_NUMBER_LEN 4 @@ -208,9 +208,9 @@ enum platform_config_qsfp_power_class_encoding { }; /* - *===================================================== + * ==================================================== * Port table encodings - *==================================================== + * ==================================================== */ enum platform_config_port_type_encoding { PORT_TYPE_UNKNOWN, diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index cd818de47c66..5f62d0229088 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -2219,7 +2219,8 @@ static void __sdma_process_event(struct sdma_engine *sde, * of link up, then we need to start up. * This can happen when hw down is requested while * bringing the link up with traffic active on - * 7220, e.g. */ + * 7220, e.g. + */ ss->go_s99_running = 1; /* fall through and start dma engine */ case sdma_event_e10_go_hw_start: diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index 097d2789f120..b6d09267492b 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -179,8 +179,10 @@ struct user_sdma_iovec { unsigned npages; /* array of pinned pages for this vector */ struct page **pages; - /* offset into the virtual address space of the vector at - * which we last left off. */ + /* + * offset into the virtual address space of the vector at + * which we last left off. + */ u64 offset; }; @@ -596,8 +598,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, } req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]); - /* Calculate the initial TID offset based on the values of - KDETH.OFFSET and KDETH.OM that are passed in. */ + /* + * Calculate the initial TID offset based on the values of + * KDETH.OFFSET and KDETH.OM that are passed in. + */ req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) * (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ? KDETH_OM_LARGE : KDETH_OM_SMALL); @@ -742,8 +746,10 @@ static inline u32 compute_data_length(struct user_sdma_request *req, } else if (req_opcode(req->info.ctrl) == EXPECTED) { u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) * PAGE_SIZE; - /* Get the data length based on the remaining space in the - * TID pair. */ + /* + * Get the data length based on the remaining space in the + * TID pair. + */ len = min(tidlen - req->tidoffset, (u32)req->info.fragsize); /* If we've filled up the TID pair, move to the next one. 
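
Several of the mechanical conversions from earlier in this series (BIT_ULL() for open-coded shifts, sizeof(*p) in allocations, and one assignment per statement) fit in a single hedged sketch; every name below is invented for illustration:

	#include <linux/bitops.h>	/* BIT_ULL() */
	#include <linux/slab.h>		/* kzalloc() */
	#include <linux/types.h>

	#define WIDGET_ENABLED	BIT_ULL(63)	/* was: (1ULL << 63) */

	struct widget {
		u32 len;
		u32 cap;
	};

	static struct widget *widget_alloc(u32 n)
	{
		/* sizeof(*w) tracks the type even if the declaration changes */
		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return NULL;
		/* was: w->len = w->cap = n; split into two statements */
		w->cap = n;
		w->len = w->cap;
		return w;
	}
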
*/ if (unlikely(!len) && ++req->tididx < req->n_tids && @@ -753,9 +759,11 @@ static inline u32 compute_data_length(struct user_sdma_request *req, req->tidoffset = 0; len = min_t(u32, tidlen, req->info.fragsize); } - /* Since the TID pairs map entire pages, make sure that we + /* + * Since the TID pairs map entire pages, make sure that we * are not going to try to send more data that we have - * remaining. */ + * remaining. + */ len = min(len, req->data_len - req->sent); } else len = min(req->data_len - req->sent, (u32)req->info.fragsize); @@ -979,8 +987,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) req->sent += data_sent; if (req->data_len) { tx->iovecs[tx->idx].vec->offset += iov_offset; - /* If we've reached the end of the io vector, mark it - * so the callback can unpin the pages and free it. */ + /* + * If we've reached the end of the io vector, mark it + * so the callback can unpin the pages and free it. + */ if (tx->iovecs[tx->idx].vec->offset == tx->iovecs[tx->idx].vec->iov.iov_len) tx->iovecs[tx->idx].flags |= @@ -1216,8 +1226,10 @@ static int set_txreq_header(struct user_sdma_request *req, if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) * PAGE_SIZE)) { req->tidoffset = 0; - /* Since we don't copy all the TIDs, all at once, - * we have to check again. */ + /* + * Since we don't copy all the TIDs, all at once, + * we have to check again. + */ if (++req->tididx > req->n_tids - 1 || !req->tids[req->tididx]) { return -EINVAL; @@ -1298,8 +1310,10 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) * PAGE_SIZE)) { req->tidoffset = 0; - /* Since we don't copy all the TIDs, all at once, - * we have to check again. */ + /* + * Since we don't copy all the TIDs, all at once, + * we have to check again. 
+ */ if (++req->tididx > req->n_tids - 1 || !req->tids[req->tididx]) { return -EINVAL; -- cgit v1.2.3-59-g8ed1b From 17fb4f2923d7fc7ee778dedc0aa60ab6f402f56c Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:21:52 -0800 Subject: staging/rdma/hfi1: Fix code alignment Fix code alignment to fix checkpatch check: CHECK: Alignment should match open parenthesis Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 921 ++++++++++++------------ drivers/staging/rdma/hfi1/debugfs.c | 34 +- drivers/staging/rdma/hfi1/driver.c | 32 +- drivers/staging/rdma/hfi1/eprom.c | 39 +- drivers/staging/rdma/hfi1/file_ops.c | 8 +- drivers/staging/rdma/hfi1/firmware.c | 124 ++-- drivers/staging/rdma/hfi1/hfi.h | 5 +- drivers/staging/rdma/hfi1/init.c | 24 +- drivers/staging/rdma/hfi1/intr.c | 12 +- drivers/staging/rdma/hfi1/mad.c | 141 ++-- drivers/staging/rdma/hfi1/pcie.c | 99 ++- drivers/staging/rdma/hfi1/pio.c | 71 +- drivers/staging/rdma/hfi1/pio.h | 4 +- drivers/staging/rdma/hfi1/pio_copy.c | 14 +- drivers/staging/rdma/hfi1/qsfp.c | 2 +- drivers/staging/rdma/hfi1/rc.c | 4 +- drivers/staging/rdma/hfi1/ruc.c | 14 +- drivers/staging/rdma/hfi1/sdma.c | 178 +++-- drivers/staging/rdma/hfi1/sysfs.c | 36 +- drivers/staging/rdma/hfi1/trace.c | 47 +- drivers/staging/rdma/hfi1/trace.h | 1280 ++++++++++++++++----------------- drivers/staging/rdma/hfi1/twsi.c | 2 +- drivers/staging/rdma/hfi1/uc.c | 2 +- drivers/staging/rdma/hfi1/ud.c | 7 +- drivers/staging/rdma/hfi1/user_sdma.c | 2 +- drivers/staging/rdma/hfi1/verbs.c | 3 +- 26 files changed, 1508 insertions(+), 1597 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 8e84060a8efd..ce61883d146c 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -944,7 +944,7 @@ static struct flag_table dc8051_err_flags[] = { FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)), FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)), FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES", - D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)), + D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)), FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)), }; @@ -958,7 +958,7 @@ static struct flag_table dc8051_info_err_flags[] = { FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME), FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET), FLAG_ENTRY0("Serdes internal loopback failure", - FAILED_SERDES_INTERNAL_LOOPBACK), + FAILED_SERDES_INTERNAL_LOOPBACK), FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT), FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING), FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE), @@ -1147,11 +1147,8 @@ struct cntr_entry { /* * accessor for stat element, context either dd or ppd */ - u64 (*rw_cntr)(const struct cntr_entry *, - void *context, - int vl, - int mode, - u64 data); + u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl, + int mode, u64 data); }; #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0 @@ -1300,7 +1297,7 @@ static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr, /* Dev Access */ static u64 dev_access_u32_csr(const struct cntr_entry *entry, - void *context, int vl, int mode, u64 data) + void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; u64 csr = entry->csr; @@ -1358,7 +1355,7 @@ static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry, } static u64 
dev_access_u64_csr(const struct cntr_entry *entry, void *context, - int vl, int mode, u64 data) + int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; @@ -1379,7 +1376,7 @@ static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context, } static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context, - int vl, int mode, u64 data) + int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; u32 csr = entry->csr; @@ -1403,7 +1400,7 @@ static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context, /* Port Access */ static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context, - int vl, int mode, u64 data) + int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; @@ -1413,7 +1410,7 @@ static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context, } static u64 port_access_u64_csr(const struct cntr_entry *entry, - void *context, int vl, int mode, u64 data) + void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; u64 val; @@ -1453,7 +1450,7 @@ static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode, } static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context, - int vl, int mode, u64 data) + int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; @@ -1463,7 +1460,7 @@ static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context, } static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context, - int vl, int mode, u64 data) + int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; @@ -1484,7 +1481,7 @@ static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry, } static u64 access_sw_xmit_discards(const struct cntr_entry *entry, - void *context, int vl, int mode, u64 data) + void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; u64 zero = 0; @@ -1501,7 +1498,8 @@ static u64 access_sw_xmit_discards(const struct cntr_entry *entry, } static u64 access_xmit_constraint_errs(const struct cntr_entry *entry, - void *context, int vl, int mode, u64 data) + void *context, int vl, int mode, + u64 data) { struct hfi1_pportdata *ppd = context; @@ -1513,7 +1511,7 @@ static u64 access_xmit_constraint_errs(const struct cntr_entry *entry, } static u64 access_rcv_constraint_errs(const struct cntr_entry *entry, - void *context, int vl, int mode, u64 data) + void *context, int vl, int mode, u64 data) { struct hfi1_pportdata *ppd = context; @@ -1569,7 +1567,7 @@ static u64 access_sw_cpu_intr(const struct cntr_entry *entry, } static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry, - void *context, int vl, int mode, u64 data) + void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = context; @@ -1610,7 +1608,7 @@ static u64 access_sw_kmem_wait(const struct cntr_entry *entry, } static u64 access_sw_send_schedule(const struct cntr_entry *entry, - void *context, int vl, int mode, u64 data) + void *context, int vl, int mode, u64 data) { struct hfi1_devdata *dd = (struct hfi1_devdata *)context; @@ -4965,28 +4963,28 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = { [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL), [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH), [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT, - CNTR_SYNTH | CNTR_VL), + CNTR_SYNTH | CNTR_VL), [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT, - CNTR_SYNTH 
| CNTR_VL), + CNTR_SYNTH | CNTR_VL), [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT, - CNTR_SYNTH | CNTR_VL), + CNTR_SYNTH | CNTR_VL), [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL), [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL), [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT, - access_sw_link_dn_cnt), + access_sw_link_dn_cnt), [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT, - access_sw_link_up_cnt), + access_sw_link_up_cnt), [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL, access_sw_unknown_frame_cnt), [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT, - access_sw_xmit_discards), + access_sw_xmit_discards), [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0, - CNTR_SYNTH | CNTR_32BIT | CNTR_VL, - access_sw_xmit_discards), + CNTR_SYNTH | CNTR_32BIT | CNTR_VL, + access_sw_xmit_discards), [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH, - access_xmit_constraint_errs), + access_xmit_constraint_errs), [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH, - access_rcv_constraint_errs), + access_rcv_constraint_errs), [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts), [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends), [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks), @@ -5002,9 +5000,9 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = { [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, access_sw_cpu_rc_acks), [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, - access_sw_cpu_rc_qacks), + access_sw_cpu_rc_qacks), [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL, - access_sw_cpu_rc_delayed_comp), + access_sw_cpu_rc_delayed_comp), [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1), [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3), [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5), @@ -5153,7 +5151,7 @@ done: * the buffer. End in '*' if the buffer is too short. 
*/ static char *flag_string(char *buf, int buf_len, u64 flags, - struct flag_table *table, int table_size) + struct flag_table *table, int table_size) { char extra[32]; char *p = buf; @@ -5214,10 +5212,8 @@ static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source) if (source < ARRAY_SIZE(cce_misc_names)) strncpy(buf, cce_misc_names[source], bsize); else - snprintf(buf, - bsize, - "Reserved%u", - source + IS_GENERAL_ERR_START); + snprintf(buf, bsize, "Reserved%u", + source + IS_GENERAL_ERR_START); return buf; } @@ -5341,51 +5337,56 @@ static char *is_reserved_name(char *buf, size_t bsize, unsigned int source) static char *cce_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, - cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags)); + cce_err_status_flags, + ARRAY_SIZE(cce_err_status_flags)); } static char *rxe_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, - rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags)); + rxe_err_status_flags, + ARRAY_SIZE(rxe_err_status_flags)); } static char *misc_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, misc_err_status_flags, - ARRAY_SIZE(misc_err_status_flags)); + ARRAY_SIZE(misc_err_status_flags)); } static char *pio_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, - pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags)); + pio_err_status_flags, + ARRAY_SIZE(pio_err_status_flags)); } static char *sdma_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, - sdma_err_status_flags, - ARRAY_SIZE(sdma_err_status_flags)); + sdma_err_status_flags, + ARRAY_SIZE(sdma_err_status_flags)); } static char *egress_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, - egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags)); + egress_err_status_flags, + ARRAY_SIZE(egress_err_status_flags)); } static char *egress_err_info_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, - egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags)); + egress_err_info_flags, + ARRAY_SIZE(egress_err_info_flags)); } static char *send_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, - send_err_status_flags, - ARRAY_SIZE(send_err_status_flags)); + send_err_status_flags, + ARRAY_SIZE(send_err_status_flags)); } static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg) @@ -5398,7 +5399,7 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg) * report or record it. 
*/ dd_dev_info(dd, "CCE Error: %s\n", - cce_err_status_string(buf, sizeof(buf), reg)); + cce_err_status_string(buf, sizeof(buf), reg)); if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) && is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { @@ -5428,11 +5429,11 @@ static void update_rcverr_timer(unsigned long opaque) u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL); if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && - ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { + ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__); - set_link_down_reason(ppd, - OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0, - OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN); + set_link_down_reason( + ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0, + OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN); queue_work(ppd->hfi1_wq, &ppd->link_bounce_work); } dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; @@ -5461,7 +5462,7 @@ static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) int i = 0; dd_dev_info(dd, "Receive Error: %s\n", - rxe_err_status_string(buf, sizeof(buf), reg)); + rxe_err_status_string(buf, sizeof(buf), reg)); if (reg & ALL_RXE_FREEZE_ERR) { int flags = 0; @@ -5488,7 +5489,7 @@ static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) int i = 0; dd_dev_info(dd, "Misc Error: %s", - misc_err_status_string(buf, sizeof(buf), reg)); + misc_err_status_string(buf, sizeof(buf), reg)); for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) incr_cntr64(&dd->misc_err_status_cnt[i]); @@ -5501,7 +5502,7 @@ static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg) int i = 0; dd_dev_info(dd, "PIO Error: %s\n", - pio_err_status_string(buf, sizeof(buf), reg)); + pio_err_status_string(buf, sizeof(buf), reg)); if (reg & ALL_PIO_FREEZE_ERR) start_freeze_handling(dd->pport, 0); @@ -5518,7 +5519,7 @@ static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg) int i = 0; dd_dev_info(dd, "SDMA Error: %s\n", - sdma_err_status_string(buf, sizeof(buf), reg)); + sdma_err_status_string(buf, sizeof(buf), reg)); if (reg & ALL_SDMA_FREEZE_ERR) start_freeze_handling(dd->pport, 0); @@ -5560,8 +5561,8 @@ static void handle_send_egress_err_info(struct hfi1_devdata *dd, write_csr(dd, SEND_EGRESS_ERR_INFO, info); dd_dev_info(dd, - "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n", - info, egress_err_info_string(buf, sizeof(buf), info), src); + "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n", + info, egress_err_info_string(buf, sizeof(buf), info), src); /* Eventually add other counters for each bit */ if (info & PORT_DISCARD_EGRESS_ERRS) { @@ -5699,7 +5700,7 @@ static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) if (reg) dd_dev_info(dd, "Egress Error: %s\n", - egress_err_status_string(buf, sizeof(buf), reg)); + egress_err_status_string(buf, sizeof(buf), reg)); for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) @@ -5713,7 +5714,7 @@ static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) int i = 0; dd_dev_info(dd, "Send Error: %s\n", - send_err_status_string(buf, sizeof(buf), reg)); + send_err_status_string(buf, sizeof(buf), reg)); for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) { if (reg & (1ull << i)) @@ -5759,7 +5760,7 @@ static void interrupt_clear_down(struct hfi1_devdata *dd, u64 mask; dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", - eri->desc, reg); + eri->desc, reg); /* * 
Read-modify-write so any other masked bits * remain masked. @@ -5783,14 +5784,15 @@ static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source) interrupt_clear_down(dd, 0, eri); } else { dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", - source); + source); } } static char *send_context_err_status_string(char *buf, int buf_len, u64 flags) { return flag_string(buf, buf_len, flags, - sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags)); + sc_err_status_flags, + ARRAY_SIZE(sc_err_status_flags)); } /* @@ -5815,15 +5817,15 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, sw_index = dd->hw_to_sw[hw_context]; if (sw_index >= dd->num_send_contexts) { dd_dev_err(dd, - "out of range sw index %u for send context %u\n", - sw_index, hw_context); + "out of range sw index %u for send context %u\n", + sw_index, hw_context); return; } sci = &dd->send_contexts[sw_index]; sc = sci->sc; if (!sc) { dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, - sw_index, hw_context); + sw_index, hw_context); return; } @@ -5833,7 +5835,8 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd, status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS); dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context, - send_context_err_status_string(flags, sizeof(flags), status)); + send_context_err_status_string(flags, sizeof(flags), + status)); if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK) handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index)); @@ -5918,8 +5921,8 @@ static void is_various_int(struct hfi1_devdata *dd, unsigned int source) interrupt_clear_down(dd, 0, eri); else dd_dev_info(dd, - "%s: Unimplemented/reserved interrupt %d\n", - __func__, source); + "%s: Unimplemented/reserved interrupt %d\n", + __func__, source); } static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) @@ -5931,7 +5934,7 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) if (reg & QSFP_HFI0_MODPRST_N) { dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n", - __func__); + __func__); if (!qsfp_mod_present(ppd)) { ppd->driver_link_ready = 0; @@ -5949,7 +5952,7 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) ppd->qsfp_info.reset_needed = 0; ppd->qsfp_info.limiting_active = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, - flags); + flags); /* Invert the ModPresent pin now to detect plug-in */ write_csr(dd, dd->hfi1_id ? 
ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, qsfp_int_mgmt); @@ -5977,7 +5980,7 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) ppd->qsfp_info.cache_valid = 0; ppd->qsfp_info.cache_refresh_required = 1; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, - flags); + flags); /* * Stop inversion of ModPresent pin to detect @@ -5994,7 +5997,7 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) if (reg & QSFP_HFI0_INT_N) { dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n", - __func__); + __func__); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.check_interrupt_flags = 1; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); @@ -6010,11 +6013,11 @@ static int request_host_lcb_access(struct hfi1_devdata *dd) int ret; ret = do_8051_command(dd, HCMD_MISC, - (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT, - NULL); + (u64)HCMD_MISC_REQUEST_LCB_ACCESS << + LOAD_DATA_FIELD_ID_SHIFT, NULL); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "%s: command failed with error %d\n", - __func__, ret); + __func__, ret); } return ret == HCMD_SUCCESS ? 0 : -EBUSY; } @@ -6024,11 +6027,11 @@ static int request_8051_lcb_access(struct hfi1_devdata *dd) int ret; ret = do_8051_command(dd, HCMD_MISC, - (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT, - NULL); + (u64)HCMD_MISC_GRANT_LCB_ACCESS << + LOAD_DATA_FIELD_ID_SHIFT, NULL); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "%s: command failed with error %d\n", - __func__, ret); + __func__, ret); } return ret == HCMD_SUCCESS ? 0 : -EBUSY; } @@ -6040,8 +6043,8 @@ static int request_8051_lcb_access(struct hfi1_devdata *dd) static inline void set_host_lcb_access(struct hfi1_devdata *dd) { write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, - DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK - | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK); + DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK | + DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK); } /* @@ -6051,7 +6054,7 @@ static inline void set_host_lcb_access(struct hfi1_devdata *dd) static inline void set_8051_lcb_access(struct hfi1_devdata *dd) { write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, - DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK); + DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK); } /* @@ -6085,7 +6088,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) /* this access is valid only when the link is up */ if ((ppd->host_link_state & HLS_UP) == 0) { dd_dev_info(dd, "%s: link state %s not up\n", - __func__, link_state_name(ppd->host_link_state)); + __func__, link_state_name(ppd->host_link_state)); ret = -EBUSY; goto done; } @@ -6094,8 +6097,8 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) ret = request_host_lcb_access(dd); if (ret) { dd_dev_err(dd, - "%s: unable to acquire LCB access, err %d\n", - __func__, ret); + "%s: unable to acquire LCB access, err %d\n", + __func__, ret); goto done; } set_host_lcb_access(dd); @@ -6132,7 +6135,7 @@ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) if (dd->lcb_access_count == 0) { dd_dev_err(dd, "%s: LCB access count is zero. 
Skipping.\n", - __func__); + __func__); goto done; } @@ -6141,8 +6144,8 @@ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) ret = request_8051_lcb_access(dd); if (ret) { dd_dev_err(dd, - "%s: unable to release LCB access, err %d\n", - __func__, ret); + "%s: unable to release LCB access, err %d\n", + __func__, ret); /* restore host access if the grant didn't work */ set_host_lcb_access(dd); goto done; @@ -6174,9 +6177,10 @@ static void init_lcb_access(struct hfi1_devdata *dd) static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data) { write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, - DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK - | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT - | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); + DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK | + (u64)return_code << + DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT | + (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); } /* @@ -6214,7 +6218,7 @@ void handle_8051_request(struct work_struct *work) case HREQ_SET_TX_EQ_ABS: case HREQ_SET_TX_EQ_REL: dd_dev_info(dd, "8051 request: request 0x%x not supported\n", - type); + type); hreq_response(dd, HREQ_NOT_SUPPORTED, 0); break; @@ -6272,11 +6276,11 @@ static void write_global_credit(struct hfi1_devdata *dd, u8 vau, u16 total, u16 shared) { write_csr(dd, SEND_CM_GLOBAL_CREDIT, - ((u64)total - << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) - | ((u64)shared - << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) - | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); + ((u64)total << + SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) | + ((u64)shared << + SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) | + ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); } /* @@ -6355,14 +6359,13 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort) write_csr(dd, DC_LCB_CFG_RUN, 0); /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, - 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT); + 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT); /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */ dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); reg = read_csr(dd, DCC_CFG_RESET); - write_csr(dd, DCC_CFG_RESET, - reg - | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) - | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT)); + write_csr(dd, DCC_CFG_RESET, reg | + (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) | + (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT)); (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ if (!abort) { udelay(1); /* must hold for the longer of 16cclks or 20ns */ @@ -6419,7 +6422,7 @@ static void dc_start(struct hfi1_devdata *dd) ret = wait_fm_ready(dd, TIMEOUT_8051_START); if (ret) { dd_dev_err(dd, "%s: timeout starting 8051 firmware\n", - __func__); + __func__); } /* Take away reset for LCB and RX FPE (set in lcb_shutdown). 
*/ write_csr(dd, DCC_CFG_RESET, 0x10); @@ -6512,7 +6515,7 @@ static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd) write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr); /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */ write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, - DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); + DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr); } @@ -6571,8 +6574,8 @@ void handle_sma_message(struct work_struct *work) break; default: dd_dev_err(dd, - "%s: received unexpected SMA idle message 0x%llx\n", - __func__, msg); + "%s: received unexpected SMA idle message 0x%llx\n", + __func__, msg); break; } } @@ -6664,10 +6667,9 @@ static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze) if (time_after(jiffies, timeout)) { dd_dev_err(dd, - "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing", - freeze ? "" : "un", - reg & ALL_FROZE, - freeze ? ALL_FROZE : 0ull); + "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing", + freeze ? "" : "un", reg & ALL_FROZE, + freeze ? ALL_FROZE : 0ull); return; } usleep_range(80, 120); @@ -6792,7 +6794,7 @@ void handle_freeze(struct work_struct *work) void handle_link_up(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, - link_up_work); + link_up_work); set_link_state(ppd, HLS_UP_INIT); /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ @@ -6811,10 +6813,10 @@ void handle_link_up(struct work_struct *work) if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { /* oops - current speed is not enabled, bounce */ dd_dev_err(ppd->dd, - "Link speed active 0x%x is outside enabled 0x%x, downing link\n", - ppd->link_speed_active, ppd->link_speed_enabled); + "Link speed active 0x%x is outside enabled 0x%x, downing link\n", + ppd->link_speed_active, ppd->link_speed_enabled); set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, - OPA_LINKDOWN_REASON_SPEED_POLICY); + OPA_LINKDOWN_REASON_SPEED_POLICY); set_link_state(ppd, HLS_DN_OFFLINE); tune_serdes(ppd); start_link(ppd); @@ -6896,7 +6898,7 @@ void handle_link_bounce(struct work_struct *work) start_link(ppd); } else { dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", - __func__, link_state_name(ppd->host_link_state)); + __func__, link_state_name(ppd->host_link_state)); } } @@ -6993,7 +6995,7 @@ static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) case 3: return OPA_LINK_WIDTH_3X; default: dd_dev_info(dd, "%s: invalid width %d, using 4\n", - __func__, width); + __func__, width); /* fall through */ case 4: return OPA_LINK_WIDTH_4X; } @@ -7031,7 +7033,7 @@ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, /* read the active lanes */ read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, - &rx_polarity_inversion, &max_rate); + &rx_polarity_inversion, &max_rate); read_local_lni(dd, &enable_lane_rx); /* convert to counts */ @@ -7052,8 +7054,8 @@ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, break; default: dd_dev_err(dd, - "%s: unexpected max rate %d, using 25Gb\n", - __func__, (int)max_rate); + "%s: unexpected max rate %d, using 25Gb\n", + __func__, (int)max_rate); /* fall through */ case 1: dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; @@ -7062,8 +7064,8 @@ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, } dd_dev_info(dd, - "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n", - enable_lane_tx, tx, enable_lane_rx, rx); + "Fabric active lanes (width): tx 
0x%x (%d), rx 0x%x (%d)\n", + enable_lane_tx, tx, enable_lane_rx, rx); *tx_width = link_width_to_bits(dd, tx); *rx_width = link_width_to_bits(dd, rx); } @@ -7166,13 +7168,8 @@ void handle_verify_cap(struct work_struct *work) */ read_vc_remote_phy(dd, &power_management, &continious); - read_vc_remote_fabric( - dd, - &vau, - &z, - &vcu, - &vl15buf, - &partner_supported_crc); + read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf, + &partner_supported_crc); read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths); read_remote_device_id(dd, &device_id, &device_rev); /* @@ -7183,19 +7180,16 @@ void handle_verify_cap(struct work_struct *work) /* print the active widths */ get_link_widths(dd, &active_tx, &active_rx); dd_dev_info(dd, - "Peer PHY: power management 0x%x, continuous updates 0x%x\n", - (int)power_management, (int)continious); + "Peer PHY: power management 0x%x, continuous updates 0x%x\n", + (int)power_management, (int)continious); dd_dev_info(dd, - "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n", - (int)vau, - (int)z, - (int)vcu, - (int)vl15buf, - (int)partner_supported_crc); + "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n", + (int)vau, (int)z, (int)vcu, (int)vl15buf, + (int)partner_supported_crc); dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n", - (u32)remote_tx_rate, (u32)link_widths); + (u32)remote_tx_rate, (u32)link_widths); dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n", - (u32)device_id, (u32)device_rev); + (u32)device_id, (u32)device_rev); /* * The peer vAU value just read is the peer receiver value. HFI does * not support a transmit vAU of 0 (AU == 8). We advertised that @@ -7230,10 +7224,10 @@ void handle_verify_cap(struct work_struct *work) reg = read_csr(dd, SEND_CM_CTRL); if (crc_val == LCB_CRC_14B && crc_14b_sideband) { write_csr(dd, SEND_CM_CTRL, - reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); + reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); } else { write_csr(dd, SEND_CM_CTRL, - reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); + reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); } ppd->link_speed_active = 0; /* invalid value */ @@ -7258,7 +7252,7 @@ void handle_verify_cap(struct work_struct *work) } if (ppd->link_speed_active == 0) { dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n", - __func__, (int)remote_tx_rate); + __func__, (int)remote_tx_rate); ppd->link_speed_active = OPA_LINK_SPEED_25G; } @@ -7314,9 +7308,9 @@ void handle_verify_cap(struct work_struct *work) read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) & DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK; dd_dev_info(dd, - "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n", - ppd->neighbor_guid, ppd->neighbor_type, - ppd->mgmt_allowed, ppd->neighbor_fm_security); + "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n", + ppd->neighbor_guid, ppd->neighbor_type, + ppd->mgmt_allowed, ppd->neighbor_fm_security); if (ppd->mgmt_allowed) add_full_mgmt_pkey(ppd); @@ -7369,27 +7363,28 @@ retry: /* downgrade is disabled */ /* bounce if not at starting active width */ - if ((ppd->link_width_active != ppd->link_width_downgrade_tx_active) || - (ppd->link_width_active != ppd->link_width_downgrade_rx_active)) { + if ((ppd->link_width_active != + ppd->link_width_downgrade_tx_active) || + (ppd->link_width_active != + ppd->link_width_downgrade_rx_active)) { dd_dev_err(ppd->dd, - "Link downgrade is disabled and link has downgraded, downing link\n"); + "Link downgrade is disabled and link has 
downgraded, downing link\n"); dd_dev_err(ppd->dd, - " original 0x%x, tx active 0x%x, rx active 0x%x\n", - ppd->link_width_active, - ppd->link_width_downgrade_tx_active, - ppd->link_width_downgrade_rx_active); + " original 0x%x, tx active 0x%x, rx active 0x%x\n", + ppd->link_width_active, + ppd->link_width_downgrade_tx_active, + ppd->link_width_downgrade_rx_active); do_bounce = 1; } } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || (lwde & ppd->link_width_downgrade_rx_active) == 0) { /* Tx or Rx is outside the enabled policy */ dd_dev_err(ppd->dd, - "Link is outside of downgrade allowed, downing link\n"); + "Link is outside of downgrade allowed, downing link\n"); dd_dev_err(ppd->dd, - " enabled 0x%x, tx active 0x%x, rx active 0x%x\n", - lwde, - ppd->link_width_downgrade_tx_active, - ppd->link_width_downgrade_rx_active); + " enabled 0x%x, tx active 0x%x, rx active 0x%x\n", + lwde, ppd->link_width_downgrade_tx_active, + ppd->link_width_downgrade_rx_active); do_bounce = 1; } @@ -7398,7 +7393,7 @@ done: if (do_bounce) { set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0, - OPA_LINKDOWN_REASON_WIDTH_POLICY); + OPA_LINKDOWN_REASON_WIDTH_POLICY); set_link_state(ppd, HLS_DN_OFFLINE); tune_serdes(ppd); start_link(ppd); @@ -7481,9 +7476,10 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { queue_link_down = 1; dd_dev_info(dd, "Link error: %s\n", - dc8051_info_err_string(buf, - sizeof(buf), - err & FAILED_LNI)); + dc8051_info_err_string(buf, + sizeof(buf), + err & + FAILED_LNI)); } err &= ~(u64)FAILED_LNI; } @@ -7495,7 +7491,8 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) if (err) { /* report remaining errors, but do not do anything */ dd_dev_err(dd, "8051 info error: %s\n", - dc8051_info_err_string(buf, sizeof(buf), err)); + dc8051_info_err_string(buf, sizeof(buf), + err)); } /* @@ -7548,8 +7545,9 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) if (host_msg) { /* report remaining messages, but do not do anything */ dd_dev_info(dd, "8051 info host message: %s\n", - dc8051_info_host_msg_string(buf, sizeof(buf), - host_msg)); + dc8051_info_host_msg_string(buf, + sizeof(buf), + host_msg)); } reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK; @@ -7562,15 +7560,15 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) */ dd_dev_err(dd, "Lost 8051 heartbeat\n"); write_csr(dd, DC_DC8051_ERR_EN, - read_csr(dd, DC_DC8051_ERR_EN) - & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK); + read_csr(dd, DC_DC8051_ERR_EN) & + ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK); reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK; } if (reg) { /* report the error, but do not do anything */ dd_dev_err(dd, "8051 error: %s\n", - dc8051_err_string(buf, sizeof(buf), reg)); + dc8051_err_string(buf, sizeof(buf), reg)); } if (queue_link_down) { @@ -7582,7 +7580,7 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) || ppd->link_enabled == 0) { dd_dev_info(dd, "%s: not queuing link down\n", - __func__); + __func__); } else { queue_work(ppd->hfi1_wq, &ppd->link_down_work); } @@ -7760,7 +7758,7 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) /* just report this */ dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra); dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n", - hdr0, hdr1); + hdr0, hdr1); reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK; } @@ 
-7779,7 +7777,7 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) /* report any remaining errors */ if (reg) dd_dev_info(dd, "DCC Error: %s\n", - dcc_err_string(buf, sizeof(buf), reg)); + dcc_err_string(buf, sizeof(buf), reg)); if (lcl_reason == 0) lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN; @@ -7796,7 +7794,7 @@ static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg) char buf[96]; dd_dev_info(dd, "LCB Error: %s\n", - lcb_err_string(buf, sizeof(buf), reg)); + lcb_err_string(buf, sizeof(buf), reg)); } /* @@ -7886,7 +7884,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source) err_detail = "out of range"; } dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n", - err_detail, source); + err_detail, source); } /* @@ -7912,7 +7910,7 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source) err_detail = "out of range"; } dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n", - err_detail, source); + err_detail, source); } /* @@ -7923,7 +7921,7 @@ static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) char name[64]; dd_dev_err(dd, "unexpected %s interrupt\n", - is_reserved_name(name, sizeof(name), source)); + is_reserved_name(name, sizeof(name), source)); } static const struct is_table is_table[] = { @@ -8001,7 +7999,7 @@ static irqreturn_t general_interrupt(int irq, void *data) /* phase 2: call the appropriate handler */ for_each_set_bit(bit, (unsigned long *)®s[0], - CCE_NUM_INT_CSRS * 64) { + CCE_NUM_INT_CSRS * 64) { is_interrupt(dd, bit); } @@ -8024,19 +8022,19 @@ static irqreturn_t sdma_interrupt(int irq, void *data) /* This read_csr is really bad in the hot path */ status = read_csr(dd, - CCE_INT_STATUS + (8 * (IS_SDMA_START / 64))) - & sde->imask; + CCE_INT_STATUS + (8 * (IS_SDMA_START / 64))) + & sde->imask; if (likely(status)) { /* clear the interrupt(s) */ write_csr(dd, - CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)), - status); + CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)), + status); /* handle the interrupt(s) */ sdma_engine_interrupt(sde, status); } else dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n", - sde->this_idx); + sde->this_idx); return IRQ_HANDLED; } @@ -8436,8 +8434,8 @@ int load_8051_config(struct hfi1_devdata *dd, u8 field_id, ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, - "load 8051 config: field id %d, lane %d, err %d\n", - (int)field_id, (int)lane_id, ret); + "load 8051 config: field id %d, lane %d, err %d\n", + (int)field_id, (int)lane_id, ret); } return ret; } @@ -8474,7 +8472,7 @@ int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, } else { *result = 0; dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n", - __func__, lane_id, field_id); + __func__, lane_id, field_id); } return ret; @@ -8511,7 +8509,7 @@ static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits, u32 frame; read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG, - &frame); + &frame); *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK; *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK; *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; @@ -8593,7 +8591,7 @@ static void read_vc_remote_link_width(struct hfi1_devdata *dd, u32 frame; read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG, - &frame); + &frame); *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT) & REMOTE_TX_RATE_MASK; 
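/*
 * Editorial aside, not part of the patch: the read_*_config helpers in
 * this file all decode an 8051 firmware frame the same way -- shift the
 * field down, then mask it off, exactly as read_vc_remote_link_width()
 * does just below.  A minimal standalone sketch of that pattern; the
 * EX_* shift/mask values are illustrative only, not the real
 * VERIFY_CAP frame layout.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_TX_RATE_SHIFT	0	/* hypothetical field layout */
#define EX_TX_RATE_MASK		0xffu
#define EX_LINK_WIDTH_SHIFT	8
#define EX_LINK_WIDTH_MASK	0xffu

static void decode_frame(uint32_t frame, uint8_t *tx_rate, uint8_t *widths)
{
	/* isolate each field: shift it to bit 0, then mask off the rest */
	*tx_rate = (frame >> EX_TX_RATE_SHIFT) & EX_TX_RATE_MASK;
	*widths = (frame >> EX_LINK_WIDTH_SHIFT) & EX_LINK_WIDTH_MASK;
}

int main(void)
{
	uint8_t rate, widths;

	decode_frame(0x0302, &rate, &widths);
	printf("tx rate 0x%x, widths 0x%x\n", rate, widths); /* 0x2, 0x3 */
	return 0;
}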
*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; @@ -8633,7 +8631,7 @@ void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality) *link_quality = 0; if (dd->pport->host_link_state & HLS_UP) { ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, - &frame); + &frame); if (ret == 0) *link_quality = (frame >> LINK_QUALITY_SHIFT) & LINK_QUALITY_MASK; @@ -8693,10 +8691,9 @@ static void check_fabric_firmware_versions(struct hfi1_devdata *dd) for (lane = 0; lane < 4; lane++) { ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame); if (ret) { - dd_dev_err( - dd, - "Unable to read lane %d firmware details\n", - lane); + dd_dev_err(dd, + "Unable to read lane %d firmware details\n", + lane); continue; } version = (frame >> SPICO_ROM_VERSION_SHIFT) @@ -8704,8 +8701,8 @@ static void check_fabric_firmware_versions(struct hfi1_devdata *dd) prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT) & SPICO_ROM_PROD_ID_MASK; dd_dev_info(dd, - "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n", - lane, version, prod_id); + "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n", + lane, version, prod_id); } } @@ -8718,11 +8715,10 @@ static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out) { int ret; - ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, - type, data_out); + ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "read idle message: type %d, err %d\n", - (u32)type, ret); + (u32)type, ret); return -EINVAL; } dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out); @@ -8739,8 +8735,8 @@ static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out) */ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data) { - return read_idle_message(dd, - (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data); + return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, + data); } /* @@ -8756,7 +8752,7 @@ static int send_idle_message(struct hfi1_devdata *dd, u64 data) ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n", - data, ret); + data, ret); return -EINVAL; } return 0; @@ -8771,8 +8767,8 @@ int send_idle_sma(struct hfi1_devdata *dd, u64 message) { u64 data; - data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) - | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT); + data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) | + ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT); return send_idle_message(dd, data); } @@ -8794,7 +8790,7 @@ static int do_quick_linkup(struct hfi1_devdata *dd) /* LCB_CFG_LOOPBACK.VAL = 2 */ /* LCB_CFG_LANE_WIDTH.VAL = 0 */ write_csr(dd, DC_LCB_CFG_LOOPBACK, - IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT); + IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT); write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); } @@ -8806,25 +8802,24 @@ static int do_quick_linkup(struct hfi1_devdata *dd) if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { /* LCB_CFG_RUN.EN = 1 */ write_csr(dd, DC_LCB_CFG_RUN, - 1ull << DC_LCB_CFG_RUN_EN_SHIFT); + 1ull << DC_LCB_CFG_RUN_EN_SHIFT); /* watch LCB_STS_LINK_TRANSFER_ACTIVE */ timeout = jiffies + msecs_to_jiffies(10); while (1) { - reg = read_csr(dd, - DC_LCB_STS_LINK_TRANSFER_ACTIVE); + reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE); if (reg) break; if (time_after(jiffies, timeout)) { dd_dev_err(dd, - "timeout waiting for LINK_TRANSFER_ACTIVE\n"); + "timeout waiting for LINK_TRANSFER_ACTIVE\n"); return -ETIMEDOUT; } udelay(2); } 
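/*
 * Editorial aside, not part of the patch: the LINK_TRANSFER_ACTIVE wait
 * in do_quick_linkup() above is the driver's usual bounded-poll idiom --
 * read a status CSR, back off briefly, and give up once a deadline
 * passes.  A userspace sketch of the same shape, with read_status() as
 * a stub standing in for read_csr():
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint64_t read_status(void)
{
	static int calls;

	return ++calls > 3;	/* pretend the link goes active on read 4 */
}

static int wait_link_active(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (read_status())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return -1;	/* the driver returns -ETIMEDOUT */
		usleep(2);		/* analogous to the udelay(2) above */
	}
}

int main(void)
{
	printf("wait_link_active: %d\n", wait_link_active(10));
	return 0;
}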
write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, - 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT); + 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT); } if (!loopback) { @@ -8836,10 +8831,9 @@ static int do_quick_linkup(struct hfi1_devdata *dd) * done with LCB set up before resuming. */ dd_dev_err(dd, - "Pausing for peer to be finished with LCB set up\n"); + "Pausing for peer to be finished with LCB set up\n"); msleep(5000); - dd_dev_err(dd, - "Continuing with quick linkup\n"); + dd_dev_err(dd, "Continuing with quick linkup\n"); } write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ @@ -8853,8 +8847,8 @@ static int do_quick_linkup(struct hfi1_devdata *dd) ret = set_physical_link_state(dd, PLS_QUICK_LINKUP); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, - "%s: set physical link state to quick LinkUp failed with return %d\n", - __func__, ret); + "%s: set physical link state to quick LinkUp failed with return %d\n", + __func__, ret); set_host_lcb_access(dd); write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ @@ -8879,8 +8873,8 @@ static int set_serdes_loopback_mode(struct hfi1_devdata *dd) if (ret == HCMD_SUCCESS) return 0; dd_dev_err(dd, - "Set physical link state to SerDes Loopback failed with return %d\n", - ret); + "Set physical link state to SerDes Loopback failed with return %d\n", + ret); if (ret >= 0) ret = -EINVAL; return ret; @@ -8895,7 +8889,7 @@ static int init_loopback(struct hfi1_devdata *dd) /* all loopbacks should disable self GUID check */ write_csr(dd, DC_DC8051_CFG_MODE, - (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK)); + (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK)); /* * The simulator has only one loopback option - LCB. Switch @@ -8926,7 +8920,7 @@ static int init_loopback(struct hfi1_devdata *dd) /* not supported in emulation due to emulation RTL changes */ if (dd->icode == ICODE_FPGA_EMULATION) { dd_dev_err(dd, - "LCB loopback not supported in emulation\n"); + "LCB loopback not supported in emulation\n"); return -EINVAL; } return 0; @@ -8982,7 +8976,7 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd) /* set the local tx rate - need to read-modify-write */ ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, - &rx_polarity_inversion, &ppd->local_tx_rate); + &rx_polarity_inversion, &ppd->local_tx_rate); if (ret) goto set_local_link_attributes_fail; @@ -9003,15 +8997,16 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd) enable_lane_tx = 0xF; /* enable all four lanes */ ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion, - rx_polarity_inversion, ppd->local_tx_rate); + rx_polarity_inversion, ppd->local_tx_rate); if (ret != HCMD_SUCCESS) goto set_local_link_attributes_fail; /* * DC supports continuous updates. 
*/ - ret = write_vc_local_phy(dd, 0 /* no power management */, - 1 /* continuous updates */); + ret = write_vc_local_phy(dd, + 0 /* no power management */, + 1 /* continuous updates */); if (ret != HCMD_SUCCESS) goto set_local_link_attributes_fail; @@ -9022,7 +9017,8 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd) goto set_local_link_attributes_fail; ret = write_vc_local_link_width(dd, 0, 0, - opa_to_vc_link_widths(ppd->link_width_enabled)); + opa_to_vc_link_widths( + ppd->link_width_enabled)); if (ret != HCMD_SUCCESS) goto set_local_link_attributes_fail; @@ -9033,8 +9029,8 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd) set_local_link_attributes_fail: dd_dev_err(dd, - "Failed to set local link attributes, return 0x%x\n", - ret); + "Failed to set local link attributes, return 0x%x\n", + ret); return ret; } @@ -9047,25 +9043,25 @@ int start_link(struct hfi1_pportdata *ppd) { if (!ppd->link_enabled) { dd_dev_info(ppd->dd, - "%s: stopping link start because link is disabled\n", - __func__); + "%s: stopping link start because link is disabled\n", + __func__); return 0; } if (!ppd->driver_link_ready) { dd_dev_info(ppd->dd, - "%s: stopping link start because driver is not ready\n", - __func__); + "%s: stopping link start because driver is not ready\n", + __func__); return 0; } if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES || - loopback == LOOPBACK_LCB || - ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) + loopback == LOOPBACK_LCB || + ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) return set_link_state(ppd, HLS_DN_POLL); dd_dev_info(ppd->dd, - "%s: stopping link start because no cable is present\n", - __func__); + "%s: stopping link start because no cable is present\n", + __func__); return -EAGAIN; } @@ -9121,20 +9117,19 @@ void reset_qsfp(struct hfi1_pportdata *ppd) mask = (u64)QSFP_HFI0_RESET_N; qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE); qsfp_mask |= mask; - write_csr(dd, - dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask); + write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask); - qsfp_mask = read_csr(dd, dd->hfi1_id ? - ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); + qsfp_mask = read_csr(dd, + dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); qsfp_mask &= ~mask; write_csr(dd, - dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); + dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); udelay(10); qsfp_mask |= mask; write_csr(dd, - dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); + dd->hfi1_id ? 
ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); wait_for_qsfp_init(ppd); @@ -9151,102 +9146,86 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, struct hfi1_devdata *dd = ppd->dd; if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) || - (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING)) - dd_dev_info(dd, - "%s: QSFP cable on fire\n", - __func__); + (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING)) + dd_dev_info(dd, "%s: QSFP cable on fire\n", + __func__); if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) || - (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING)) - dd_dev_info(dd, - "%s: QSFP cable temperature too low\n", - __func__); + (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING)) + dd_dev_info(dd, "%s: QSFP cable temperature too low\n", + __func__); if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || - (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) - dd_dev_info(dd, - "%s: QSFP supply voltage too high\n", - __func__); + (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) + dd_dev_info(dd, "%s: QSFP supply voltage too high\n", + __func__); if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) || - (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING)) - dd_dev_info(dd, - "%s: QSFP supply voltage too low\n", - __func__); + (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING)) + dd_dev_info(dd, "%s: QSFP supply voltage too low\n", + __func__); /* Byte 2 is vendor specific */ if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) || - (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING)) - dd_dev_info(dd, - "%s: Cable RX channel 1/2 power too high\n", - __func__); + (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING)) + dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n", + __func__); if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) || - (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING)) - dd_dev_info(dd, - "%s: Cable RX channel 1/2 power too low\n", - __func__); + (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING)) + dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n", + __func__); if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) || - (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING)) - dd_dev_info(dd, - "%s: Cable RX channel 3/4 power too high\n", - __func__); + (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING)) + dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n", + __func__); if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) || - (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING)) - dd_dev_info(dd, - "%s: Cable RX channel 3/4 power too low\n", - __func__); + (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING)) + dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n", + __func__); if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) || - (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING)) - dd_dev_info(dd, - "%s: Cable TX channel 1/2 bias too high\n", - __func__); + (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING)) + dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n", + __func__); if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) || - (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING)) - dd_dev_info(dd, - "%s: Cable TX channel 1/2 bias too low\n", - __func__); + (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING)) + dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n", + __func__); if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) || - (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING)) - dd_dev_info(dd, - "%s: Cable TX channel 3/4 bias too high\n", - __func__); + 
(qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING)) + dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n", + __func__); if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) || - (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING)) - dd_dev_info(dd, - "%s: Cable TX channel 3/4 bias too low\n", - __func__); + (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING)) + dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n", + __func__); if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) || - (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING)) - dd_dev_info(dd, - "%s: Cable TX channel 1/2 power too high\n", - __func__); + (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING)) + dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n", + __func__); if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) || - (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING)) - dd_dev_info(dd, - "%s: Cable TX channel 1/2 power too low\n", - __func__); + (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING)) + dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n", + __func__); if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) || - (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING)) - dd_dev_info(dd, - "%s: Cable TX channel 3/4 power too high\n", - __func__); + (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING)) + dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n", + __func__); if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) || - (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING)) - dd_dev_info(dd, - "%s: Cable TX channel 3/4 power too low\n", - __func__); + (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING)) + dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n", + __func__); /* Bytes 9-10 and 11-12 are reserved */ /* Bytes 13-15 are vendor specific */ @@ -9298,8 +9277,8 @@ void qsfp_event(struct work_struct *work) if (qsfp_read(ppd, dd->hfi1_id, 6, &qsfp_interrupt_status[0], 16) != 16) { dd_dev_info(dd, - "%s: Failed to read status of QSFP module\n", - __func__); + "%s: Failed to read status of QSFP module\n", + __func__); } else { unsigned long flags; @@ -9308,7 +9287,7 @@ void qsfp_event(struct work_struct *work) spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.check_interrupt_flags = 0; spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, - flags); + flags); } } } @@ -9430,7 +9409,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd) ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED); set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0, - OPA_LINKDOWN_REASON_SMA_DISABLED); + OPA_LINKDOWN_REASON_SMA_DISABLED); set_link_state(ppd, HLS_DN_OFFLINE); /* disable the port */ @@ -9486,8 +9465,8 @@ void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, pa = 0; } else if (type > PT_INVALID) { dd_dev_err(dd, - "unexpected receive array type %u for index %u, not handled\n", - type, index); + "unexpected receive array type %u for index %u, not handled\n", + type, index); goto done; } @@ -9702,12 +9681,15 @@ static void set_send_length(struct hfi1_pportdata *ppd) /* all kernel receive contexts have the same hdrqentsize */ for (i = 0; i < ppd->vls_supported; i++) { sc_set_cr_threshold(dd->vld[i].sc, - sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu, - dd->rcd[0]->rcvhdrqentsize)); + sc_mtu_to_threshold(dd->vld[i].sc, + dd->vld[i].mtu, + dd->rcd[0]-> + rcvhdrqentsize)); } sc_set_cr_threshold(dd->vld[15].sc, - sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu, - dd->rcd[0]->rcvhdrqentsize)); + 
sc_mtu_to_threshold(dd->vld[15].sc, + dd->vld[15].mtu, + dd->rcd[0]->rcvhdrqentsize)); /* Adjust maximum MTU for the port in DC */ dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 : @@ -9768,8 +9750,8 @@ static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs) break; if (time_after(jiffies, timeout)) { dd_dev_err(dd, - "timeout waiting for phy link state 0x%x, current state is 0x%x\n", - state, curr_state); + "timeout waiting for phy link state 0x%x, current state is 0x%x\n", + state, curr_state); return -ETIMEDOUT; } usleep_range(1950, 2050); /* sleep 2ms-ish */ @@ -9812,12 +9794,12 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) if (do_transition) { ret = set_physical_link_state(dd, - PLS_OFFLINE | (rem_reason << 8)); + PLS_OFFLINE | (rem_reason << 8)); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, - "Failed to transition to Offline link state, return %d\n", - ret); + "Failed to transition to Offline link state, return %d\n", + ret); return -EINVAL; } if (ppd->offline_disabled_reason == @@ -9862,7 +9844,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) ret = wait_fm_ready(dd, 7000); if (ret) { dd_dev_err(dd, - "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n"); + "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n"); /* state is really offline, so make it so */ ppd->host_link_state = HLS_DN_OFFLINE; return ret; @@ -9885,8 +9867,8 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) read_last_local_state(dd, &last_local_state); read_last_remote_state(dd, &last_remote_state); dd_dev_err(dd, - "LNI failure last states: local 0x%08x, remote 0x%08x\n", - last_local_state, last_remote_state); + "LNI failure last states: local 0x%08x, remote 0x%08x\n", + last_local_state, last_remote_state); } /* the active link width (downgrade) is 0 on link down */ @@ -10038,10 +10020,10 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) state == HLS_DN_POLL; dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__, - link_state_name(ppd->host_link_state), - link_state_name(orig_new_state), - poll_bounce ? "(bounce) " : "", - link_state_reason_name(ppd, state)); + link_state_name(ppd->host_link_state), + link_state_name(orig_new_state), + poll_bounce ? "(bounce) " : "", + link_state_reason_name(ppd, state)); was_up = !!(ppd->host_link_state & HLS_UP); @@ -10071,7 +10053,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) * simulator jumps from polling to link up. * Accept that here. 
*/ - /* OK */; + /* OK */ } else if (ppd->host_link_state != HLS_GOING_UP) { goto unexpected; } @@ -10082,8 +10064,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) /* logical state didn't change, stay at going_up */ ppd->host_link_state = HLS_GOING_UP; dd_dev_err(dd, - "%s: logical state did not change to INIT\n", - __func__); + "%s: logical state did not change to INIT\n", + __func__); } else { /* clear old transient LINKINIT_REASON code */ if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR) @@ -10107,8 +10089,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) /* logical state didn't change, stay at init */ ppd->host_link_state = HLS_UP_INIT; dd_dev_err(dd, - "%s: logical state did not change to ARMED\n", - __func__); + "%s: logical state did not change to ARMED\n", + __func__); } /* * The simulator does not currently implement SMA messages, @@ -10129,8 +10111,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) /* logical state didn't change, stay at armed */ ppd->host_link_state = HLS_UP_ARMED; dd_dev_err(dd, - "%s: logical state did not change to ACTIVE\n", - __func__); + "%s: logical state did not change to ACTIVE\n", + __func__); } else { /* tell all engines to go running */ sdma_all_running(dd); @@ -10178,8 +10160,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) ret1 = set_physical_link_state(dd, PLS_POLLING); if (ret1 != HCMD_SUCCESS) { dd_dev_err(dd, - "Failed to transition to Polling link state, return 0x%x\n", - ret1); + "Failed to transition to Polling link state, return 0x%x\n", + ret1); ret = -EINVAL; } } @@ -10209,8 +10191,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) ret1 = set_physical_link_state(dd, PLS_DISABLED); if (ret1 != HCMD_SUCCESS) { dd_dev_err(dd, - "Failed to transition to Disabled link state, return 0x%x\n", - ret1); + "Failed to transition to Disabled link state, return 0x%x\n", + ret1); ret = -EINVAL; break; } @@ -10238,8 +10220,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) ret1 = set_physical_link_state(dd, PLS_LINKUP); if (ret1 != HCMD_SUCCESS) { dd_dev_err(dd, - "Failed to transition to link up state, return 0x%x\n", - ret1); + "Failed to transition to link up state, return 0x%x\n", + ret1); ret = -EINVAL; break; } @@ -10250,7 +10232,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) case HLS_LINK_COOLDOWN: /* transient within goto_offline() */ default: dd_dev_info(dd, "%s: state 0x%x: not supported\n", - __func__, state); + __func__, state); ret = -EINVAL; break; } @@ -10270,8 +10252,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) unexpected: dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n", - __func__, link_state_name(ppd->host_link_state), - link_state_name(state)); + __func__, link_state_name(ppd->host_link_state), + link_state_name(state)); ret = -EINVAL; done: @@ -10359,8 +10341,8 @@ int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val) default: if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) dd_dev_info(ppd->dd, - "%s: which %s, val 0x%x: not implemented\n", - __func__, ib_cfg_name(which), val); + "%s: which %s, val 0x%x: not implemented\n", + __func__, ib_cfg_name(which), val); break; } return ret; @@ -10569,41 +10551,41 @@ static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems, static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) { write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, - DC_SC_VL_VAL(15_0, - 0, dp->vlnt[0] & 0xf, - 1, dp->vlnt[1] & 0xf, - 2, dp->vlnt[2] & 0xf, - 3, dp->vlnt[3] & 0xf, - 4, 
dp->vlnt[4] & 0xf, - 5, dp->vlnt[5] & 0xf, - 6, dp->vlnt[6] & 0xf, - 7, dp->vlnt[7] & 0xf, - 8, dp->vlnt[8] & 0xf, - 9, dp->vlnt[9] & 0xf, - 10, dp->vlnt[10] & 0xf, - 11, dp->vlnt[11] & 0xf, - 12, dp->vlnt[12] & 0xf, - 13, dp->vlnt[13] & 0xf, - 14, dp->vlnt[14] & 0xf, - 15, dp->vlnt[15] & 0xf)); + DC_SC_VL_VAL(15_0, + 0, dp->vlnt[0] & 0xf, + 1, dp->vlnt[1] & 0xf, + 2, dp->vlnt[2] & 0xf, + 3, dp->vlnt[3] & 0xf, + 4, dp->vlnt[4] & 0xf, + 5, dp->vlnt[5] & 0xf, + 6, dp->vlnt[6] & 0xf, + 7, dp->vlnt[7] & 0xf, + 8, dp->vlnt[8] & 0xf, + 9, dp->vlnt[9] & 0xf, + 10, dp->vlnt[10] & 0xf, + 11, dp->vlnt[11] & 0xf, + 12, dp->vlnt[12] & 0xf, + 13, dp->vlnt[13] & 0xf, + 14, dp->vlnt[14] & 0xf, + 15, dp->vlnt[15] & 0xf)); write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, - DC_SC_VL_VAL(31_16, - 16, dp->vlnt[16] & 0xf, - 17, dp->vlnt[17] & 0xf, - 18, dp->vlnt[18] & 0xf, - 19, dp->vlnt[19] & 0xf, - 20, dp->vlnt[20] & 0xf, - 21, dp->vlnt[21] & 0xf, - 22, dp->vlnt[22] & 0xf, - 23, dp->vlnt[23] & 0xf, - 24, dp->vlnt[24] & 0xf, - 25, dp->vlnt[25] & 0xf, - 26, dp->vlnt[26] & 0xf, - 27, dp->vlnt[27] & 0xf, - 28, dp->vlnt[28] & 0xf, - 29, dp->vlnt[29] & 0xf, - 30, dp->vlnt[30] & 0xf, - 31, dp->vlnt[31] & 0xf)); + DC_SC_VL_VAL(31_16, + 16, dp->vlnt[16] & 0xf, + 17, dp->vlnt[17] & 0xf, + 18, dp->vlnt[18] & 0xf, + 19, dp->vlnt[19] & 0xf, + 20, dp->vlnt[20] & 0xf, + 21, dp->vlnt[21] & 0xf, + 22, dp->vlnt[22] & 0xf, + 23, dp->vlnt[23] & 0xf, + 24, dp->vlnt[24] & 0xf, + 25, dp->vlnt[25] & 0xf, + 26, dp->vlnt[26] & 0xf, + 27, dp->vlnt[27] & 0xf, + 28, dp->vlnt[28] & 0xf, + 29, dp->vlnt[29] & 0xf, + 30, dp->vlnt[30] & 0xf, + 31, dp->vlnt[31] & 0xf)); } static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what, @@ -10611,7 +10593,7 @@ static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what, { if (limit != 0) dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n", - what, (int)limit, idx); + what, (int)limit, idx); } /* change only the shared limit portion of SendCmGLobalCredit */ @@ -10689,14 +10671,14 @@ static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, } dd_dev_err(dd, - "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n", - which, VL_STATUS_CLEAR_TIMEOUT, mask, reg); + "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n", + which, VL_STATUS_CLEAR_TIMEOUT, mask, reg); /* * If this occurs, it is likely there was a credit loss on the link. * The only recovery from that is a link bounce. */ dd_dev_err(dd, - "Continuing anyway. A credit loss may occur. Suggest a link bounce\n"); + "Continuing anyway. A credit loss may occur. Suggest a link bounce\n"); } /* @@ -10763,9 +10745,9 @@ int set_buffer_control(struct hfi1_pportdata *ppd, continue; } nonzero_msg(dd, i, "dedicated", - be16_to_cpu(new_bc->vl[i].dedicated)); + be16_to_cpu(new_bc->vl[i].dedicated)); nonzero_msg(dd, i, "shared", - be16_to_cpu(new_bc->vl[i].shared)); + be16_to_cpu(new_bc->vl[i].shared)); new_bc->vl[i].dedicated = 0; new_bc->vl[i].shared = 0; } @@ -10836,7 +10818,7 @@ int set_buffer_control(struct hfi1_pportdata *ppd, } wait_for_vl_status_clear(dd, use_all_mask ? 
all_mask : changing_mask, - "shared"); + "shared"); if (change_count > 0) { for (i = 0; i < NUM_USABLE_VLS; i++) { @@ -10845,7 +10827,8 @@ int set_buffer_control(struct hfi1_pportdata *ppd, if (lowering_dedicated[i]) { set_vl_dedicated(dd, i, - be16_to_cpu(new_bc->vl[i].dedicated)); + be16_to_cpu(new_bc-> + vl[i].dedicated)); cur_bc.vl[i].dedicated = new_bc->vl[i].dedicated; } @@ -10861,7 +10844,8 @@ int set_buffer_control(struct hfi1_pportdata *ppd, if (be16_to_cpu(new_bc->vl[i].dedicated) > be16_to_cpu(cur_bc.vl[i].dedicated)) set_vl_dedicated(dd, i, - be16_to_cpu(new_bc->vl[i].dedicated)); + be16_to_cpu(new_bc-> + vl[i].dedicated)); } } @@ -10877,9 +10861,9 @@ int set_buffer_control(struct hfi1_pportdata *ppd, /* finally raise the global shared */ if (be16_to_cpu(new_bc->overall_shared_limit) > - be16_to_cpu(cur_bc.overall_shared_limit)) + be16_to_cpu(cur_bc.overall_shared_limit)) set_global_shared(dd, - be16_to_cpu(new_bc->overall_shared_limit)); + be16_to_cpu(new_bc->overall_shared_limit)); /* bracket the credit change with a total adjustment */ if (new_total < cur_total) @@ -11152,7 +11136,8 @@ static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts) * been verified to be in range */ write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, - (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); + (u64)timeout << + RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); } void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, @@ -11370,14 +11355,14 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); if (reg != 0) { dd_dev_info(dd, "ctxt %d status %lld (blocked)\n", - ctxt, reg); + ctxt, reg); read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10); write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00); read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n", - ctxt, reg, reg == 0 ? "not" : "still"); + ctxt, reg, reg == 0 ? 
"not" : "still"); } } @@ -11388,7 +11373,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) */ /* set interrupt timeout */ write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT, - (u64)rcd->rcvavail_timeout << + (u64)rcd->rcvavail_timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */ @@ -11863,8 +11848,7 @@ static int init_cntrs(struct hfi1_devdata *dd) dev_cntrs[i].offset = dd->ndevcntrs; for (j = 0; j < C_VL_COUNT; j++) { snprintf(name, C_MAX_NAME, "%s%d", - dev_cntrs[i].name, - vl_from_idx(j)); + dev_cntrs[i].name, vl_from_idx(j)); sz += strlen(name); /* Add ",32" for 32-bit counters */ if (dev_cntrs[i].flags & CNTR_32BIT) @@ -11987,8 +11971,7 @@ static int init_cntrs(struct hfi1_devdata *dd) port_cntrs[i].offset = dd->nportcntrs; for (j = 0; j < C_VL_COUNT; j++) { snprintf(name, C_MAX_NAME, "%s%d", - port_cntrs[i].name, - vl_from_idx(j)); + port_cntrs[i].name, vl_from_idx(j)); sz += strlen(name); /* Add ",32" for 32-bit counters */ if (port_cntrs[i].flags & CNTR_32BIT) @@ -12021,8 +12004,7 @@ static int init_cntrs(struct hfi1_devdata *dd) if (port_cntrs[i].flags & CNTR_VL) { for (j = 0; j < C_VL_COUNT; j++) { snprintf(name, C_MAX_NAME, "%s%d", - port_cntrs[i].name, - vl_from_idx(j)); + port_cntrs[i].name, vl_from_idx(j)); memcpy(p, name, strlen(name)); p += strlen(name); @@ -12077,8 +12059,8 @@ static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) switch (chip_lstate) { default: dd_dev_err(dd, - "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n", - chip_lstate); + "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n", + chip_lstate); /* fall through */ case LSTATE_DOWN: return IB_PORT_DOWN; @@ -12097,7 +12079,7 @@ u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate) switch (chip_pstate & 0xf0) { default: dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n", - chip_pstate); + chip_pstate); /* fall through */ case PLS_DISABLED: return IB_PORTPHYSSTATE_DISABLED; @@ -12163,7 +12145,7 @@ u32 get_logical_state(struct hfi1_pportdata *ppd) new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd)); if (new_state != ppd->lstate) { dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", - opa_lstate_name(new_state), new_state); + opa_lstate_name(new_state), new_state); ppd->lstate = new_state; } /* @@ -12229,9 +12211,9 @@ u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd) ib_pstate = chip_to_opa_pstate(ppd->dd, pstate); if (ppd->last_pstate != ib_pstate) { dd_dev_info(ppd->dd, - "%s: physical state changed to %s (0x%x), phy 0x%x\n", - __func__, opa_pstate_name(ib_pstate), ib_pstate, - pstate); + "%s: physical state changed to %s (0x%x), phy 0x%x\n", + __func__, opa_pstate_name(ib_pstate), ib_pstate, + pstate); ppd->last_pstate = ib_pstate; } return ib_pstate; @@ -12449,11 +12431,11 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd, * SDMAIdle */ remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine, - msix_intr); + msix_intr); remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine, - msix_intr); + msix_intr); remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine, - msix_intr); + msix_intr); } static int request_intx_irq(struct hfi1_devdata *dd) @@ -12463,10 +12445,10 @@ static int request_intx_irq(struct hfi1_devdata *dd) snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d", dd->unit); ret = request_irq(dd->pcidev->irq, general_interrupt, - IRQF_SHARED, dd->intx_name, dd); + IRQF_SHARED, dd->intx_name, dd); if 
(ret) dd_dev_err(dd, "unable to request INTx interrupt, err %d\n", - ret); + ret); else dd->requested_intx_irq = 1; return ret; @@ -12549,7 +12531,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd) * ignore it */ dd_dev_err(dd, - "Unexpected extra MSI-X interrupt %d\n", i); + "Unexpected extra MSI-X interrupt %d\n", i); continue; } /* no argument, no interrupt */ @@ -12559,11 +12541,11 @@ static int request_msix_irqs(struct hfi1_devdata *dd) me->name[sizeof(me->name) - 1] = 0; ret = request_threaded_irq(me->msix.vector, handler, thread, 0, - me->name, arg); + me->name, arg); if (ret) { dd_dev_err(dd, - "unable to allocate %s interrupt, vector %d, index %d, err %d\n", - err_info, me->msix.vector, idx, ret); + "unable to allocate %s interrupt, vector %d, index %d, err %d\n", + err_info, me->msix.vector, idx, ret); return ret; } /* @@ -12748,11 +12730,11 @@ static int set_up_context_variables(struct hfi1_devdata *dd) dd->num_user_contexts = num_user_contexts; dd->freectxts = num_user_contexts; dd_dev_info(dd, - "rcv contexts: chip %d, used %d (kernel %d, user %d)\n", - (int)dd->chip_rcv_contexts, - (int)dd->num_rcv_contexts, - (int)dd->n_krcv_queues, - (int)dd->num_rcv_contexts - dd->n_krcv_queues); + "rcv contexts: chip %d, used %d (kernel %d, user %d)\n", + (int)dd->chip_rcv_contexts, + (int)dd->num_rcv_contexts, + (int)dd->n_krcv_queues, + (int)dd->num_rcv_contexts - dd->n_krcv_queues); /* * Receive array allocation: @@ -12778,8 +12760,8 @@ static int set_up_context_variables(struct hfi1_devdata *dd) dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / dd->rcv_entries.group_size; dd_dev_info(dd, - "RcvArray group count too high, change to %u\n", - dd->rcv_entries.ngroups); + "RcvArray group count too high, change to %u\n", + dd->rcv_entries.ngroups); dd->rcv_entries.nctxt_extra = 0; } /* @@ -12871,7 +12853,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) /* RcvArray */ for (i = 0; i < dd->chip_rcv_array_count; i++) write_csr(dd, RCV_ARRAY + (8 * i), - RCV_ARRAY_RT_WRITE_ENABLE_SMASK); + RCV_ARRAY_RT_WRITE_ENABLE_SMASK); /* RcvQPMapTable */ for (i = 0; i < 32; i++) @@ -12903,8 +12885,8 @@ static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits, return; if (time_after(jiffies, timeout)) { dd_dev_err(dd, - "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n", - status_bits, reg & status_bits); + "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n", + status_bits, reg & status_bits); return; } udelay(1); @@ -12936,7 +12918,7 @@ static void reset_cce_csrs(struct hfi1_devdata *dd) for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) { write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0); write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i), - CCE_MSIX_TABLE_UPPER_RESETCSR); + CCE_MSIX_TABLE_UPPER_RESETCSR); } for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) { /* CCE_MSIX_PBA read-only */ @@ -13120,8 +13102,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd) for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++) write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0); write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR); - write_csr(dd, SEND_CM_GLOBAL_CREDIT, - SEND_CM_GLOBAL_CREDIT_RESETCSR); + write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR); /* SEND_CM_CREDIT_USED_STATUS read-only */ write_csr(dd, SEND_CM_TIMER_CTRL, 0); write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0); @@ -13215,8 +13196,8 @@ static void init_rbufs(struct hfi1_devdata *dd) */ if (count++ > 500) { dd_dev_err(dd, - "%s: in-progress DMA not clearing: RcvStatus 
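/*
 * Aside: the request_threaded_irq() call above is the standard
 * split-handler pattern: a hard handler plus an optional threaded
 * bottom half, with the per-vector name built beforehand. A condensed
 * sketch (hypothetical_request_one is not a driver function;
 * general_interrupt is the handler named elsewhere in this patch):
 */
static int hypothetical_request_one(struct hfi1_devdata *dd,
				    unsigned int vector, void *arg)
{
	int ret;

	ret = request_threaded_irq(vector, general_interrupt,
				   NULL /* no threaded half */, 0,
				   "hfi1_example", arg);
	if (ret)
		dd_dev_err(dd, "unable to allocate interrupt, err %d\n",
			   ret);
	return ret;
}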
0x%llx, continuing\n", - __func__, reg); + "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n", + __func__, reg); break; } udelay(2); /* do not busy-wait the CSR */ @@ -13245,8 +13226,8 @@ static void init_rbufs(struct hfi1_devdata *dd) /* give up after 100us - slowest possible at 33MHz is 73us */ if (count++ > 50) { dd_dev_err(dd, - "%s: RcvStatus.RxRbufInit not set, continuing\n", - __func__); + "%s: RcvStatus.RxRbufInit not set, continuing\n", + __func__); break; } } @@ -13272,7 +13253,7 @@ static void reset_rxe_csrs(struct hfi1_devdata *dd) write_csr(dd, RCV_VL15, 0); /* this is a clear-down */ write_csr(dd, RCV_ERR_INFO, - RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK); + RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK); /* RCV_ERR_STATUS read-only */ write_csr(dd, RCV_ERR_MASK, 0); write_csr(dd, RCV_ERR_CLEAR, ~0ull); @@ -13318,8 +13299,8 @@ static void reset_rxe_csrs(struct hfi1_devdata *dd) write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0); /* RCV_EGR_OFFSET_TAIL read-only */ for (j = 0; j < RXE_NUM_TID_FLOWS; j++) { - write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), - 0); + write_uctxt_csr(dd, i, + RCV_TID_FLOW_TABLE + (8 * j), 0); } } } @@ -13519,12 +13500,12 @@ static void init_kdeth_qp(struct hfi1_devdata *dd) kdeth_qp = DEFAULT_KDETH_QP; write_csr(dd, SEND_BTH_QP, - (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) - << SEND_BTH_QP_KDETH_QP_SHIFT); + (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) << + SEND_BTH_QP_KDETH_QP_SHIFT); write_csr(dd, RCV_BTH_QP, - (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) - << RCV_BTH_QP_KDETH_QP_SHIFT); + (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) << + RCV_BTH_QP_KDETH_QP_SHIFT); } /** @@ -13649,22 +13630,21 @@ static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt) write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]); /* add rule0 */ write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */, - RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK - << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT | - 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT); + RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK << + RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT | + 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT); write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */, - LRH_BTH_MATCH_OFFSET - << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | - LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | - LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | - ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | - QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | - ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); + LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | + LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | + LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | + ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | + QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | + ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */, - LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT | - LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT | - LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT | - LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT); + LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT | + LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT | + LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT | + LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT); /* Enable RSM */ add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); kfree(rsmmap); @@ -13682,9 +13662,8 @@ static void init_rxe(struct hfi1_devdata *dd) /* enable all receive errors */ write_csr(dd, RCV_ERR_MASK, ~0ull); /* setup QPN map table - start where VL15 context leaves off 
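/*
 * Aside: init_rbufs() above polls a status CSR with a hard retry
 * budget derived from worst-case hardware timing, rather than waiting
 * forever. The idiom, reduced to a sketch (the helper name, register
 * name, and 500-iteration budget are illustrative):
 */
static void hypothetical_wait_clear(struct hfi1_devdata *dd, u64 bits)
{
	int count = 0;
	u64 reg;

	while ((reg = read_csr(dd, RCV_STATUS)) & bits) {
		if (count++ > 500) {
			dd_dev_err(dd,
				   "status 0x%llx not clearing, continuing\n",
				   reg);
			break;	/* log and move on; do not hang init */
		}
		udelay(2);	/* do not busy-wait the CSR */
	}
}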
*/ - init_qos( - dd, - dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0); + init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? + MIN_KERNEL_KCTXTS : 0); /* * make sure RcvCtrl.RcvWcb <= PCIe Device Control * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config @@ -13721,35 +13700,33 @@ static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu, u32 csr0to3, u32 csr4to7) { write_csr(dd, csr0to3, - 0ull << - SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT - | 1ull << - SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT - | 2ull * cu << - SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT - | 4ull * cu << - SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT); + 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT | + 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT | + 2ull * cu << + SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT | + 4ull * cu << + SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT); write_csr(dd, csr4to7, - 8ull * cu << - SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT - | 16ull * cu << - SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT - | 32ull * cu << - SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT - | 64ull * cu << - SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT); + 8ull * cu << + SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT | + 16ull * cu << + SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT | + 32ull * cu << + SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT | + 64ull * cu << + SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT); } static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu) { assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3, - SEND_CM_LOCAL_AU_TABLE4_TO7); + SEND_CM_LOCAL_AU_TABLE4_TO7); } void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu) { assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3, - SEND_CM_REMOTE_AU_TABLE4_TO7); + SEND_CM_REMOTE_AU_TABLE4_TO7); } static void init_txe(struct hfi1_devdata *dd) @@ -13995,8 +13972,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, "Functional simulator" }; - dd = hfi1_alloc_devdata(pdev, - NUM_IB_PORTS * sizeof(struct hfi1_pportdata)); + dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * + sizeof(struct hfi1_pportdata)); if (IS_ERR(dd)) goto bail; ppd = dd->pport; @@ -14083,8 +14060,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; dd_dev_info(dd, "Implementation: %s, revision 0x%x\n", - dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown", - (int)dd->irev); + dd->icode < ARRAY_SIZE(inames) ? 
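/*
 * Aside: assign_cm_au_table() above programs the eight AU table
 * entries {0, 1, 2*cu, 4*cu, ..., 64*cu}; every entry from index 2 up
 * is cu shifted left by (index - 1). A sketch (hypothetical helper,
 * not driver code):
 */
static void hypothetical_au_entries(u32 cu, u64 au[8])
{
	int i;

	au[0] = 0;
	au[1] = 1;
	for (i = 2; i < 8; i++)
		au[i] = (u64)cu << (i - 1);	/* 2*cu, 4*cu, ... 64*cu */
}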
+ inames[dd->icode] : "unknown", (int)dd->irev); /* speeds the hardware can support */ dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index e02c5278d77a..0b0fd8a70ccf 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -150,8 +150,8 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v) if (!n_packets && !n_bytes) return SEQ_SKIP; seq_printf(s, "%02llx %llu/%llu\n", i, - (unsigned long long)n_packets, - (unsigned long long)n_bytes); + (unsigned long long)n_packets, + (unsigned long long)n_bytes); return 0; } @@ -246,7 +246,7 @@ __acquires(RCU) } static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr, - loff_t *pos) + loff_t *pos) { struct qp_iter *iter = iter_ptr; @@ -392,7 +392,7 @@ static ssize_t portnames_read(struct file *file, char __user *buf, /* read the per-port counters */ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { u64 *counters; size_t avail; @@ -413,7 +413,7 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf, * read the per-port QSFP data for ppd */ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { struct hfi1_pportdata *ppd; char *tmp; @@ -437,7 +437,7 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf, /* Do an i2c write operation on the chain for the given HFI. */ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos, u32 target) + size_t count, loff_t *ppos, u32 target) { struct hfi1_pportdata *ppd; char *buff; @@ -484,21 +484,21 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, /* Do an i2c write operation on chain for HFI 0. */ static ssize_t i2c1_debugfs_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { return __i2c_debugfs_write(file, buf, count, ppos, 0); } /* Do an i2c write operation on chain for HFI 1. */ static ssize_t i2c2_debugfs_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { return __i2c_debugfs_write(file, buf, count, ppos, 1); } /* Do an i2c read operation on the chain for the given HFI. */ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos, u32 target) + size_t count, loff_t *ppos, u32 target) { struct hfi1_pportdata *ppd; char *buff; @@ -545,21 +545,21 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, /* Do an i2c read operation on chain for HFI 0. */ static ssize_t i2c1_debugfs_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { return __i2c_debugfs_read(file, buf, count, ppos, 0); } /* Do an i2c read operation on chain for HFI 1. */ static ssize_t i2c2_debugfs_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { return __i2c_debugfs_read(file, buf, count, ppos, 1); } /* Do a QSFP write operation on the i2c chain for the given HFI. 
*/ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos, u32 target) + size_t count, loff_t *ppos, u32 target) { struct hfi1_pportdata *ppd; char *buff; @@ -605,21 +605,21 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf, /* Do a QSFP write operation on i2c chain for HFI 0. */ static ssize_t qsfp1_debugfs_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { return __qsfp_debugfs_write(file, buf, count, ppos, 0); } /* Do a QSFP write operation on i2c chain for HFI 1. */ static ssize_t qsfp2_debugfs_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { return __qsfp_debugfs_write(file, buf, count, ppos, 1); } /* Do a QSFP read operation on the i2c chain for the given HFI. */ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos, u32 target) + size_t count, loff_t *ppos, u32 target) { struct hfi1_pportdata *ppd; char *buff; @@ -665,14 +665,14 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf, /* Do a QSFP read operation on i2c chain for HFI 0. */ static ssize_t qsfp1_debugfs_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { return __qsfp_debugfs_read(file, buf, count, ppos, 0); } /* Do a QSFP read operation on i2c chain for HFI 1. */ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { return __qsfp_debugfs_read(file, buf, count, ppos, 1); } diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 3ef297ecdd60..50a3b5adab0a 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -438,7 +438,7 @@ drop: } static inline void init_packet(struct hfi1_ctxtdata *rcd, - struct hfi1_packet *packet) + struct hfi1_packet *packet) { packet->rsize = rcd->rcvhdrqentsize; /* words */ packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */ @@ -700,8 +700,9 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread) * The +2 is the size of the RHF. 
*/ prefetch_range(packet->ebuf, - packet->tlen - ((packet->rcd->rcvhdrqentsize - - (rhf_hdrq_offset(packet->rhf) + 2)) * 4)); + packet->tlen - ((packet->rcd->rcvhdrqentsize - + (rhf_hdrq_offset(packet->rhf) + + 2)) * 4)); } /* @@ -958,9 +959,9 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) prescan_rxq(rcd, &packet); while (last == RCV_PKT_OK) { - - if (unlikely(dd->do_drop && atomic_xchg(&dd->drop_packet, - DROP_PACKET_OFF) == DROP_PACKET_ON)) { + if (unlikely(dd->do_drop && + atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) == + DROP_PACKET_ON)) { dd->do_drop = 0; /* On to the next packet */ @@ -990,8 +991,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) if (seq != rcd->seq_cnt) last = RCV_PKT_DONE; if (needset) { - dd_dev_info(dd, - "Switching to NO_DMA_RTAIL\n"); + dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n"); set_all_nodma_rtail(dd); needset = 0; } @@ -1234,7 +1234,7 @@ void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, if (atomic_inc_return(&ppd->led_override_timer_active) == 1) { /* Need to start timer */ setup_timer(&ppd->led_override_timer, run_led_override, - (unsigned long)ppd); + (unsigned long)ppd); ppd->led_override_timer.expires = jiffies + 1; add_timer(&ppd->led_override_timer); @@ -1271,8 +1271,8 @@ int hfi1_reset_device(int unit) if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) { dd_dev_info(dd, - "Invalid unit number %u or not initialized or not present\n", - unit); + "Invalid unit number %u or not initialized or not present\n", + unit); ret = -ENXIO; goto bail; } @@ -1302,11 +1302,11 @@ int hfi1_reset_device(int unit) if (ret) dd_dev_err(dd, - "Reinitialize unit %u after reset failed with %d\n", - unit, ret); + "Reinitialize unit %u after reset failed with %d\n", + unit, ret); else dd_dev_info(dd, "Reinitialized unit %u after resetting\n", - unit); + unit); bail: return ret; @@ -1363,7 +1363,7 @@ int process_receive_bypass(struct hfi1_packet *packet) handle_eflags(packet); dd_dev_err(packet->rcd->dd, - "Bypass packets are not supported in normal operation. Dropping\n"); + "Bypass packets are not supported in normal operation. Dropping\n"); return RHF_RCV_CONTINUE; } @@ -1401,6 +1401,6 @@ int kdeth_process_eager(struct hfi1_packet *packet) int process_receive_invalid(struct hfi1_packet *packet) { dd_dev_err(packet->rcd->dd, "Invalid packet type %d. 
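/*
 * Aside: the drop-packet hunk above relies on atomic_xchg() to read
 * and clear the trigger in one step, so even with concurrent receive
 * threads exactly one packet is dropped per arming. The core of the
 * idiom (hypothetical_test_and_disarm is not a driver symbol):
 */
static bool hypothetical_test_and_disarm(atomic_t *flag)
{
	/* true at most once per arming, even under concurrency */
	return atomic_xchg(flag, DROP_PACKET_OFF) == DROP_PACKET_ON;
}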
Dropping\n", - rhf_rcv_type(packet->rhf)); + rhf_rcv_type(packet->rhf)); return RHF_RCV_CONTINUE; } diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c index d7250af1d08b..f36d06bc2817 100644 --- a/drivers/staging/rdma/hfi1/eprom.c +++ b/drivers/staging/rdma/hfi1/eprom.c @@ -115,11 +115,9 @@ static DEFINE_MUTEX(eprom_mutex); static void write_enable(struct hfi1_devdata *dd) { /* raise signal */ - write_csr(dd, ASIC_GPIO_OUT, - read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N); + write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N); /* raise enable */ - write_csr(dd, ASIC_GPIO_OE, - read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N); + write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N); } /* @@ -128,11 +126,9 @@ static void write_enable(struct hfi1_devdata *dd) static void write_disable(struct hfi1_devdata *dd) { /* lower signal */ - write_csr(dd, ASIC_GPIO_OUT, - read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N); + write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N); /* lower enable */ - write_csr(dd, ASIC_GPIO_OE, - read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N); + write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N); } /* @@ -210,8 +206,8 @@ static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len) /* check the end points for the minimum erase */ if ((start & MASK_4KB) || (end & MASK_4KB)) { dd_dev_err(dd, - "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n", - __func__, start, end); + "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n", + __func__, start, end); return -EINVAL; } @@ -275,7 +271,7 @@ static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr) for (offset = 0; offset < len; offset += EP_PAGE_SIZE) { read_page(dd, start + offset, buffer); if (copy_to_user((void __user *)(addr + offset), - buffer, EP_PAGE_SIZE)) { + buffer, EP_PAGE_SIZE)) { ret = -EFAULT; goto done; } @@ -319,7 +315,7 @@ static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr) for (offset = 0; offset < len; offset += EP_PAGE_SIZE) { if (copy_from_user(buffer, (void __user *)(addr + offset), - EP_PAGE_SIZE)) { + EP_PAGE_SIZE)) { ret = -EFAULT; goto done; } @@ -385,13 +381,13 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) ret = acquire_hw_mutex(dd); if (ret) { dd_dev_err(dd, - "%s: unable to acquire hw mutex, no EPROM support\n", - __func__); + "%s: unable to acquire hw mutex, no EPROM support\n", + __func__); goto done_asic; } dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n", - __func__, cmd->type, cmd->len, cmd->addr); + __func__, cmd->type, cmd->len, cmd->addr); switch (cmd->type) { case HFI1_CMD_EP_INFO: @@ -402,7 +398,7 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) dev_id = read_device_id(dd); /* addr points to a u32 user buffer */ if (copy_to_user((void __user *)cmd->addr, &dev_id, - sizeof(u32))) + sizeof(u32))) ret = -EFAULT; break; @@ -430,7 +426,7 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) default: dd_dev_err(dd, "%s: unexpected command %d\n", - __func__, cmd->type); + __func__, cmd->type); ret = -EINVAL; break; } @@ -464,19 +460,18 @@ int eprom_init(struct hfi1_devdata *dd) ret = acquire_hw_mutex(dd); if (ret) { dd_dev_err(dd, - "%s: unable to acquire hw mutex, no EPROM support\n", - __func__); + "%s: unable to acquire hw mutex, no EPROM support\n", + __func__); goto done_asic; } /* reset EPROM to be sure it is in a good state */ /* set reset */ - write_csr(dd, 
ASIC_EEP_CTL_STAT, - ASIC_EEP_CTL_STAT_EP_RESET_SMASK); + write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK); /* clear reset, set speed */ write_csr(dd, ASIC_EEP_CTL_STAT, - EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT); + EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT); /* wake the device with command "release powerdown NoID" */ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID); diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index c4b9dd49dfa7..1bd1545d083d 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -1145,9 +1145,9 @@ static int user_init(struct file *fp) * has done it. */ if (fd->subctxt) { - ret = wait_event_interruptible(uctxt->wait, - !test_bit(HFI1_CTXT_MASTER_UNINIT, - &uctxt->event_flags)); + ret = wait_event_interruptible(uctxt->wait, !test_bit( + HFI1_CTXT_MASTER_UNINIT, + &uctxt->event_flags)); goto expected; } @@ -1592,7 +1592,7 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence) /* NOTE: assumes unsigned long is 8 bytes */ static ssize_t ui_read(struct file *filp, char __user *buf, size_t count, - loff_t *f_pos) + loff_t *f_pos) { struct hfi1_devdata *dd = filp->private_data; void __iomem *base = dd->kregbase; diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 3a7163dab39e..52a3e8c95a07 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -382,8 +382,8 @@ static int invalid_header(struct hfi1_devdata *dd, const char *what, return 0; dd_dev_err(dd, - "invalid firmware header field %s: expected 0x%x, actual 0x%x\n", - what, expected, actual); + "invalid firmware header field %s: expected 0x%x, actual 0x%x\n", + what, expected, actual); return 1; } @@ -393,13 +393,19 @@ static int invalid_header(struct hfi1_devdata *dd, const char *what, static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css) { /* verify CSS header fields (most sizes are in DW, so add /4) */ - if (invalid_header(dd, "module_type", css->module_type, CSS_MODULE_TYPE) || - invalid_header(dd, "header_len", css->header_len, (sizeof(struct firmware_file) / 4)) || - invalid_header(dd, "header_version", css->header_version, CSS_HEADER_VERSION) || - invalid_header(dd, "module_vendor", css->module_vendor, CSS_MODULE_VENDOR) || + if (invalid_header(dd, "module_type", css->module_type, + CSS_MODULE_TYPE) || + invalid_header(dd, "header_len", css->header_len, + (sizeof(struct firmware_file) / 4)) || + invalid_header(dd, "header_version", css->header_version, + CSS_HEADER_VERSION) || + invalid_header(dd, "module_vendor", css->module_vendor, + CSS_MODULE_VENDOR) || invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) || - invalid_header(dd, "modulus_size", css->modulus_size, KEY_SIZE / 4) || - invalid_header(dd, "exponent_size", css->exponent_size, EXPONENT_SIZE / 4)) { + invalid_header(dd, "modulus_size", css->modulus_size, + KEY_SIZE / 4) || + invalid_header(dd, "exponent_size", css->exponent_size, + EXPONENT_SIZE / 4)) { return -EINVAL; } return 0; @@ -414,8 +420,8 @@ static int payload_check(struct hfi1_devdata *dd, const char *name, /* make sure we have some payload */ if (prefix_size >= file_size) { dd_dev_err(dd, - "firmware \"%s\", size %ld, must be larger than %ld bytes\n", - name, file_size, prefix_size); + "firmware \"%s\", size %ld, must be larger than %ld bytes\n", + name, file_size, prefix_size); return -EINVAL; } @@ -491,7 +497,7 @@ static int 
obtain_one_firmware(struct hfi1_devdata *dd, const char *name, /* make sure there are bytes in the payload */ ret = payload_check(dd, name, fdet->fw->size, - sizeof(struct firmware_file)); + sizeof(struct firmware_file)); if (ret == 0) { fdet->css_header = css; fdet->modulus = ff->modulus; @@ -516,7 +522,7 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name, /* make sure there are bytes in the payload */ ret = payload_check(dd, name, fdet->fw->size, - sizeof(struct augmented_firmware_file)); + sizeof(struct augmented_firmware_file)); if (ret == 0) { fdet->css_header = css; fdet->modulus = aff->modulus; @@ -531,9 +537,10 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name, } else { /* css->size check failed */ dd_dev_err(dd, - "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n", - fdet->fw->size / 4, (fdet->fw->size - AUGMENT_SIZE) / 4, - css->size); + "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n", + fdet->fw->size / 4, + (fdet->fw->size - AUGMENT_SIZE) / 4, + css->size); ret = -EINVAL; } @@ -696,7 +703,7 @@ static int obtain_firmware(struct hfi1_devdata *dd) if (platform_config_load) { platform_config = NULL; err = request_firmware(&platform_config, platform_config_name, - &dd->pcidev->dev); + &dd->pcidev->dev); if (err) { platform_config = NULL; goto done; @@ -837,7 +844,7 @@ static int run_rsa(struct hfi1_devdata *dd, const char *who, >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT; if (status != RSA_STATUS_IDLE) { dd_dev_err(dd, "%s security engine not idle - giving up\n", - who); + who); return -EBUSY; } @@ -874,7 +881,7 @@ static int run_rsa(struct hfi1_devdata *dd, const char *who, if (status == RSA_STATUS_IDLE) { /* should not happen */ dd_dev_err(dd, "%s firmware security bad idle state\n", - who); + who); ret = -EINVAL; break; } else if (status == RSA_STATUS_DONE) { @@ -908,8 +915,8 @@ static int run_rsa(struct hfi1_devdata *dd, const char *who, * is not keeping the error high. */ write_csr(dd, MISC_ERR_CLEAR, - MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK - | MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK); + MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK | + MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK); /* * All that is left are the current errors. Print warnings on * authorization failure details, if any. Firmware authorization @@ -938,7 +945,8 @@ static void load_security_variables(struct hfi1_devdata *dd, write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE); /* Security variables d. 
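/*
 * Aside: the payload_check() calls above encode a simple rule: a
 * firmware image must be strictly larger than its header/prefix, or
 * there is no payload to load. Reduced to its core (hypothetical
 * helper):
 */
static int hypothetical_payload_check(size_t file_size, size_t prefix_size)
{
	/* reject header-only (or truncated) firmware files */
	return prefix_size >= file_size ? -EINVAL : 0;
}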
Write the header */ write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD, - (u8 *)fdet->css_header, sizeof(struct css_header)); + (u8 *)fdet->css_header, + sizeof(struct css_header)); } /* return the 8051 firmware state */ @@ -1018,7 +1026,7 @@ static int load_8051_firmware(struct hfi1_devdata *dd, /* Firmware load steps 3-5 */ ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr, - fdet->firmware_len); + fdet->firmware_len); if (ret) return ret; @@ -1045,13 +1053,13 @@ static int load_8051_firmware(struct hfi1_devdata *dd, ret = wait_fm_ready(dd, TIMEOUT_8051_START); if (ret) { /* timed out */ dd_dev_err(dd, "8051 start timeout, current state 0x%x\n", - get_firmware_state(dd)); + get_firmware_state(dd)); return -ETIMEDOUT; } read_misc_status(dd, &ver_a, &ver_b); dd_dev_info(dd, "8051 firmware version %d.%d\n", - (int)ver_b, (int)ver_a); + (int)ver_b, (int)ver_a); dd->dc8051_ver = dc8051_ver(ver_b, ver_a); return 0; @@ -1066,11 +1074,11 @@ void sbus_request(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr, u8 command, u32 data_in) { write_csr(dd, ASIC_CFG_SBUS_REQUEST, - ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) - | ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) - | ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) - | ((u64)receiver_addr - << ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT)); + ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) | + ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) | + ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) | + ((u64)receiver_addr << + ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT)); } /* @@ -1088,14 +1096,14 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags) return; dd_dev_info(dd, "Turning off spicos:%s%s\n", - flags & SPICO_SBUS ? " SBus" : "", - flags & SPICO_FABRIC ? " fabric" : ""); + flags & SPICO_SBUS ? " SBus" : "", + flags & SPICO_FABRIC ? 
" fabric" : ""); write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK); /* disable SBus spico */ if (flags & SPICO_SBUS) sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01, - WRITE_SBUS_RECEIVER, 0x00000040); + WRITE_SBUS_RECEIVER, 0x00000040); /* disable the fabric serdes spicos */ if (flags & SPICO_FABRIC) @@ -1222,7 +1230,7 @@ static int load_fabric_serdes_firmware(struct hfi1_devdata *dd, /* step 5: download SerDes machine code */ for (i = 0; i < fdet->firmware_len; i += 4) { sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER, - *(u32 *)&fdet->firmware_ptr[i]); + *(u32 *)&fdet->firmware_ptr[i]); } /* step 6: IMEM override off */ sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000); @@ -1261,7 +1269,7 @@ static int load_sbus_firmware(struct hfi1_devdata *dd, /* step 5: download the SBus Master machine code */ for (i = 0; i < fdet->firmware_len; i += 4) { sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER, - *(u32 *)&fdet->firmware_ptr[i]); + *(u32 *)&fdet->firmware_ptr[i]); } /* step 6: set IMEM_CNTL_EN off */ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040); @@ -1300,7 +1308,7 @@ static int load_pcie_serdes_firmware(struct hfi1_devdata *dd, */ for (i = 0; i < fdet->firmware_len; i += 4) { sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER, - *(u32 *)&fdet->firmware_ptr[i]); + *(u32 *)&fdet->firmware_ptr[i]); } /* step 5: disable XDMEM access */ sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140); @@ -1334,7 +1342,7 @@ static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2, * 23:16 BROADCAST_GROUP_2 (default 0xff) */ sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER, - (u32)bg1 << 4 | (u32)bg2 << 16); + (u32)bg1 << 4 | (u32)bg2 << 16); } } @@ -1359,8 +1367,8 @@ retry: /* timed out */ dd_dev_err(dd, - "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n", - (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up"); + "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n", + (u32)user, (u32)mask, (try == 0) ? 
"retrying" : "giving up"); if (try == 0) { /* break mutex and retry */ @@ -1380,7 +1388,7 @@ void release_hw_mutex(struct hfi1_devdata *dd) void set_sbus_fast_mode(struct hfi1_devdata *dd) { write_csr(dd, ASIC_CFG_SBUS_EXECUTE, - ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK); + ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK); } void clear_sbus_fast_mode(struct hfi1_devdata *dd) @@ -1410,9 +1418,9 @@ int load_firmware(struct hfi1_devdata *dd) set_sbus_fast_mode(dd); set_serdes_broadcast(dd, all_fabric_serdes_broadcast, - fabric_serdes_broadcast[dd->hfi1_id], - fabric_serdes_addrs[dd->hfi1_id], - NUM_FABRIC_SERDES); + fabric_serdes_broadcast[dd->hfi1_id], + fabric_serdes_addrs[dd->hfi1_id], + NUM_FABRIC_SERDES); turn_off_spicos(dd, SPICO_FABRIC); do { ret = load_fabric_serdes_firmware(dd, &fw_fabric); @@ -1551,8 +1559,8 @@ int parse_platform_config(struct hfi1_devdata *dd) header2 = *(ptr + 1); if (header1 != ~header2) { dd_dev_info(dd, "%s: Failed validation at offset %ld\n", - __func__, (ptr - - (u32 *)dd->platform_config.data)); + __func__, (ptr - (u32 *) + dd->platform_config.data)); goto bail; } @@ -1595,9 +1603,10 @@ int parse_platform_config(struct hfi1_devdata *dd) break; default: dd_dev_info(dd, - "%s: Unknown data table %d, offset %ld\n", - __func__, table_type, - (ptr - (u32 *)dd->platform_config.data)); + "%s: Unknown data table %d, offset %ld\n", + __func__, table_type, + (ptr - (u32 *) + dd->platform_config.data)); goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table = ptr; @@ -1630,7 +1639,7 @@ int parse_platform_config(struct hfi1_devdata *dd) /* Calculate and check table crc */ crc = crc32_le(~(u32)0, (unsigned char const *)ptr, - (table_length_dwords * 4)); + (table_length_dwords * 4)); crc ^= ~(u32)0; /* Jump the table */ @@ -1654,7 +1663,8 @@ bail: } static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table, - int field, u32 *field_len_bits, u32 *field_start_bits) + int field, u32 *field_len_bits, + u32 *field_start_bits) { struct platform_config_cache *pcfgcache = &dd->pcfg_cache; u32 *src_ptr = NULL; @@ -1714,8 +1724,9 @@ static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table, * @len: length of memory pointed by @data in bytes. 
*/ int get_platform_config_field(struct hfi1_devdata *dd, - enum platform_config_table_type_encoding table_type, - int table_index, int field_index, u32 *data, u32 len) + enum platform_config_table_type_encoding + table_type, int table_index, int field_index, + u32 *data, u32 len) { int ret = 0, wlen = 0, seek = 0; u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL; @@ -1727,7 +1738,8 @@ int get_platform_config_field(struct hfi1_devdata *dd, return -EINVAL; ret = get_platform_fw_field_metadata(dd, table_type, field_index, - &field_len_bits, &field_start_bits); + &field_len_bits, + &field_start_bits); if (ret) return -EINVAL; @@ -1817,9 +1829,9 @@ int load_pcie_firmware(struct hfi1_devdata *dd) if (fw_pcie_serdes_load) { dd_dev_info(dd, "Setting PCIe SerDes broadcast\n"); set_serdes_broadcast(dd, all_pcie_serdes_broadcast, - pcie_serdes_broadcast[dd->hfi1_id], - pcie_serdes_addrs[dd->hfi1_id], - NUM_PCIE_SERDES); + pcie_serdes_broadcast[dd->hfi1_id], + pcie_serdes_addrs[dd->hfi1_id], + NUM_PCIE_SERDES); do { ret = load_pcie_serdes_firmware(dd, &fw_pcie); } while (retry_firmware(dd, ret)); @@ -1844,5 +1856,5 @@ void read_guid(struct hfi1_devdata *dd) dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID); dd_dev_info(dd, "GUID %llx", - (unsigned long long)dd->base_guid); + (unsigned long long)dd->base_guid); } diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 4db5ad9921a9..07df5153703c 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1715,8 +1715,9 @@ void restore_pci_variables(struct hfi1_devdata *dd); int do_pcie_gen3_transition(struct hfi1_devdata *dd); int parse_platform_config(struct hfi1_devdata *dd); int get_platform_config_field(struct hfi1_devdata *dd, - enum platform_config_table_type_encoding table_type, - int table_index, int field_index, u32 *data, u32 len); + enum platform_config_table_type_encoding + table_type, int table_index, int field_index, + u32 *data, u32 len); const char *get_unit_name(int unit); const char *get_card_name(struct rvt_dev_info *rdi); diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index f794604bea2a..a7210593e4fd 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -149,7 +149,7 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) rcd = hfi1_create_ctxtdata(ppd, i, dd->node); if (!rcd) { dd_dev_err(dd, - "Unable to allocate kernel receive context, failing\n"); + "Unable to allocate kernel receive context, failing\n"); goto nomem; } /* @@ -170,7 +170,7 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node); if (!rcd->sc) { dd_dev_err(dd, - "Unable to allocate kernel send context, failing\n"); + "Unable to allocate kernel send context, failing\n"); dd->rcd[rcd->ctxt] = NULL; hfi1_free_ctxtdata(dd, rcd); goto nomem; @@ -741,7 +741,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) lastfail = hfi1_setup_eagerbufs(rcd); if (lastfail) dd_dev_err(dd, - "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); + "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); } if (lastfail) ret = lastfail; @@ -797,8 +797,8 @@ done: lastfail = bringup_serdes(ppd); if (lastfail) dd_dev_info(dd, - "Failed to bring up port %u\n", - ppd->port); + "Failed to bring up port %u\n", + ppd->port); /* * Set status even if port serdes is not initialized @@ -1542,8 +1542,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) if 
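/*
 * Aside: get_platform_config_field() locates a value with the
 * (field_start_bits, field_len_bits) metadata fetched above; the
 * extraction itself is a shift-and-mask. A sketch (hypothetical
 * helper, assumes len_bits < 32):
 */
static u32 hypothetical_get_field(u32 word, u32 start_bits, u32 len_bits)
{
	return (word >> start_bits) & ((1U << len_bits) - 1);
}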
(!rcd->rcvhdrq) { dd_dev_err(dd, - "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", - amt, rcd->ctxt); + "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", + amt, rcd->ctxt); goto bail; } @@ -1587,8 +1587,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) bail_free: dd_dev_err(dd, - "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", - rcd->ctxt); + "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", + rcd->ctxt); vfree(rcd->user_event_mask); rcd->user_event_mask = NULL; dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, @@ -1678,7 +1678,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) if (rcd->egrbufs.rcvtid_size == round_mtu || !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) { dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", - rcd->ctxt); + rcd->ctxt); goto bail_rcvegrbuf_phys; } @@ -1760,14 +1760,14 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) for (idx = 0; idx < rcd->egrbufs.alloced; idx++) { hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER, - rcd->egrbufs.rcvtids[idx].phys, order); + rcd->egrbufs.rcvtids[idx].phys, order); cond_resched(); } goto bail; bail_rcvegrbuf_phys: for (idx = 0; idx < rcd->egrbufs.alloced && - rcd->egrbufs.buffers[idx].addr; + rcd->egrbufs.buffers[idx].addr; idx++) { dma_free_coherent(&dd->pcidev->dev, rcd->egrbufs.buffers[idx].len, diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c index 03cebae672a3..46eeeca59bbc 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/staging/rdma/hfi1/intr.c @@ -135,18 +135,16 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) set_up_vl15(dd, dd->vau, dd->vl15_init); assign_remote_cm_au_table(dd, dd->vcu); ppd->neighbor_guid = - read_csr(dd, - DC_DC8051_STS_REMOTE_GUID); + read_csr(dd, DC_DC8051_STS_REMOTE_GUID); ppd->neighbor_type = read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) & DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK; ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) & - DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK; - dd_dev_info(dd, - "Neighbor GUID: %llx Neighbor type %d\n", - ppd->neighbor_guid, - ppd->neighbor_type); + DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK; + dd_dev_info(dd, "Neighbor GUID: %llx Neighbor type %d\n", + ppd->neighbor_guid, + ppd->neighbor_type); } /* physical link went up */ diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 13cf66fe2aca..7619b752789e 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -535,7 +535,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, ibp = &ppd->ibport_data; if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || - ppd->vls_supported > ARRAY_SIZE(dd->vld)) { + ppd->vls_supported > ARRAY_SIZE(dd->vld)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -981,9 +981,8 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp, link_state = HLS_DN_DOWNDEF; else if (phys_state == IB_PORTPHYSSTATE_POLLING) { link_state = HLS_DN_POLL; - set_link_down_reason(ppd, - OPA_LINKDOWN_REASON_FM_BOUNCE, 0, - OPA_LINKDOWN_REASON_FM_BOUNCE); + set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE, + 0, OPA_LINKDOWN_REASON_FM_BOUNCE); } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) link_state = HLS_DN_DISABLE; else { @@ -1102,7 +1101,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, /* Must be a valid unicast LID address. 
*/ if ((lid == 0 && ls_old > IB_PORT_INIT) || - lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { + lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { smp->status |= IB_SMP_INVALID_FIELD; pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n", lid); @@ -1135,7 +1134,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, /* Must be a valid unicast LID address. */ if ((smlid == 0 && ls_old > IB_PORT_INIT) || - smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { + smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { smp->status |= IB_SMP_INVALID_FIELD; pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid); } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) { @@ -1185,7 +1184,9 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, if (lwe == OPA_LINK_WIDTH_RESET || lwe == OPA_LINK_WIDTH_RESET_OLD) { set_link_width_downgrade_enabled(ppd, - ppd->link_width_downgrade_supported); + ppd-> + link_width_downgrade_supported + ); } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) { /* only set and apply if something changed */ if (lwe != ppd->link_width_downgrade_enabled) { @@ -1210,16 +1211,17 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, ibp->rvp.vl_high_limit); if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) || - ppd->vls_supported > ARRAY_SIZE(dd->vld)) { + ppd->vls_supported > ARRAY_SIZE(dd->vld)) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } for (i = 0; i < ppd->vls_supported; i++) { if ((i % 2) == 0) - mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >> 4) - & 0xF); + mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >> + 4) & 0xF); else - mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] & 0xF); + mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] & + 0xF); if (mtu == 0xffff) { pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n", mtu, @@ -1229,8 +1231,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, } if (dd->vld[i].mtu != mtu) { dd_dev_info(dd, - "MTU change on vl %d from %d to %d\n", - i, dd->vld[i].mtu, mtu); + "MTU change on vl %d from %d to %d\n", + i, dd->vld[i].mtu, mtu); dd->vld[i].mtu = mtu; call_set_mtu++; } @@ -1243,8 +1245,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, mtu = 2048; if (dd->vld[15].mtu != mtu) { dd_dev_info(dd, - "MTU change on vl 15 from %d to %d\n", - dd->vld[15].mtu, mtu); + "MTU change on vl 15 from %d to %d\n", + dd->vld[15].mtu, mtu); dd->vld[15].mtu = mtu; call_set_mtu++; } @@ -1260,7 +1262,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, smp->status |= IB_SMP_INVALID_FIELD; } else { if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS, - vls) == -EINVAL) + vls) == -EINVAL) smp->status |= IB_SMP_INVALID_FIELD; } } @@ -1806,7 +1808,7 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data, * addr and (addr + len - 1) are on the same "page" */ if (addr >= 4096 || - (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) { + (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } @@ -2271,7 +2273,7 @@ enum error_info_selects { }; static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp, - struct ib_device *ibdev, u32 *resp_len) + struct ib_device *ibdev, u32 *resp_len) { struct opa_class_port_info *p = (struct opa_class_port_info *)pmp->data; @@ -2320,7 +2322,8 @@ static void a0_portstatus(struct hfi1_pportdata *ppd, } static int 
pma_get_opa_portstatus(struct opa_pma_mad *pmp, - struct ib_device *ibdev, u8 port, u32 *resp_len) + struct ib_device *ibdev, + u8 port, u32 *resp_len) { struct opa_port_status_req *req = (struct opa_port_status_req *)pmp->data; @@ -2376,7 +2379,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, CNTR_INVALID_VL)); rsp->port_multicast_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS, - CNTR_INVALID_VL)); + CNTR_INVALID_VL)); rsp->port_multicast_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL)); @@ -2405,7 +2408,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, } tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL); tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, - CNTR_INVALID_VL); + CNTR_INVALID_VL); if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) { /* overflow/wrapped */ rsp->link_error_recovery = cpu_to_be32(~0); @@ -2420,7 +2423,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL)); rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN, - CNTR_INVALID_VL)); + CNTR_INVALID_VL)); /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */ tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL); @@ -2442,27 +2445,27 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp, rsp->vls[vfi].port_vl_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_xmit_data = cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_xmit_pkts = cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_xmit_wait = cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_fecn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_becn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); vlinfo++; vfi++; @@ -2492,7 +2495,7 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL); error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR, - CNTR_INVALID_VL); + CNTR_INVALID_VL); /* local link integrity must be right-shifted by the lli resolution */ tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL); tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL); @@ -2502,10 +2505,10 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL); error_counter_summary += (tmp >> res_ler); error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR, - CNTR_INVALID_VL); + CNTR_INVALID_VL); error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL); error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR, - CNTR_INVALID_VL); + CNTR_INVALID_VL); /* ppd->link_downed is a 32-bit value */ error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL); @@ -2563,7 +2566,8 @@ static void pma_get_opa_port_dctrs(struct ib_device *ibdev, } static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, - struct ib_device *ibdev, u8 port, u32 *resp_len) + struct ib_device *ibdev, + u8 port, u32 *resp_len) { struct opa_port_data_counters_msg *req = (struct opa_port_data_counters_msg *)pmp->data; @@ -2650,35 +2654,35 @@ static int 
pma_get_opa_datacounters(struct opa_pma_mad *pmp, * any additional checks for vl. */ for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(req->vl_select_mask)) { + 8 * sizeof(req->vl_select_mask)) { memset(vlinfo, 0, sizeof(*vlinfo)); rsp->vls[vfi].port_vl_xmit_data = cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_xmit_pkts = cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_xmit_wait = cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_fecn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); rsp->vls[vfi].port_vl_rcv_becn = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL, - idx_from_vl(vl))); + idx_from_vl(vl))); /* rsp->port_vl_xmit_time_cong is 0 for HFIs */ /* rsp->port_vl_xmit_wasted_bw ??? */ @@ -2777,7 +2781,8 @@ static void pma_get_opa_port_ectrs(struct ib_device *ibdev, } static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, - struct ib_device *ibdev, u8 port, u32 *resp_len) + struct ib_device *ibdev, + u8 port, u32 *resp_len) { size_t response_data_size; struct _port_ectrs *rsp; @@ -2820,7 +2825,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, */ port_mask = be64_to_cpu(req->port_select_mask[3]); port_num = find_first_bit((unsigned long *)&port_mask, - sizeof(port_mask)); + sizeof(port_mask)); if (port_num != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; @@ -2842,7 +2847,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, CNTR_INVALID_VL)); rsp->fm_config_errors = cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR, - CNTR_INVALID_VL)); + CNTR_INVALID_VL)); tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL); rsp->uncorrectable_errors = tmp < 0x100 ? 
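/*
 * Aside: the per-VL counter loops above visit only the VLs selected
 * by the request, using for_each_set_bit() over the select mask and a
 * separate index (vfi) into the packed response array. Skeleton of
 * the pattern (the mask/vfi locals are illustrative; the real mask
 * field is big-endian and narrower):
 */
unsigned long mask = vl_select_mask;
int vl, vfi = 0;

for_each_set_bit(vl, &mask, 8 * sizeof(mask)) {
	/* fill rsp->vls[vfi] from the counters at idx_from_vl(vl) */
	vfi++;
}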
(tmp & 0xff) : 0xff; @@ -2950,7 +2955,8 @@ bail: } static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, - struct ib_device *ibdev, u8 port, u32 *resp_len) + struct ib_device *ibdev, + u8 port, u32 *resp_len) { size_t response_data_size; struct _port_ei *rsp; @@ -3000,9 +3006,9 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, rsp->port_rcv_ei.status_and_code = dd->err_info_rcvport.status_and_code; memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1, - &dd->err_info_rcvport.packet_flit1, sizeof(u64)); + &dd->err_info_rcvport.packet_flit1, sizeof(u64)); memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2, - &dd->err_info_rcvport.packet_flit2, sizeof(u64)); + &dd->err_info_rcvport.packet_flit2, sizeof(u64)); /* ExcessiverBufferOverrunInfo */ reg = read_csr(dd, RCV_ERR_INFO); @@ -3047,7 +3053,8 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, } static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, - struct ib_device *ibdev, u8 port, u32 *resp_len) + struct ib_device *ibdev, + u8 port, u32 *resp_len) { struct opa_clear_port_status *req = (struct opa_clear_port_status *)pmp->data; @@ -3131,7 +3138,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, if (counter_select & CS_LINK_ERROR_RECOVERY) { write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0); write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, - CNTR_INVALID_VL, 0); + CNTR_INVALID_VL, 0); } if (counter_select & CS_PORT_RCV_ERRORS) @@ -3194,7 +3201,8 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp, } static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, - struct ib_device *ibdev, u8 port, u32 *resp_len) + struct ib_device *ibdev, + u8 port, u32 *resp_len) { struct _port_ei *rsp; struct opa_port_error_info_msg *req; @@ -3295,9 +3303,8 @@ static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data, } static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am, - u8 *data, - struct ib_device *ibdev, - u8 port, u32 *resp_len) + u8 *data, struct ib_device *ibdev, + u8 port, u32 *resp_len) { int i; struct opa_congestion_setting_attr *p = @@ -3402,7 +3409,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am, continue; memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3); memcpy(cong_log->events[i].remote_qp_number_cn_entry, - &cce->rqpn, 3); + &cce->rqpn, 3); cong_log->events[i].sl_svc_type_cn_entry = ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7); cong_log->events[i].remote_lid_cn_entry = @@ -3584,8 +3591,8 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, reg = read_csr(dd, DCC_CFG_LED_CNTRL); if ((reg & DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK) && - ((reg & DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK) == 0xf)) - p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK); + ((reg & DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK) == 0xf)) + p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK); if (resp_len) *resp_len += sizeof(struct opa_led_info); @@ -3653,7 +3660,7 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, break; case OPA_ATTRIB_ID_SC_TO_VLNT_MAP: ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port, - resp_len); + resp_len); break; case OPA_ATTRIB_ID_PORT_STATE_INFO: ret = __subn_get_opa_psi(smp, am, data, ibdev, port, @@ -3735,7 +3742,7 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am, break; case OPA_ATTRIB_ID_SC_TO_VLNT_MAP: ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port, - resp_len); + resp_len); break; case OPA_ATTRIB_ID_PORT_STATE_INFO: ret = __subn_set_opa_psi(smp, 
am, data, ibdev, port, @@ -4092,10 +4099,10 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, port_num && port_num <= ibdev->phys_port_cnt && port != port_num) (void)check_mkey(to_iport(ibdev, port_num), - (struct ib_mad_hdr *)smp, 0, - smp->mkey, - (__force __be32)smp->dr_slid, - smp->return_path, smp->hop_cnt); + (struct ib_mad_hdr *)smp, 0, + smp->mkey, + (__force __be32)smp->dr_slid, + smp->return_path, smp->hop_cnt); ret = IB_MAD_RESULT_FAILURE; return ret; } @@ -4203,19 +4210,19 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port, break; case OPA_PM_ATTRIB_ID_PORT_STATUS: ret = pma_get_opa_portstatus(pmp, ibdev, port, - resp_len); + resp_len); break; case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS: ret = pma_get_opa_datacounters(pmp, ibdev, port, - resp_len); + resp_len); break; case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS: ret = pma_get_opa_porterrors(pmp, ibdev, port, - resp_len); + resp_len); break; case OPA_PM_ATTRIB_ID_ERROR_INFO: ret = pma_get_opa_errorinfo(pmp, ibdev, port, - resp_len); + resp_len); break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; @@ -4228,11 +4235,11 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port, switch (pmp->mad_hdr.attr_id) { case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS: ret = pma_set_opa_portstatus(pmp, ibdev, port, - resp_len); + resp_len); break; case OPA_PM_ATTRIB_ID_ERROR_INFO: ret = pma_set_opa_errorinfo(pmp, ibdev, port, - resp_len); + resp_len); break; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 4d9fd3b5ef1e..cbd61cf6549a 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -217,10 +217,9 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &dd->pcie_lnkctl); pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2, - &dd->pcie_devctl2); + &dd->pcie_devctl2); pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0); - pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - &dd->pci_lnkctl3); + pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, &dd->pci_lnkctl3); pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2); return 0; @@ -271,7 +270,7 @@ void hfi1_pcie_flr(struct hfi1_devdata *dd) clear: pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_BCR_FLR); + PCI_EXP_DEVCTL_BCR_FLR); /* PCIe spec requires the function to be back within 100ms */ msleep(100); } @@ -377,8 +376,8 @@ int pcie_speeds(struct hfi1_devdata *dd) pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap); if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) { dd_dev_info(dd, - "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n", - linkcap & PCI_EXP_LNKCAP_SLS); + "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n", + linkcap & PCI_EXP_LNKCAP_SLS); dd->link_gen3_capable = 0; } @@ -432,19 +431,15 @@ void hfi1_enable_intx(struct pci_dev *pdev) void restore_pci_variables(struct hfi1_devdata *dd) { pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command); - pci_write_config_dword(dd->pcidev, - PCI_BASE_ADDRESS_0, dd->pcibar0); - pci_write_config_dword(dd->pcidev, - PCI_BASE_ADDRESS_1, dd->pcibar1); - pci_write_config_dword(dd->pcidev, - PCI_ROM_ADDRESS, dd->pci_rom); + pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0); + pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, 
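/*
 * Aside: restore_pci_variables() above replays config-space values
 * saved before the secondary bus reset; each pci_write_config_* call
 * mirrors an earlier pci_read_config_* of the same register. The save
 * and restore sides, for one register (a sketch of the pairing):
 */
pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
/* ... link retrain / secondary bus reset happens here ... */
pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);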
dd->pcibar1); + pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom); pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, dd->pcie_devctl); pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, dd->pcie_lnkctl); pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2, - dd->pcie_devctl2); + dd->pcie_devctl2); pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0); - pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - dd->pci_lnkctl3); + pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, dd->pci_lnkctl3); pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2); } @@ -746,21 +741,22 @@ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs, c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div); c_plus1 = eq[i][POST] / div; pci_write_config_dword(pdev, PCIE_CFG_REG_PL102, - eq_value(c_minus1, c0, c_plus1)); + eq_value(c_minus1, c0, c_plus1)); /* check if these coefficients violate EQ rules */ pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL105, - &violation); + &violation); if (violation & PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK){ if (hit_error == 0) { dd_dev_err(dd, - "Gen3 EQ Table Coefficient rule violations\n"); + "Gen3 EQ Table Coefficient rule violations\n"); dd_dev_err(dd, " prec attn post\n"); } dd_dev_err(dd, " p%02d: %02x %02x %02x\n", - i, (u32)eq[i][0], (u32)eq[i][1], (u32)eq[i][2]); + i, (u32)eq[i][0], (u32)eq[i][1], + (u32)eq[i][2]); dd_dev_err(dd, " %02x %02x %02x\n", - (u32)c_minus1, (u32)c0, (u32)c_plus1); + (u32)c_minus1, (u32)c0, (u32)c_plus1); hit_error = 1; } } @@ -815,8 +811,8 @@ static int trigger_sbr(struct hfi1_devdata *dd) list_for_each_entry(pdev, &dev->bus->devices, bus_list) if (pdev != dev) { dd_dev_err(dd, - "%s: another device is on the same bus\n", - __func__); + "%s: another device is on the same bus\n", + __func__); return -ENOTTY; } @@ -840,8 +836,8 @@ static void write_gasket_interrupt(struct hfi1_devdata *dd, int index, u16 code, u16 data) { write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8), - (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) - | ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT))); + (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) | + ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT))); } /* @@ -851,14 +847,13 @@ static void arm_gasket_logic(struct hfi1_devdata *dd) { u64 reg; - reg = (((u64)1 << dd->hfi1_id) - << ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) - | ((u64)pcie_serdes_broadcast[dd->hfi1_id] - << ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT - | ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK - | ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) - << ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT - ); + reg = (((u64)1 << dd->hfi1_id) << + ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) | + ((u64)pcie_serdes_broadcast[dd->hfi1_id] << + ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT | + ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK | + ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) << + ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT); write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg); /* read back to push the write */ read_csr(dd, ASIC_PCIE_SD_HOST_CMD); @@ -982,8 +977,8 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd) /* if already at target speed, done (unless forced) */ if (dd->lbus_speed == target_speed) { dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__, - pcie_target, - pcie_force ? "re-doing anyway" : "skipping"); + pcie_target, + pcie_force ? 
"re-doing anyway" : "skipping"); if (!pcie_force) return 0; } @@ -1087,8 +1082,10 @@ retry: default_pset = DEFAULT_MCP_PSET; } pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101, - (fs << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) - | (lf << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT)); + (fs << + PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) | + (lf << + PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT)); ret = load_eq_table(dd, eq, fs, div); if (ret) goto done; @@ -1102,15 +1099,15 @@ retry: pcie_pset = default_pset; if (pcie_pset > 10) { /* valid range is 0-10, inclusive */ dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n", - __func__, pcie_pset, default_pset); + __func__, pcie_pset, default_pset); pcie_pset = default_pset; } dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pcie_pset); pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106, - ((1 << pcie_pset) - << PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) - | PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK - | PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK); + ((1 << pcie_pset) << + PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) | + PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK | + PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK); /* * step 5b: Do post firmware download steps via SBus @@ -1165,13 +1162,13 @@ retry: parent = dd->pcidev->bus->self; pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2); dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__, - (u32)lnkctl2); + (u32)lnkctl2); /* only write to parent if target is not as high as ours */ if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) { lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK; lnkctl2 |= target_vector; dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__, - (u32)lnkctl2); + (u32)lnkctl2); pcie_capability_write_word(parent, PCI_EXP_LNKCTL2, lnkctl2); } else { dd_dev_info(dd, "%s: ..target speed is OK\n", __func__); @@ -1180,11 +1177,11 @@ retry: dd_dev_info(dd, "%s: setting target link speed\n", __func__); pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2); dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__, - (u32)lnkctl2); + (u32)lnkctl2); lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK; lnkctl2 |= target_vector; dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__, - (u32)lnkctl2); + (u32)lnkctl2); pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2); /* step 5h: arm gasket logic */ @@ -1221,8 +1218,8 @@ retry: ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor); if (ret) { dd_dev_info(dd, - "%s: read of VendorID failed after SBR, err %d\n", - __func__, ret); + "%s: read of VendorID failed after SBR, err %d\n", + __func__, ret); return_error = 1; goto done; } @@ -1273,8 +1270,8 @@ retry: & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK; if ((status & (1 << dd->hfi1_id)) == 0) { dd_dev_err(dd, - "%s: gasket status 0x%x, expecting 0x%x\n", - __func__, status, 1 << dd->hfi1_id); + "%s: gasket status 0x%x, expecting 0x%x\n", + __func__, status, 1 << dd->hfi1_id); ret = -EIO; goto done; } @@ -1291,13 +1288,13 @@ retry: /* update our link information cache */ update_lbus_info(dd); dd_dev_info(dd, "%s: new speed and width: %s\n", __func__, - dd->lbus_info); + dd->lbus_info); if (dd->lbus_speed != target_speed) { /* not target */ /* maybe retry */ do_retry = retry_count < pcie_retry; dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n", - pcie_target, do_retry ? ", retrying" : ""); + pcie_target, do_retry ? 
", retrying" : ""); retry_count++; if (do_retry) { msleep(100); /* allow time to settle */ diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 191b260d173d..a483c0aad4cc 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -511,7 +511,7 @@ static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context) sci = &dd->send_contexts[sw_index]; if (!sci->allocated) { dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n", - __func__, sw_index, hw_context); + __func__, sw_index, hw_context); } sci->allocated = 0; dd->hw_to_sw[hw_context] = INVALID_SCI; @@ -627,7 +627,7 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold) & SC(CREDIT_CTRL_THRESHOLD_MASK)) << SC(CREDIT_CTRL_THRESHOLD_SHIFT)); write_kctxt_csr(sc->dd, sc->hw_context, - SC(CREDIT_CTRL), sc->credit_ctrl); + SC(CREDIT_CTRL), sc->credit_ctrl); /* force a credit return on change to avoid a possible stall */ force_return = 1; @@ -765,9 +765,9 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, /* set the default partition key */ write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), - (DEFAULT_PKEY & - SC(CHECK_PARTITION_KEY_VALUE_MASK)) - << SC(CHECK_PARTITION_KEY_VALUE_SHIFT)); + (DEFAULT_PKEY & + SC(CHECK_PARTITION_KEY_VALUE_MASK)) << + SC(CHECK_PARTITION_KEY_VALUE_SHIFT)); /* per context type checks */ if (type == SC_USER) { @@ -780,8 +780,8 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, /* set the send context check opcode mask and value */ write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), - ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) | - ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT))); + ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) | + ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT))); /* set up credit return */ reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK); @@ -799,7 +799,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, thresh = sc_percent_to_threshold(sc, 50); } else if (type == SC_USER) { thresh = sc_percent_to_threshold(sc, - user_credit_return_threshold); + user_credit_return_threshold); } else { /* kernel */ thresh = sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize); } @@ -972,11 +972,11 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) if (loop > 500) { /* timed out - bounce the link */ dd_dev_err(dd, - "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", - __func__, sc->sw_index, - sc->hw_context, (u32)reg); + "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", + __func__, sc->sw_index, + sc->hw_context, (u32)reg); queue_work(dd->pport->hfi1_wq, - &dd->pport->link_bounce_work); + &dd->pport->link_bounce_work); break; } loop++; @@ -1022,7 +1022,7 @@ int sc_restart(struct send_context *sc) return -EINVAL; dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, - sc->hw_context); + sc->hw_context); /* * Step 1: Wait for the context to actually halt. 
@@ -1037,7 +1037,7 @@ int sc_restart(struct send_context *sc) break; if (loop > 100) { dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n", - __func__, sc->sw_index, sc->hw_context); + __func__, sc->sw_index, sc->hw_context); return -ETIME; } loop++; @@ -1063,9 +1063,9 @@ int sc_restart(struct send_context *sc) break; if (loop > 100) { dd_dev_err(dd, - "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n", - __func__, sc->sw_index, - sc->hw_context, count); + "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n", + __func__, sc->sw_index, + sc->hw_context, count); } loop++; udelay(1); @@ -1178,18 +1178,18 @@ void pio_reset_all(struct hfi1_devdata *dd) if (ret == -EIO) { /* clear the error */ write_csr(dd, SEND_PIO_ERR_CLEAR, - SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK); + SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK); } /* reset init all */ write_csr(dd, SEND_PIO_INIT_CTXT, - SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK); + SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK); udelay(2); ret = pio_init_wait_progress(dd); if (ret < 0) { dd_dev_err(dd, - "PIO send context init %s while initializing all PIO blocks\n", - ret == -ETIMEDOUT ? "is stuck" : "had an error"); + "PIO send context init %s while initializing all PIO blocks\n", + ret == -ETIMEDOUT ? "is stuck" : "had an error"); } } @@ -1237,8 +1237,7 @@ int sc_enable(struct send_context *sc) */ reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); if (reg) - write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), - reg); + write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); /* * The HW PIO initialization engine can handle only one init @@ -1296,7 +1295,7 @@ void sc_return_credits(struct send_context *sc) /* a 0->1 transition schedules a credit return */ write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), - SC(CREDIT_FORCE_FORCE_RETURN_SMASK)); + SC(CREDIT_FORCE_FORCE_RETURN_SMASK)); /* * Ensure that the write is flushed and the credit return is * scheduled. We care more about the 0 -> 1 transition. 
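/*
 * A minimal sketch of the credit-force sequence whose trailing comment
 * closes the hunk above: the hardware acts on the 0 -> 1 edge of the
 * force bit, so the driver sets it, flushes the posted write with a
 * read-back, then clears it to re-arm the next edge. csr_write() and
 * csr_read() are hypothetical stand-ins for the driver's
 * write_kctxt_csr()/read_kctxt_csr() pair:
 */
static void force_credit_return_sketch(struct send_context *sc)
{
	/* a 0 -> 1 transition schedules a credit return */
	csr_write(sc, SC(CREDIT_FORCE), SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
	/* read back to ensure the write is flushed and the return scheduled */
	(void)csr_read(sc, SC(CREDIT_FORCE));
	/* set back to 0 so the next force produces a fresh 0 -> 1 edge */
	csr_write(sc, SC(CREDIT_FORCE), 0);
}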
@@ -1322,7 +1321,7 @@ void sc_drop(struct send_context *sc) return; dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", - __func__, sc->sw_index, sc->hw_context); + __func__, sc->sw_index, sc->hw_context); } /* @@ -1472,7 +1471,7 @@ void sc_add_credit_return_intr(struct send_context *sc) if (sc->credit_intr_count == 0) { sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK); write_kctxt_csr(sc->dd, sc->hw_context, - SC(CREDIT_CTRL), sc->credit_ctrl); + SC(CREDIT_CTRL), sc->credit_ctrl); } sc->credit_intr_count++; spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); @@ -1494,7 +1493,7 @@ void sc_del_credit_return_intr(struct send_context *sc) if (sc->credit_intr_count == 0) { sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK); write_kctxt_csr(sc->dd, sc->hw_context, - SC(CREDIT_CTRL), sc->credit_ctrl); + SC(CREDIT_CTRL), sc->credit_ctrl); } spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); } @@ -1667,7 +1666,7 @@ void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context) sw_index = dd->hw_to_sw[hw_context]; if (unlikely(sw_index >= dd->num_send_contexts)) { dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n", - __func__, hw_context, sw_index); + __func__, hw_context, sw_index); goto done; } sc = dd->send_contexts[sw_index].sc; @@ -1680,8 +1679,8 @@ void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context) sw_index = dd->hw_to_sw[gc]; if (unlikely(sw_index >= dd->num_send_contexts)) { dd_dev_err(dd, - "%s: invalid hw (%u) to sw (%u) mapping\n", - __func__, hw_context, sw_index); + "%s: invalid hw (%u) to sw (%u) mapping\n", + __func__, hw_context, sw_index); continue; } sc_release_update(dd->send_contexts[sw_index].sc); @@ -2009,8 +2008,8 @@ int init_credit_return(struct hfi1_devdata *dd) if (!dd->cr_base[i].va) { set_dev_node(&dd->pcidev->dev, dd->node); dd_dev_err(dd, - "Unable to allocate credit return DMA range for NUMA %d\n", - i); + "Unable to allocate credit return DMA range for NUMA %d\n", + i); ret = -ENOMEM; goto done; } @@ -2034,10 +2033,10 @@ void free_credit_return(struct hfi1_devdata *dd) for (i = 0; i < num_numa; i++) { if (dd->cr_base[i].va) { dma_free_coherent(&dd->pcidev->dev, - TXE_NUM_CONTEXTS - * sizeof(struct credit_return), - dd->cr_base[i].va, - dd->cr_base[i].pa); + TXE_NUM_CONTEXTS * + sizeof(struct credit_return), + dd->cr_base[i].va, + dd->cr_base[i].pa); } } kfree(dd->cr_base); diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h index 09a5eebf4b56..d80909a60df9 100644 --- a/drivers/staging/rdma/hfi1/pio.h +++ b/drivers/staging/rdma/hfi1/pio.h @@ -289,7 +289,7 @@ void sc_flush(struct send_context *sc); void sc_drop(struct send_context *sc); void sc_stop(struct send_context *sc, int bit); struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len, - pio_release_cb cb, void *arg); + pio_release_cb cb, void *arg); void sc_release_update(struct send_context *sc); void sc_return_credits(struct send_context *sc); void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context); @@ -322,7 +322,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op); void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, const void *from, size_t count); void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc, - const void *from, size_t nbytes); + const void *from, size_t nbytes); void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes); void seg_pio_copy_end(struct pio_buf *pbuf); diff --git a/drivers/staging/rdma/hfi1/pio_copy.c 
b/drivers/staging/rdma/hfi1/pio_copy.c index 6f97d228563b..998e7bc89036 100644 --- a/drivers/staging/rdma/hfi1/pio_copy.c +++ b/drivers/staging/rdma/hfi1/pio_copy.c @@ -200,7 +200,7 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc, * o nbytes must not span a QW boundary */ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from, - unsigned int nbytes) + unsigned int nbytes) { unsigned long off; @@ -227,7 +227,7 @@ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from, * o nbytes may span a QW boundary */ static inline void read_extra_bytes(struct pio_buf *pbuf, - const void *from, unsigned int nbytes) + const void *from, unsigned int nbytes) { unsigned long off = (unsigned long)from & 0x7; unsigned int room, xbytes; @@ -366,7 +366,7 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n) * o from may _not_ be u64 aligned. */ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from, - unsigned int nbytes) + unsigned int nbytes) { jcopy(&pbuf->carry.val8[0], from, nbytes); pbuf->carry_bytes = nbytes; @@ -381,7 +381,7 @@ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from, * o nbytes may span a QW boundary */ static inline void read_extra_bytes(struct pio_buf *pbuf, - const void *from, unsigned int nbytes) + const void *from, unsigned int nbytes) { jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes); pbuf->carry_bytes += nbytes; @@ -437,7 +437,7 @@ static inline int carry_write8(struct pio_buf *pbuf, void *dest) u64 zero = 0; jcopy(&pbuf->carry.val8[pbuf->carry_bytes], (u8 *)&zero, - 8 - pbuf->carry_bytes); + 8 - pbuf->carry_bytes); writeq(pbuf->carry.val64, dest); return 1; } @@ -457,7 +457,7 @@ static inline int carry_write8(struct pio_buf *pbuf, void *dest) * @nbytes: bytes to copy */ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc, - const void *from, size_t nbytes) + const void *from, size_t nbytes) { void __iomem *dest = pbuf->start + SOP_DISTANCE; void __iomem *send = dest + PIO_BLOCK_SIZE; @@ -647,7 +647,7 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes) * Must handle nbytes < 8. */ static void mid_copy_straight(struct pio_buf *pbuf, - const void *from, size_t nbytes) + const void *from, size_t nbytes) { void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64)); void __iomem *dend; /* 8-byte data end */ diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index bdb1504b2ade..c5e04b069ad6 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -468,7 +468,7 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len, if (port_num > dd->num_pports || port_num < 1) { dd_dev_info(dd, "%s: Invalid port number %d\n", - __func__, port_num); + __func__, port_num); ret = -EINVAL; goto set_zeroes; } diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 99584f7f5052..28ff638cd371 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -1773,8 +1773,8 @@ static inline void rc_cancel_ack(struct rvt_qp *qp) * schedule a response to be sent. 
*/ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data, - struct rvt_qp *qp, u32 opcode, u32 psn, int diff, - struct hfi1_ctxtdata *rcd) + struct rvt_qp *qp, u32 opcode, u32 psn, + int diff, struct hfi1_ctxtdata *rcd) { struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct rvt_ack_entry *e; diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 6f0005a93c44..e2c4f8288c3e 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -283,9 +283,10 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix, guid)) goto err; - if (!gid_ok(&hdr->u.l.grh.sgid, - qp->alt_ah_attr.grh.dgid.global.subnet_prefix, - qp->alt_ah_attr.grh.dgid.global.interface_id)) + if (!gid_ok( + &hdr->u.l.grh.sgid, + qp->alt_ah_attr.grh.dgid.global.subnet_prefix, + qp->alt_ah_attr.grh.dgid.global.interface_id)) goto err; } if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, @@ -317,9 +318,10 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr, if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix, guid)) goto err; - if (!gid_ok(&hdr->u.l.grh.sgid, - qp->remote_ah_attr.grh.dgid.global.subnet_prefix, - qp->remote_ah_attr.grh.dgid.global.interface_id)) + if (!gid_ok( + &hdr->u.l.grh.sgid, + qp->remote_ah_attr.grh.dgid.global.subnet_prefix, + qp->remote_ah_attr.grh.dgid.global.interface_id)) goto err; } if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index 5f62d0229088..74086eabbb25 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -325,9 +325,9 @@ static void sdma_wait_for_packet_egress(struct sdma_engine *sde, if (lcnt++ > 500) { /* timed out - bounce the link */ dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n", - __func__, sde->this_idx, (u32)reg); + __func__, sde->this_idx, (u32)reg); queue_work(dd->pport->hfi1_wq, - &dd->pport->link_bounce_work); + &dd->pport->link_bounce_work); break; } udelay(1); @@ -458,8 +458,8 @@ static void sdma_err_halt_wait(struct work_struct *work) break; if (time_after(jiffies, timeout)) { dd_dev_err(sde->dd, - "SDMA engine %d - timeout waiting for engine to halt\n", - sde->this_idx); + "SDMA engine %d - timeout waiting for engine to halt\n", + sde->this_idx); /* * Continue anyway. This could happen if there was * an uncorrectable error in the wrong spot. 
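/*
 * The sdma_err_halt_wait() hunk above follows the standard jiffies
 * timeout idiom; schematically, with a hypothetical engine_halted()
 * predicate, an assumed SDMA_ERR_HALT_TIMEOUT constant, and an assumed
 * polling interval (as the original comment notes, the wait continues
 * anyway on timeout):
 */
static void halt_wait_sketch(struct sdma_engine *sde)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);

	while (!engine_halted(sde)) {
		if (time_after(jiffies, timeout)) {
			dd_dev_err(sde->dd,
				   "SDMA engine %d - timeout waiting for engine to halt\n",
				   sde->this_idx);
			break;	/* continue anyway; may be a stray error */
		}
		usleep_range(80, 120);	/* assumed poll interval */
	}
}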
@@ -656,7 +656,7 @@ static void sdma_start_hw_clean_up(struct sdma_engine *sde) } static void sdma_set_state(struct sdma_engine *sde, - enum sdma_states next_state) + enum sdma_states next_state) { struct sdma_state *ss = &sde->state; const struct sdma_set_state_action *action = sdma_action_table; @@ -908,7 +908,7 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines) /* newmap in hand, save old map */ spin_lock_irq(&dd->sde_map_lock); oldmap = rcu_dereference_protected(dd->sdma_map, - lockdep_is_held(&dd->sde_map_lock)); + lockdep_is_held(&dd->sde_map_lock)); /* publish newmap */ rcu_assign_pointer(dd->sdma_map, newmap); @@ -1006,16 +1006,16 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) return 0; } if (mod_num_sdma && - /* can't exceed chip support */ - mod_num_sdma <= dd->chip_sdma_engines && - /* count must be >= vls */ - mod_num_sdma >= num_vls) + /* can't exceed chip support */ + mod_num_sdma <= dd->chip_sdma_engines && + /* count must be >= vls */ + mod_num_sdma >= num_vls) num_engines = mod_num_sdma; dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma); dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines); dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n", - dd->chip_sdma_mem_size); + dd->chip_sdma_mem_size); per_sdma_credits = dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE); @@ -1026,7 +1026,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) descq_cnt = sdma_get_descq_cnt(); dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n", - num_engines, descq_cnt); + num_engines, descq_cnt); /* alloc memory for array of send engines */ dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL); @@ -1086,10 +1086,10 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) SDMA_DESC1_INT_REQ_FLAG; tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task, - (unsigned long)sde); + (unsigned long)sde); tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task, - (unsigned long)sde); + (unsigned long)sde); INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); INIT_WORK(&sde->flush_worker, sdma_field_flush); @@ -1240,7 +1240,7 @@ void sdma_exit(struct hfi1_devdata *dd) sde = &dd->per_sdma[this_idx]; if (!list_empty(&sde->dmawait)) dd_dev_err(dd, "sde %u: dmawait list not empty!\n", - sde->this_idx); + sde->this_idx); sdma_process_event(sde, sdma_event_e00_go_hw_down); del_timer_sync(&sde->err_progress_check_timer); @@ -1370,9 +1370,9 @@ retry: if (unlikely(!sane)) { dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n", - sde->this_idx, - use_dmahead ? "dma" : "kreg", - hwhead, swhead, swtail, cnt); + sde->this_idx, + use_dmahead ? 
"dma" : "kreg", + hwhead, swhead, swtail, cnt); if (use_dmahead) { /* try one more time, using csr */ use_dmahead = 0; @@ -1550,10 +1550,10 @@ void sdma_engine_error(struct sdma_engine *sde, u64 status) __sdma_process_event(sde, sdma_event_e60_hw_halted); if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) { dd_dev_err(sde->dd, - "SDMA (%u) engine error: 0x%llx state %s\n", - sde->this_idx, - (unsigned long long)status, - sdma_state_names[sde->state.current_state]); + "SDMA (%u) engine error: 0x%llx state %s\n", + sde->this_idx, + (unsigned long long)status, + sdma_state_names[sde->state.current_state]); dump_sdma_state(sde); } write_sequnlock(&sde->head_lock); @@ -1597,8 +1597,8 @@ static void sdma_sendctrl(struct sdma_engine *sde, unsigned op) if (op & SDMA_SENDCTRL_OP_CLEANUP) write_sde_csr(sde, SD(CTRL), - sde->p_senddmactrl | - SD(CTRL_SDMA_CLEANUP_SMASK)); + sde->p_senddmactrl | + SD(CTRL_SDMA_CLEANUP_SMASK)); else write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl); @@ -1622,12 +1622,10 @@ static void sdma_setlengen(struct sdma_engine *sde) * generation counter. */ write_sde_csr(sde, SD(LEN_GEN), - (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT) - ); + (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); write_sde_csr(sde, SD(LEN_GEN), - ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) - | (4ULL << SD(LEN_GEN_GENERATION_SHIFT)) - ); + ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | + (4ULL << SD(LEN_GEN_GENERATION_SHIFT))); } static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) @@ -1707,17 +1705,16 @@ static void init_sdma_regs( write_sde_csr(sde, SD(DESC_CNT), 0); write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); write_sde_csr(sde, SD(MEMORY), - ((u64)credits << - SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) | - ((u64)(credits * sde->this_idx) << - SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT))); + ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) | + ((u64)(credits * sde->this_idx) << + SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT))); write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); set_sdma_integrity(sde); opmask = OPCODE_CHECK_MASK_DISABLED; opval = OPCODE_CHECK_VAL_DISABLED; write_sde_csr(sde, SD(CHECK_OPCODE), - (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) | - (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT)); + (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) | + (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT)); } #ifdef CONFIG_SDMA_VERBOSITY @@ -1796,12 +1793,9 @@ static void dump_sdma_state(struct sdma_engine *sde) descq = sde->descq; dd_dev_err(sde->dd, - "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n", - sde->this_idx, - head, - tail, - cnt, - !list_empty(&sde->flushlist)); + "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n", + sde->this_idx, head, tail, cnt, + !list_empty(&sde->flushlist)); /* print info for each entry in the descriptor queue */ while (head != tail) { @@ -1822,20 +1816,23 @@ static void dump_sdma_state(struct sdma_engine *sde) len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) & SDMA_DESC0_BYTE_COUNT_MASK; dd_dev_err(sde->dd, - "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", - head, flags, addr, gen, len); + "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", + head, flags, addr, gen, len); dd_dev_err(sde->dd, - "\tdesc0:0x%016llx desc1 0x%016llx\n", - desc[0], desc[1]); + "\tdesc0:0x%016llx desc1 0x%016llx\n", + desc[0], desc[1]); if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) dd_dev_err(sde->dd, - "\taidx: %u amode: %u alen: %u\n", - (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) - >> 
SDMA_DESC1_HEADER_INDEX_SHIFT), - (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) - >> SDMA_DESC1_HEADER_MODE_SHIFT), - (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK) - >> SDMA_DESC1_HEADER_DWS_SHIFT)); + "\taidx: %u amode: %u alen: %u\n", + (u8)((desc[1] & + SDMA_DESC1_HEADER_INDEX_SMASK) >> + SDMA_DESC1_HEADER_INDEX_SHIFT), + (u8)((desc[1] & + SDMA_DESC1_HEADER_MODE_SMASK) >> + SDMA_DESC1_HEADER_MODE_SHIFT), + (u8)((desc[1] & + SDMA_DESC1_HEADER_DWS_SMASK) >> + SDMA_DESC1_HEADER_DWS_SHIFT)); head++; head &= sde->sdma_mask; } @@ -1862,29 +1859,26 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) head = sde->descq_head & sde->sdma_mask; tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; seq_printf(s, SDE_FMT, sde->this_idx, - sde->cpu, - sdma_state_name(sde->state.current_state), - (unsigned long long)read_sde_csr(sde, SD(CTRL)), - (unsigned long long)read_sde_csr(sde, SD(STATUS)), - (unsigned long long)read_sde_csr(sde, - SD(ENG_ERR_STATUS)), - (unsigned long long)read_sde_csr(sde, SD(TAIL)), - tail, - (unsigned long long)read_sde_csr(sde, SD(HEAD)), - head, - (unsigned long long)le64_to_cpu(*sde->head_dma), - (unsigned long long)read_sde_csr(sde, SD(MEMORY)), - (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), - (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), - (unsigned long long)sde->last_status, - (unsigned long long)sde->ahg_bits, - sde->tx_tail, - sde->tx_head, - sde->descq_tail, - sde->descq_head, + sde->cpu, + sdma_state_name(sde->state.current_state), + (unsigned long long)read_sde_csr(sde, SD(CTRL)), + (unsigned long long)read_sde_csr(sde, SD(STATUS)), + (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), + (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, + (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, + (unsigned long long)le64_to_cpu(*sde->head_dma), + (unsigned long long)read_sde_csr(sde, SD(MEMORY)), + (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), + (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), + (unsigned long long)sde->last_status, + (unsigned long long)sde->ahg_bits, + sde->tx_tail, + sde->tx_head, + sde->descq_tail, + sde->descq_head, !list_empty(&sde->flushlist), - sde->descq_full_count, - (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); + sde->descq_full_count, + (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); /* print info for each entry in the descriptor queue */ while (head != tail) { @@ -1905,14 +1899,16 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) & SDMA_DESC0_BYTE_COUNT_MASK; seq_printf(s, - "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", - head, flags, addr, gen, len); + "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", + head, flags, addr, gen, len); if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", - (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK) - >> SDMA_DESC1_HEADER_INDEX_SHIFT), - (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK) - >> SDMA_DESC1_HEADER_MODE_SHIFT)); + (u8)((desc[1] & + SDMA_DESC1_HEADER_INDEX_SMASK) >> + SDMA_DESC1_HEADER_INDEX_SHIFT), + (u8)((desc[1] & + SDMA_DESC1_HEADER_MODE_SMASK) >> + SDMA_DESC1_HEADER_MODE_SHIFT)); head = (head + 1) & sde->sdma_mask; } } @@ -2108,9 +2104,8 @@ nodesc: * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state */ -int sdma_send_txlist(struct sdma_engine *sde, - struct iowait *wait, - struct list_head 
*tx_list) +int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait, + struct list_head *tx_list) { struct sdma_txreq *tx, *tx_next; int ret = 0; @@ -2178,8 +2173,7 @@ nodesc: goto update_tail; } -static void sdma_process_event(struct sdma_engine *sde, - enum sdma_events event) +static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) { unsigned long flags; @@ -2196,7 +2190,7 @@ static void sdma_process_event(struct sdma_engine *sde, } static void __sdma_process_event(struct sdma_engine *sde, - enum sdma_events event) + enum sdma_events event) { struct sdma_state *ss = &sde->state; int need_progress = 0; @@ -2227,7 +2221,7 @@ static void __sdma_process_event(struct sdma_engine *sde, /* This reference means the state machine is started */ sdma_get(&sde->state); sdma_set_state(sde, - sdma_state_s10_hw_start_up_halt_wait); + sdma_state_s10_hw_start_up_halt_wait); break; case sdma_event_e15_hw_halt_done: break; @@ -2265,7 +2259,7 @@ static void __sdma_process_event(struct sdma_engine *sde, break; case sdma_event_e15_hw_halt_done: sdma_set_state(sde, - sdma_state_s15_hw_start_up_clean_wait); + sdma_state_s15_hw_start_up_clean_wait); sdma_start_hw_clean_up(sde); break; case sdma_event_e25_hw_clean_up_done: @@ -3003,7 +2997,8 @@ void sdma_freeze(struct hfi1_devdata *dd) * continuing. */ ret = wait_event_interruptible(dd->sdma_unfreeze_wq, - atomic_read(&dd->sdma_unfreeze_count) <= 0); + atomic_read(&dd->sdma_unfreeze_count) <= + 0); /* interrupted or count is negative, then unloading - just exit */ if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0) return; @@ -3040,7 +3035,7 @@ void sdma_unfreeze(struct hfi1_devdata *dd) /* tell all engines start freeze clean up */ for (i = 0; i < dd->num_sdma; i++) sdma_process_event(&dd->per_sdma[i], - sdma_event_e82_hw_unfreeze); + sdma_event_e82_hw_unfreeze); } /** @@ -3054,5 +3049,6 @@ void _sdma_engine_progress_schedule( trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); /* assume we have selected a good cpu */ write_csr(sde->dd, - CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), sde->progress_mask); + CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), + sde->progress_mask); } diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c index fe232c105742..3e3f1803a251 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/staging/rdma/hfi1/sysfs.c @@ -61,8 +61,8 @@ * Congestion control table size followed by table entries */ static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t count) { int ret; struct hfi1_pportdata *ppd = @@ -110,8 +110,8 @@ static struct bin_attribute cc_table_bin_attr = { * trigger threshold and the minimum injection rate delay. 
*/ static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t count) + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t count) { int ret; struct hfi1_pportdata *ppd = @@ -550,7 +550,7 @@ static ssize_t show_nctxts(struct device *device, } static ssize_t show_nfreectxts(struct device *device, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct hfi1_ibdev *dev = container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); @@ -660,8 +660,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, if (!port_num || port_num > dd->num_pports) { dd_dev_err(dd, - "Skipping infiniband class with invalid port %u\n", - port_num); + "Skipping infiniband class with invalid port %u\n", + port_num); return -ENODEV; } ppd = &dd->pport[port_num - 1]; @@ -700,34 +700,32 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, kobj, "CCMgtA"); if (ret) { dd_dev_err(dd, - "Skipping Congestion Control sysfs info, (err %d) port %u\n", - ret, port_num); + "Skipping Congestion Control sysfs info, (err %d) port %u\n", + ret, port_num); goto bail_vl2mtu; } kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD); - ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, - &cc_setting_bin_attr); + ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr); if (ret) { dd_dev_err(dd, - "Skipping Congestion Control setting sysfs info, (err %d) port %u\n", - ret, port_num); + "Skipping Congestion Control setting sysfs info, (err %d) port %u\n", + ret, port_num); goto bail_cc; } - ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, - &cc_table_bin_attr); + ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_table_bin_attr); if (ret) { dd_dev_err(dd, - "Skipping Congestion Control table sysfs info, (err %d) port %u\n", - ret, port_num); + "Skipping Congestion Control table sysfs info, (err %d) port %u\n", + ret, port_num); goto bail_cc_entry_bin; } dd_dev_info(dd, - "IB%u: Congestion Control Agent enabled for port %d\n", - dd->unit, port_num); + "IB%u: Congestion Control Agent enabled for port %d\n", + dd->unit, port_num); return 0; diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c index 923ca550318a..99fd01751492 100644 --- a/drivers/staging/rdma/hfi1/trace.c +++ b/drivers/staging/rdma/hfi1/trace.c @@ -109,17 +109,17 @@ const char *parse_everbs_hdrs( case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE): case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE): trace_seq_printf(p, IMM_PRN, - be32_to_cpu(eh->imm_data)); + be32_to_cpu(eh->imm_data)); break; /* reth + imm */ case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE): case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE): trace_seq_printf(p, RETH_PRN " " IMM_PRN, - (unsigned long long)ib_u64_get( - (__be32 *)&eh->rc.reth.vaddr), - be32_to_cpu(eh->rc.reth.rkey), - be32_to_cpu(eh->rc.reth.length), - be32_to_cpu(eh->rc.imm_data)); + (unsigned long long)ib_u64_get( + (__be32 *)&eh->rc.reth.vaddr), + be32_to_cpu(eh->rc.reth.rkey), + be32_to_cpu(eh->rc.reth.length), + be32_to_cpu(eh->rc.imm_data)); break; /* reth */ case OP(RC, RDMA_READ_REQUEST): @@ -128,10 +128,10 @@ const char *parse_everbs_hdrs( case OP(RC, RDMA_WRITE_ONLY): case OP(UC, RDMA_WRITE_ONLY): trace_seq_printf(p, RETH_PRN, - (unsigned long long)ib_u64_get( - (__be32 *)&eh->rc.reth.vaddr), - be32_to_cpu(eh->rc.reth.rkey), - be32_to_cpu(eh->rc.reth.length)); + (unsigned long long)ib_u64_get( + (__be32 *)&eh->rc.reth.vaddr), + be32_to_cpu(eh->rc.reth.rkey), + 
be32_to_cpu(eh->rc.reth.length)); break; case OP(RC, RDMA_READ_RESPONSE_FIRST): case OP(RC, RDMA_READ_RESPONSE_LAST): @@ -154,19 +154,20 @@ const char *parse_everbs_hdrs( case OP(RC, COMPARE_SWAP): case OP(RC, FETCH_ADD): trace_seq_printf(p, ATOMICETH_PRN, - (unsigned long long)ib_u64_get(eh->atomic_eth.vaddr), - eh->atomic_eth.rkey, - (unsigned long long)ib_u64_get( - (__be32 *)&eh->atomic_eth.swap_data), - (unsigned long long)ib_u64_get( + (unsigned long long)ib_u64_get( + eh->atomic_eth.vaddr), + eh->atomic_eth.rkey, + (unsigned long long)ib_u64_get( + (__be32 *)&eh->atomic_eth.swap_data), + (unsigned long long)ib_u64_get( (__be32 *)&eh->atomic_eth.compare_data)); break; /* deth */ case OP(UD, SEND_ONLY): case OP(UD, SEND_ONLY_WITH_IMMEDIATE): trace_seq_printf(p, DETH_PRN, - be32_to_cpu(eh->ud.deth[0]), - be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); + be32_to_cpu(eh->ud.deth[0]), + be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK); break; } trace_seq_putc(p, 0); @@ -187,12 +188,12 @@ const char *parse_sdma_flags( trace_seq_printf(p, "%s", flags); if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) trace_seq_printf(p, " amode:%u aidx:%u alen:%u", - (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) - & SDMA_DESC1_HEADER_MODE_MASK), - (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) - & SDMA_DESC1_HEADER_INDEX_MASK), - (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) - & SDMA_DESC1_HEADER_DWS_MASK)); + (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) & + SDMA_DESC1_HEADER_MODE_MASK), + (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) & + SDMA_DESC1_HEADER_INDEX_MASK), + (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) & + SDMA_DESC1_HEADER_DWS_MASK)); return ret; } diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h index a13215ffdddd..dfa996715736 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/staging/rdma/hfi1/trace.h @@ -76,81 +76,77 @@ __print_symbolic(etype, \ #define TRACE_SYSTEM hfi1_rx TRACE_EVENT(hfi1_rcvhdr, - TP_PROTO(struct hfi1_devdata *dd, - u64 eflags, - u32 ctxt, - u32 etype, - u32 hlen, - u32 tlen, - u32 updegr, - u32 etail), - TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - __field(u64, eflags) - __field(u32, ctxt) - __field(u32, etype) - __field(u32, hlen) - __field(u32, tlen) - __field(u32, updegr) - __field(u32, etail) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd); - __entry->eflags = eflags; - __entry->ctxt = ctxt; - __entry->etype = etype; - __entry->hlen = hlen; - __entry->tlen = tlen; - __entry->updegr = updegr; - __entry->etail = etail; - ), - TP_printk( -"[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d", - __get_str(dev), - __entry->ctxt, - __entry->eflags, - __entry->etype, show_packettype(__entry->etype), - __entry->hlen, - __entry->tlen, - __entry->updegr, - __entry->etail - ) + TP_PROTO(struct hfi1_devdata *dd, + u64 eflags, + u32 ctxt, + u32 etype, + u32 hlen, + u32 tlen, + u32 updegr, + u32 etail + ), + TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail), + TP_STRUCT__entry(DD_DEV_ENTRY(dd) + __field(u64, eflags) + __field(u32, ctxt) + __field(u32, etype) + __field(u32, hlen) + __field(u32, tlen) + __field(u32, updegr) + __field(u32, etail) + ), + TP_fast_assign(DD_DEV_ASSIGN(dd); + __entry->eflags = eflags; + __entry->ctxt = ctxt; + __entry->etype = etype; + __entry->hlen = hlen; + __entry->tlen = tlen; + __entry->updegr = updegr; + __entry->etail = etail; + ), + TP_printk( + "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d", + __get_str(dev), + 
__entry->ctxt, + __entry->eflags, + __entry->etype, show_packettype(__entry->etype), + __entry->hlen, + __entry->tlen, + __entry->updegr, + __entry->etail + ) ); TRACE_EVENT(hfi1_receive_interrupt, - TP_PROTO(struct hfi1_devdata *dd, u32 ctxt), - TP_ARGS(dd, ctxt), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - __field(u32, ctxt) - __field(u8, slow_path) - __field(u8, dma_rtail) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd); - __entry->ctxt = ctxt; - if (dd->rcd[ctxt]->do_interrupt == - &handle_receive_interrupt) { - __entry->slow_path = 1; - __entry->dma_rtail = 0xFF; - } else if (dd->rcd[ctxt]->do_interrupt == - &handle_receive_interrupt_dma_rtail){ - __entry->dma_rtail = 1; - __entry->slow_path = 0; - } else if (dd->rcd[ctxt]->do_interrupt == - &handle_receive_interrupt_nodma_rtail) { - __entry->dma_rtail = 0; - __entry->slow_path = 0; - } - ), - TP_printk( - "[%s] ctxt %d SlowPath: %d DmaRtail: %d", - __get_str(dev), - __entry->ctxt, - __entry->slow_path, - __entry->dma_rtail - ) + TP_PROTO(struct hfi1_devdata *dd, u32 ctxt), + TP_ARGS(dd, ctxt), + TP_STRUCT__entry(DD_DEV_ENTRY(dd) + __field(u32, ctxt) + __field(u8, slow_path) + __field(u8, dma_rtail) + ), + TP_fast_assign(DD_DEV_ASSIGN(dd); + __entry->ctxt = ctxt; + if (dd->rcd[ctxt]->do_interrupt == + &handle_receive_interrupt) { + __entry->slow_path = 1; + __entry->dma_rtail = 0xFF; + } else if (dd->rcd[ctxt]->do_interrupt == + &handle_receive_interrupt_dma_rtail){ + __entry->dma_rtail = 1; + __entry->slow_path = 0; + } else if (dd->rcd[ctxt]->do_interrupt == + &handle_receive_interrupt_nodma_rtail) { + __entry->dma_rtail = 0; + __entry->slow_path = 0; + } + ), + TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d", + __get_str(dev), + __entry->ctxt, + __entry->slow_path, + __entry->dma_rtail + ) ); TRACE_EVENT(hfi1_exp_tid_reg, @@ -281,78 +277,72 @@ TRACE_EVENT(hfi1_mmu_invalidate, #define TRACE_SYSTEM hfi1_tx TRACE_EVENT(hfi1_piofree, - TP_PROTO(struct send_context *sc, int extra), - TP_ARGS(sc, extra), - TP_STRUCT__entry( - DD_DEV_ENTRY(sc->dd) - __field(u32, sw_index) - __field(u32, hw_context) - __field(int, extra) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sc->dd); - __entry->sw_index = sc->sw_index; - __entry->hw_context = sc->hw_context; - __entry->extra = extra; - ), - TP_printk( - "[%s] ctxt %u(%u) extra %d", - __get_str(dev), - __entry->sw_index, - __entry->hw_context, - __entry->extra - ) + TP_PROTO(struct send_context *sc, int extra), + TP_ARGS(sc, extra), + TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd) + __field(u32, sw_index) + __field(u32, hw_context) + __field(int, extra) + ), + TP_fast_assign(DD_DEV_ASSIGN(sc->dd); + __entry->sw_index = sc->sw_index; + __entry->hw_context = sc->hw_context; + __entry->extra = extra; + ), + TP_printk("[%s] ctxt %u(%u) extra %d", + __get_str(dev), + __entry->sw_index, + __entry->hw_context, + __entry->extra + ) ); TRACE_EVENT(hfi1_wantpiointr, - TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl), - TP_ARGS(sc, needint, credit_ctrl), - TP_STRUCT__entry( - DD_DEV_ENTRY(sc->dd) - __field(u32, sw_index) - __field(u32, hw_context) - __field(u32, needint) - __field(u64, credit_ctrl) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sc->dd); - __entry->sw_index = sc->sw_index; - __entry->hw_context = sc->hw_context; - __entry->needint = needint; - __entry->credit_ctrl = credit_ctrl; - ), - TP_printk( - "[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx", - __get_str(dev), - __entry->sw_index, - __entry->hw_context, - __entry->needint, - (unsigned long long)__entry->credit_ctrl - ) + TP_PROTO(struct send_context *sc, u32 
needint, u64 credit_ctrl), + TP_ARGS(sc, needint, credit_ctrl), + TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd) + __field(u32, sw_index) + __field(u32, hw_context) + __field(u32, needint) + __field(u64, credit_ctrl) + ), + TP_fast_assign(DD_DEV_ASSIGN(sc->dd); + __entry->sw_index = sc->sw_index; + __entry->hw_context = sc->hw_context; + __entry->needint = needint; + __entry->credit_ctrl = credit_ctrl; + ), + TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx", + __get_str(dev), + __entry->sw_index, + __entry->hw_context, + __entry->needint, + (unsigned long long)__entry->credit_ctrl + ) ); DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template, - TP_PROTO(struct rvt_qp *qp, u32 flags), - TP_ARGS(qp, flags), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) - __field(u32, qpn) - __field(u32, flags) - __field(u32, s_flags) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)) - __entry->flags = flags; - __entry->qpn = qp->ibqp.qp_num; - __entry->s_flags = qp->s_flags; - ), - TP_printk( - "[%s] qpn 0x%x flags 0x%x s_flags 0x%x", - __get_str(dev), - __entry->qpn, - __entry->flags, - __entry->s_flags - ) + TP_PROTO(struct rvt_qp *qp, u32 flags), + TP_ARGS(qp, flags), + TP_STRUCT__entry( + DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) + __field(u32, qpn) + __field(u32, flags) + __field(u32, s_flags) + ), + TP_fast_assign( + DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)) + __entry->flags = flags; + __entry->qpn = qp->ibqp.qp_num; + __entry->s_flags = qp->s_flags; + ), + TP_printk( + "[%s] qpn 0x%x flags 0x%x s_flags 0x%x", + __get_str(dev), + __entry->qpn, + __entry->flags, + __entry->s_flags + ) ); DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup, @@ -367,16 +357,11 @@ DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep, #define TRACE_SYSTEM hfi1_ibhdrs u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr); -const char *parse_everbs_hdrs( - struct trace_seq *p, - u8 opcode, - void *ehdrs); +const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs); #define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs) -const char *parse_sdma_flags( - struct trace_seq *p, - u64 desc0, u64 desc1); +const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1); #define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1) @@ -433,117 +418,115 @@ __print_symbolic(opcode, \ #define EHDR_PRN "%s" DECLARE_EVENT_CLASS(hfi1_ibhdr_template, - TP_PROTO(struct hfi1_devdata *dd, - struct hfi1_ib_header *hdr), - TP_ARGS(dd, hdr), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - /* LRH */ - __field(u8, vl) - __field(u8, lver) - __field(u8, sl) - __field(u8, lnh) - __field(u16, dlid) - __field(u16, len) - __field(u16, slid) - /* BTH */ - __field(u8, opcode) - __field(u8, se) - __field(u8, m) - __field(u8, pad) - __field(u8, tver) - __field(u16, pkey) - __field(u8, f) - __field(u8, b) - __field(u32, qpn) - __field(u8, a) - __field(u32, psn) - /* extended headers */ - __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr)) - ), - TP_fast_assign( - struct hfi1_other_headers *ohdr; - - DD_DEV_ASSIGN(dd); - /* LRH */ - __entry->vl = - (u8)(be16_to_cpu(hdr->lrh[0]) >> 12); - __entry->lver = - (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf; - __entry->sl = - (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; - __entry->lnh = - (u8)(be16_to_cpu(hdr->lrh[0]) & 3); - __entry->dlid = - be16_to_cpu(hdr->lrh[1]); - /* allow for larger len */ - __entry->len = - be16_to_cpu(hdr->lrh[2]); - __entry->slid = - be16_to_cpu(hdr->lrh[3]); - /* BTH */ - if (__entry->lnh == HFI1_LRH_BTH) - ohdr = 
&hdr->u.oth; - else - ohdr = &hdr->u.l.oth; - __entry->opcode = - (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; - __entry->se = - (be32_to_cpu(ohdr->bth[0]) >> 23) & 1; - __entry->m = - (be32_to_cpu(ohdr->bth[0]) >> 22) & 1; - __entry->pad = - (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; - __entry->tver = - (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf; - __entry->pkey = - be32_to_cpu(ohdr->bth[0]) & 0xffff; - __entry->f = - (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) - & HFI1_FECN_MASK; - __entry->b = - (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) - & HFI1_BECN_MASK; - __entry->qpn = - be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; - __entry->a = - (be32_to_cpu(ohdr->bth[2]) >> 31) & 1; - /* allow for larger PSN */ - __entry->psn = - be32_to_cpu(ohdr->bth[2]) & 0x7fffffff; - /* extended headers */ - memcpy( - __get_dynamic_array(ehdrs), - &ohdr->u, - ibhdr_exhdr_len(hdr)); - ), - TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN, - __get_str(dev), - /* LRH */ - __entry->vl, - __entry->lver, - __entry->sl, - __entry->lnh, show_lnh(__entry->lnh), - __entry->dlid, - __entry->len, - __entry->slid, - /* BTH */ - __entry->opcode, show_ib_opcode(__entry->opcode), - __entry->se, - __entry->m, - __entry->pad, - __entry->tver, - __entry->pkey, - __entry->f, - __entry->b, - __entry->qpn, - __entry->a, - __entry->psn, - /* extended headers */ - __parse_ib_ehdrs( - __entry->opcode, - (void *)__get_dynamic_array(ehdrs)) - ) + TP_PROTO(struct hfi1_devdata *dd, + struct hfi1_ib_header *hdr), + TP_ARGS(dd, hdr), + TP_STRUCT__entry( + DD_DEV_ENTRY(dd) + /* LRH */ + __field(u8, vl) + __field(u8, lver) + __field(u8, sl) + __field(u8, lnh) + __field(u16, dlid) + __field(u16, len) + __field(u16, slid) + /* BTH */ + __field(u8, opcode) + __field(u8, se) + __field(u8, m) + __field(u8, pad) + __field(u8, tver) + __field(u16, pkey) + __field(u8, f) + __field(u8, b) + __field(u32, qpn) + __field(u8, a) + __field(u32, psn) + /* extended headers */ + __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr)) + ), + TP_fast_assign( + struct hfi1_other_headers *ohdr; + + DD_DEV_ASSIGN(dd); + /* LRH */ + __entry->vl = + (u8)(be16_to_cpu(hdr->lrh[0]) >> 12); + __entry->lver = + (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf; + __entry->sl = + (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; + __entry->lnh = + (u8)(be16_to_cpu(hdr->lrh[0]) & 3); + __entry->dlid = + be16_to_cpu(hdr->lrh[1]); + /* allow for larger len */ + __entry->len = + be16_to_cpu(hdr->lrh[2]); + __entry->slid = + be16_to_cpu(hdr->lrh[3]); + /* BTH */ + if (__entry->lnh == HFI1_LRH_BTH) + ohdr = &hdr->u.oth; + else + ohdr = &hdr->u.l.oth; + __entry->opcode = + (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; + __entry->se = + (be32_to_cpu(ohdr->bth[0]) >> 23) & 1; + __entry->m = + (be32_to_cpu(ohdr->bth[0]) >> 22) & 1; + __entry->pad = + (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; + __entry->tver = + (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf; + __entry->pkey = + be32_to_cpu(ohdr->bth[0]) & 0xffff; + __entry->f = + (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) & + HFI1_FECN_MASK; + __entry->b = + (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) & + HFI1_BECN_MASK; + __entry->qpn = + be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; + __entry->a = + (be32_to_cpu(ohdr->bth[2]) >> 31) & 1; + /* allow for larger PSN */ + __entry->psn = + be32_to_cpu(ohdr->bth[2]) & 0x7fffffff; + /* extended headers */ + memcpy(__get_dynamic_array(ehdrs), &ohdr->u, + ibhdr_exhdr_len(hdr)); + ), + TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN, + __get_str(dev), + /* LRH */ + __entry->vl, + __entry->lver, + __entry->sl, + 
__entry->lnh, show_lnh(__entry->lnh), + __entry->dlid, + __entry->len, + __entry->slid, + /* BTH */ + __entry->opcode, show_ib_opcode(__entry->opcode), + __entry->se, + __entry->m, + __entry->pad, + __entry->tver, + __entry->pkey, + __entry->f, + __entry->b, + __entry->qpn, + __entry->a, + __entry->psn, + /* extended headers */ + __parse_ib_ehdrs( + __entry->opcode, + (void *)__get_dynamic_array(ehdrs)) + ) ); DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr, @@ -562,13 +545,13 @@ DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr, #define TRACE_SYSTEM hfi1_snoop TRACE_EVENT(snoop_capture, - TP_PROTO(struct hfi1_devdata *dd, - int hdr_len, - struct hfi1_ib_header *hdr, - int data_len, - void *data), - TP_ARGS(dd, hdr_len, hdr, data_len, data), - TP_STRUCT__entry( + TP_PROTO(struct hfi1_devdata *dd, + int hdr_len, + struct hfi1_ib_header *hdr, + int data_len, + void *data), + TP_ARGS(dd, hdr_len, hdr, data_len, data), + TP_STRUCT__entry( DD_DEV_ENTRY(dd) __field(u16, slid) __field(u16, dlid) @@ -581,8 +564,8 @@ TRACE_EVENT(snoop_capture, __field(u8, lnh) __dynamic_array(u8, raw_hdr, hdr_len) __dynamic_array(u8, raw_pkt, data_len) - ), - TP_fast_assign( + ), + TP_fast_assign( struct hfi1_other_headers *ohdr; __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3); @@ -601,8 +584,9 @@ TRACE_EVENT(snoop_capture, __entry->data_len = data_len; memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len); memcpy(__get_dynamic_array(raw_pkt), data, data_len); - ), - TP_printk("[%s] " SNOOP_PRN, + ), + TP_printk( + "[%s] " SNOOP_PRN, __get_str(dev), __entry->slid, __entry->dlid, @@ -613,7 +597,7 @@ TRACE_EVENT(snoop_capture, __entry->pkey, __entry->hdr_len, __entry->data_len - ) + ) ); #undef TRACE_SYSTEM @@ -625,41 +609,39 @@ TRACE_EVENT(snoop_capture, TRACE_EVENT(hfi1_uctxtdata, TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt), TP_ARGS(dd, uctxt), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - __field(unsigned, ctxt) - __field(u32, credits) - __field(u64, hw_free) - __field(u64, piobase) - __field(u16, rcvhdrq_cnt) - __field(u64, rcvhdrq_phys) - __field(u32, eager_cnt) - __field(u64, rcvegr_phys) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd); - __entry->ctxt = uctxt->ctxt; - __entry->credits = uctxt->sc->credits; - __entry->hw_free = (u64)uctxt->sc->hw_free; - __entry->piobase = (u64)uctxt->sc->base_addr; - __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt; - __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys; - __entry->eager_cnt = uctxt->egrbufs.alloced; - __entry->rcvegr_phys = uctxt->egrbufs.rcvtids[0].phys; - ), - TP_printk( - "[%s] ctxt %u " UCTXT_FMT, - __get_str(dev), - __entry->ctxt, - __entry->credits, - __entry->hw_free, - __entry->piobase, - __entry->rcvhdrq_cnt, - __entry->rcvhdrq_phys, - __entry->eager_cnt, - __entry->rcvegr_phys - ) - ); + TP_STRUCT__entry(DD_DEV_ENTRY(dd) + __field(unsigned, ctxt) + __field(u32, credits) + __field(u64, hw_free) + __field(u64, piobase) + __field(u16, rcvhdrq_cnt) + __field(u64, rcvhdrq_phys) + __field(u32, eager_cnt) + __field(u64, rcvegr_phys) + ), + TP_fast_assign(DD_DEV_ASSIGN(dd); + __entry->ctxt = uctxt->ctxt; + __entry->credits = uctxt->sc->credits; + __entry->hw_free = (u64)uctxt->sc->hw_free; + __entry->piobase = (u64)uctxt->sc->base_addr; + __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt; + __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys; + __entry->eager_cnt = uctxt->egrbufs.alloced; + __entry->rcvegr_phys = + uctxt->egrbufs.rcvtids[0].phys; + ), + TP_printk("[%s] ctxt %u " UCTXT_FMT, + __get_str(dev), + __entry->ctxt, + __entry->credits, + __entry->hw_free, + __entry->piobase, + 
__entry->rcvhdrq_cnt, + __entry->rcvhdrq_phys, + __entry->eager_cnt, + __entry->rcvegr_phys + ) +); #define CINFO_FMT \ "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u" @@ -667,38 +649,35 @@ TRACE_EVENT(hfi1_ctxt_info, TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt, struct hfi1_ctxt_info cinfo), TP_ARGS(dd, ctxt, subctxt, cinfo), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - __field(unsigned, ctxt) - __field(unsigned, subctxt) - __field(u16, egrtids) - __field(u16, rcvhdrq_cnt) - __field(u16, rcvhdrq_size) - __field(u16, sdma_ring_size) - __field(u32, rcvegr_size) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd); - __entry->ctxt = ctxt; - __entry->subctxt = subctxt; - __entry->egrtids = cinfo.egrtids; - __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt; - __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize; - __entry->sdma_ring_size = cinfo.sdma_ring_size; - __entry->rcvegr_size = cinfo.rcvegr_size; - ), - TP_printk( - "[%s] ctxt %u:%u " CINFO_FMT, - __get_str(dev), - __entry->ctxt, - __entry->subctxt, - __entry->egrtids, - __entry->rcvegr_size, - __entry->rcvhdrq_cnt, - __entry->rcvhdrq_size, - __entry->sdma_ring_size - ) - ); + TP_STRUCT__entry(DD_DEV_ENTRY(dd) + __field(unsigned, ctxt) + __field(unsigned, subctxt) + __field(u16, egrtids) + __field(u16, rcvhdrq_cnt) + __field(u16, rcvhdrq_size) + __field(u16, sdma_ring_size) + __field(u32, rcvegr_size) + ), + TP_fast_assign(DD_DEV_ASSIGN(dd); + __entry->ctxt = ctxt; + __entry->subctxt = subctxt; + __entry->egrtids = cinfo.egrtids; + __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt; + __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize; + __entry->sdma_ring_size = cinfo.sdma_ring_size; + __entry->rcvegr_size = cinfo.rcvegr_size; + ), + TP_printk("[%s] ctxt %u:%u " CINFO_FMT, + __get_str(dev), + __entry->ctxt, + __entry->subctxt, + __entry->egrtids, + __entry->rcvegr_size, + __entry->rcvhdrq_cnt, + __entry->rcvhdrq_size, + __entry->sdma_ring_size + ) +); #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_sma @@ -712,49 +691,46 @@ TRACE_EVENT(hfi1_ctxt_info, ) DECLARE_EVENT_CLASS(hfi1_bct_template, - TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc), - TP_ARGS(dd, bc), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - __dynamic_array(u8, bct, sizeof(*bc)) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd); - memcpy( - __get_dynamic_array(bct), - bc, - sizeof(*bc)); - ), - TP_printk(BCT_FORMAT, - BCT(overall_shared_limit), - - BCT(vl[0].dedicated), - BCT(vl[0].shared), - - BCT(vl[1].dedicated), - BCT(vl[1].shared), - - BCT(vl[2].dedicated), - BCT(vl[2].shared), - - BCT(vl[3].dedicated), - BCT(vl[3].shared), - - BCT(vl[4].dedicated), - BCT(vl[4].shared), - - BCT(vl[5].dedicated), - BCT(vl[5].shared), - - BCT(vl[6].dedicated), - BCT(vl[6].shared), - - BCT(vl[7].dedicated), - BCT(vl[7].shared), - - BCT(vl[15].dedicated), - BCT(vl[15].shared) - ) + TP_PROTO(struct hfi1_devdata *dd, + struct buffer_control *bc), + TP_ARGS(dd, bc), + TP_STRUCT__entry(DD_DEV_ENTRY(dd) + __dynamic_array(u8, bct, sizeof(*bc)) + ), + TP_fast_assign(DD_DEV_ASSIGN(dd); + memcpy(__get_dynamic_array(bct), bc, + sizeof(*bc)); + ), + TP_printk(BCT_FORMAT, + BCT(overall_shared_limit), + + BCT(vl[0].dedicated), + BCT(vl[0].shared), + + BCT(vl[1].dedicated), + BCT(vl[1].shared), + + BCT(vl[2].dedicated), + BCT(vl[2].shared), + + BCT(vl[3].dedicated), + BCT(vl[3].shared), + + BCT(vl[4].dedicated), + BCT(vl[4].shared), + + BCT(vl[5].dedicated), + BCT(vl[5].shared), + + BCT(vl[6].dedicated), + BCT(vl[6].shared), + + BCT(vl[7].dedicated), + BCT(vl[7].shared), + + BCT(vl[15].dedicated), + 
BCT(vl[15].shared) + ) ); DEFINE_EVENT(hfi1_bct_template, bct_set, @@ -769,252 +745,209 @@ DEFINE_EVENT(hfi1_bct_template, bct_get, #define TRACE_SYSTEM hfi1_sdma TRACE_EVENT(hfi1_sdma_descriptor, - TP_PROTO( - struct sdma_engine *sde, - u64 desc0, - u64 desc1, - u16 e, - void *descp), + TP_PROTO(struct sdma_engine *sde, + u64 desc0, + u64 desc1, + u16 e, + void *descp), TP_ARGS(sde, desc0, desc1, e, descp), - TP_STRUCT__entry( - DD_DEV_ENTRY(sde->dd) - __field(void *, descp) - __field(u64, desc0) - __field(u64, desc1) - __field(u16, e) - __field(u8, idx) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sde->dd); - __entry->desc0 = desc0; - __entry->desc1 = desc1; - __entry->idx = sde->this_idx; - __entry->descp = descp; - __entry->e = e; - ), + TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd) + __field(void *, descp) + __field(u64, desc0) + __field(u64, desc1) + __field(u16, e) + __field(u8, idx) + ), + TP_fast_assign(DD_DEV_ASSIGN(sde->dd); + __entry->desc0 = desc0; + __entry->desc1 = desc1; + __entry->idx = sde->this_idx; + __entry->descp = descp; + __entry->e = e; + ), TP_printk( - "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u", - __get_str(dev), - __entry->idx, - __parse_sdma_flags(__entry->desc0, __entry->desc1), - (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) - & SDMA_DESC0_PHY_ADDR_MASK, - (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) - & SDMA_DESC1_GENERATION_MASK), - (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) - & SDMA_DESC0_BYTE_COUNT_MASK), - __entry->desc0, - __entry->desc1, - __entry->descp, - __entry->e - ) + "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u", + __get_str(dev), + __entry->idx, + __parse_sdma_flags(__entry->desc0, __entry->desc1), + (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) & + SDMA_DESC0_PHY_ADDR_MASK, + (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) & + SDMA_DESC1_GENERATION_MASK), + (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) & + SDMA_DESC0_BYTE_COUNT_MASK), + __entry->desc0, + __entry->desc1, + __entry->descp, + __entry->e + ) ); TRACE_EVENT(hfi1_sdma_engine_select, - TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx), - TP_ARGS(dd, sel, vl, idx), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - __field(u32, sel) - __field(u8, vl) - __field(u8, idx) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd); - __entry->sel = sel; - __entry->vl = vl; - __entry->idx = idx; - ), - TP_printk( - "[%s] selecting SDE %u sel 0x%x vl %u", - __get_str(dev), - __entry->idx, - __entry->sel, - __entry->vl - ) + TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx), + TP_ARGS(dd, sel, vl, idx), + TP_STRUCT__entry(DD_DEV_ENTRY(dd) + __field(u32, sel) + __field(u8, vl) + __field(u8, idx) + ), + TP_fast_assign(DD_DEV_ASSIGN(dd); + __entry->sel = sel; + __entry->vl = vl; + __entry->idx = idx; + ), + TP_printk("[%s] selecting SDE %u sel 0x%x vl %u", + __get_str(dev), + __entry->idx, + __entry->sel, + __entry->vl + ) ); DECLARE_EVENT_CLASS(hfi1_sdma_engine_class, - TP_PROTO( - struct sdma_engine *sde, - u64 status - ), - TP_ARGS(sde, status), - TP_STRUCT__entry( - DD_DEV_ENTRY(sde->dd) - __field(u64, status) - __field(u8, idx) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sde->dd); - __entry->status = status; - __entry->idx = sde->this_idx; - ), - TP_printk( - "[%s] SDE(%u) status %llx", - __get_str(dev), - __entry->idx, - (unsigned long long)__entry->status - ) + TP_PROTO(struct sdma_engine *sde, u64 status), + TP_ARGS(sde, status), + TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd) + __field(u64, status) + 
__field(u8, idx) + ), + TP_fast_assign(DD_DEV_ASSIGN(sde->dd); + __entry->status = status; + __entry->idx = sde->this_idx; + ), + TP_printk("[%s] SDE(%u) status %llx", + __get_str(dev), + __entry->idx, + (unsigned long long)__entry->status + ) ); DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt, - TP_PROTO( - struct sdma_engine *sde, - u64 status - ), - TP_ARGS(sde, status) + TP_PROTO(struct sdma_engine *sde, u64 status), + TP_ARGS(sde, status) ); DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress, - TP_PROTO( - struct sdma_engine *sde, - u64 status - ), - TP_ARGS(sde, status) + TP_PROTO(struct sdma_engine *sde, u64 status), + TP_ARGS(sde, status) ); DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad, - TP_PROTO( - struct sdma_engine *sde, - int aidx - ), - TP_ARGS(sde, aidx), - TP_STRUCT__entry( - DD_DEV_ENTRY(sde->dd) - __field(int, aidx) - __field(u8, idx) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sde->dd); - __entry->idx = sde->this_idx; - __entry->aidx = aidx; - ), - TP_printk( - "[%s] SDE(%u) aidx %d", - __get_str(dev), - __entry->idx, - __entry->aidx - ) + TP_PROTO(struct sdma_engine *sde, int aidx), + TP_ARGS(sde, aidx), + TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd) + __field(int, aidx) + __field(u8, idx) + ), + TP_fast_assign(DD_DEV_ASSIGN(sde->dd); + __entry->idx = sde->this_idx; + __entry->aidx = aidx; + ), + TP_printk("[%s] SDE(%u) aidx %d", + __get_str(dev), + __entry->idx, + __entry->aidx + ) ); DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate, - TP_PROTO( - struct sdma_engine *sde, - int aidx - ), + TP_PROTO(struct sdma_engine *sde, int aidx), TP_ARGS(sde, aidx)); DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate, - TP_PROTO( - struct sdma_engine *sde, - int aidx - ), + TP_PROTO(struct sdma_engine *sde, int aidx), TP_ARGS(sde, aidx)); #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER TRACE_EVENT(hfi1_sdma_progress, - TP_PROTO( - struct sdma_engine *sde, - u16 hwhead, - u16 swhead, - struct sdma_txreq *txp - ), - TP_ARGS(sde, hwhead, swhead, txp), - TP_STRUCT__entry( - DD_DEV_ENTRY(sde->dd) - __field(u64, sn) - __field(u16, hwhead) - __field(u16, swhead) - __field(u16, txnext) - __field(u16, tx_tail) - __field(u16, tx_head) - __field(u8, idx) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sde->dd); - __entry->hwhead = hwhead; - __entry->swhead = swhead; - __entry->tx_tail = sde->tx_tail; - __entry->tx_head = sde->tx_head; - __entry->txnext = txp ? txp->next_descq_idx : ~0; - __entry->idx = sde->this_idx; - __entry->sn = txp ? txp->sn : ~0; - ), - TP_printk( - "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u", - __get_str(dev), - __entry->idx, - __entry->sn, - __entry->hwhead, - __entry->swhead, - __entry->txnext, - __entry->tx_head, - __entry->tx_tail - ) + TP_PROTO(struct sdma_engine *sde, + u16 hwhead, + u16 swhead, + struct sdma_txreq *txp + ), + TP_ARGS(sde, hwhead, swhead, txp), + TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd) + __field(u64, sn) + __field(u16, hwhead) + __field(u16, swhead) + __field(u16, txnext) + __field(u16, tx_tail) + __field(u16, tx_head) + __field(u8, idx) + ), + TP_fast_assign(DD_DEV_ASSIGN(sde->dd); + __entry->hwhead = hwhead; + __entry->swhead = swhead; + __entry->tx_tail = sde->tx_tail; + __entry->tx_head = sde->tx_head; + __entry->txnext = txp ? txp->next_descq_idx : ~0; + __entry->idx = sde->this_idx; + __entry->sn = txp ? 
txp->sn : ~0; + ), + TP_printk( + "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u", + __get_str(dev), + __entry->idx, + __entry->sn, + __entry->hwhead, + __entry->swhead, + __entry->txnext, + __entry->tx_head, + __entry->tx_tail + ) ); #else TRACE_EVENT(hfi1_sdma_progress, - TP_PROTO( - struct sdma_engine *sde, - u16 hwhead, - u16 swhead, - struct sdma_txreq *txp + TP_PROTO(struct sdma_engine *sde, + u16 hwhead, u16 swhead, + struct sdma_txreq *txp ), TP_ARGS(sde, hwhead, swhead, txp), - TP_STRUCT__entry( - DD_DEV_ENTRY(sde->dd) - __field(u16, hwhead) - __field(u16, swhead) - __field(u16, txnext) - __field(u16, tx_tail) - __field(u16, tx_head) - __field(u8, idx) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sde->dd); - __entry->hwhead = hwhead; - __entry->swhead = swhead; - __entry->tx_tail = sde->tx_tail; - __entry->tx_head = sde->tx_head; - __entry->txnext = txp ? txp->next_descq_idx : ~0; - __entry->idx = sde->this_idx; - ), + TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd) + __field(u16, hwhead) + __field(u16, swhead) + __field(u16, txnext) + __field(u16, tx_tail) + __field(u16, tx_head) + __field(u8, idx) + ), + TP_fast_assign(DD_DEV_ASSIGN(sde->dd); + __entry->hwhead = hwhead; + __entry->swhead = swhead; + __entry->tx_tail = sde->tx_tail; + __entry->tx_head = sde->tx_head; + __entry->txnext = txp ? txp->next_descq_idx : ~0; + __entry->idx = sde->this_idx; + ), TP_printk( - "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u", - __get_str(dev), - __entry->idx, - __entry->hwhead, - __entry->swhead, - __entry->txnext, - __entry->tx_head, - __entry->tx_tail - ) + "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u", + __get_str(dev), + __entry->idx, + __entry->hwhead, + __entry->swhead, + __entry->txnext, + __entry->tx_head, + __entry->tx_tail + ) ); #endif DECLARE_EVENT_CLASS(hfi1_sdma_sn, - TP_PROTO( - struct sdma_engine *sde, - u64 sn - ), - TP_ARGS(sde, sn), - TP_STRUCT__entry( - DD_DEV_ENTRY(sde->dd) - __field(u64, sn) - __field(u8, idx) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sde->dd); - __entry->sn = sn; - __entry->idx = sde->this_idx; - ), - TP_printk( - "[%s] SDE(%u) sn %llu", - __get_str(dev), - __entry->idx, - __entry->sn - ) + TP_PROTO(struct sdma_engine *sde, u64 sn), + TP_ARGS(sde, sn), + TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd) + __field(u64, sn) + __field(u8, idx) + ), + TP_fast_assign(DD_DEV_ASSIGN(sde->dd); + __entry->sn = sn; + __entry->idx = sde->this_idx; + ), + TP_printk("[%s] SDE(%u) sn %llu", + __get_str(dev), + __entry->idx, + __entry->sn + ) ); DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn, @@ -1026,10 +959,7 @@ DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn, ); DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn, - TP_PROTO( - struct sdma_engine *sde, - u64 sn - ), + TP_PROTO(struct sdma_engine *sde, u64 sn), TP_ARGS(sde, sn) ); @@ -1230,69 +1160,66 @@ TRACE_EVENT(hfi1_sdma_user_header_ahg, ); TRACE_EVENT(hfi1_sdma_state, - TP_PROTO( - struct sdma_engine *sde, - const char *cstate, - const char *nstate - ), - TP_ARGS(sde, cstate, nstate), - TP_STRUCT__entry( - DD_DEV_ENTRY(sde->dd) - __string(curstate, cstate) - __string(newstate, nstate) - ), - TP_fast_assign( - DD_DEV_ASSIGN(sde->dd); - __assign_str(curstate, cstate); - __assign_str(newstate, nstate); - ), + TP_PROTO(struct sdma_engine *sde, + const char *cstate, + const char *nstate + ), + TP_ARGS(sde, cstate, nstate), + TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd) + __string(curstate, cstate) + __string(newstate, nstate) + ), + TP_fast_assign(DD_DEV_ASSIGN(sde->dd); + 
__assign_str(curstate, cstate); + __assign_str(newstate, nstate); + ), TP_printk("[%s] current state %s new state %s", - __get_str(dev), - __get_str(curstate), - __get_str(newstate) - ) + __get_str(dev), + __get_str(curstate), + __get_str(newstate) + ) ); #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_rc DECLARE_EVENT_CLASS(hfi1_rc_template, - TP_PROTO(struct rvt_qp *qp, u32 psn), - TP_ARGS(qp, psn), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) - __field(u32, qpn) - __field(u32, s_flags) - __field(u32, psn) - __field(u32, s_psn) - __field(u32, s_next_psn) - __field(u32, s_sending_psn) - __field(u32, s_sending_hpsn) - __field(u32, r_psn) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)) - __entry->qpn = qp->ibqp.qp_num; - __entry->s_flags = qp->s_flags; - __entry->psn = psn; - __entry->s_psn = qp->s_psn; - __entry->s_next_psn = qp->s_next_psn; - __entry->s_sending_psn = qp->s_sending_psn; - __entry->s_sending_hpsn = qp->s_sending_hpsn; - __entry->r_psn = qp->r_psn; - ), - TP_printk( - "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x", - __get_str(dev), - __entry->qpn, - __entry->s_flags, - __entry->psn, - __entry->s_psn, - __entry->s_next_psn, - __entry->s_sending_psn, - __entry->s_sending_hpsn, - __entry->r_psn - ) + TP_PROTO(struct rvt_qp *qp, u32 psn), + TP_ARGS(qp, psn), + TP_STRUCT__entry( + DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) + __field(u32, qpn) + __field(u32, s_flags) + __field(u32, psn) + __field(u32, s_psn) + __field(u32, s_next_psn) + __field(u32, s_sending_psn) + __field(u32, s_sending_hpsn) + __field(u32, r_psn) + ), + TP_fast_assign( + DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)) + __entry->qpn = qp->ibqp.qp_num; + __entry->s_flags = qp->s_flags; + __entry->psn = psn; + __entry->s_psn = qp->s_psn; + __entry->s_next_psn = qp->s_next_psn; + __entry->s_sending_psn = qp->s_sending_psn; + __entry->s_sending_hpsn = qp->s_sending_hpsn; + __entry->r_psn = qp->r_psn; + ), + TP_printk( + "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x", + __get_str(dev), + __entry->qpn, + __entry->s_flags, + __entry->psn, + __entry->s_psn, + __entry->s_next_psn, + __entry->s_sending_psn, + __entry->s_sending_hpsn, + __entry->r_psn + ) ); DEFINE_EVENT(hfi1_rc_template, hfi1_rc_sendcomplete, @@ -1319,21 +1246,20 @@ DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error, #define TRACE_SYSTEM hfi1_misc TRACE_EVENT(hfi1_interrupt, - TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry, - int src), - TP_ARGS(dd, is_entry, src), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - __array(char, buf, 64) - __field(int, src) - ), - TP_fast_assign( - DD_DEV_ASSIGN(dd) - is_entry->is_name(__entry->buf, 64, src - is_entry->start); - __entry->src = src; - ), - TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf, - __entry->src) + TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry, + int src), + TP_ARGS(dd, is_entry, src), + TP_STRUCT__entry(DD_DEV_ENTRY(dd) + __array(char, buf, 64) + __field(int, src) + ), + TP_fast_assign(DD_DEV_ASSIGN(dd) + is_entry->is_name(__entry->buf, 64, + src - is_entry->start); + __entry->src = src; + ), + TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf, + __entry->src) ); /* @@ -1348,21 +1274,21 @@ TRACE_EVENT(hfi1_interrupt, #define MAX_MSG_LEN 512 DECLARE_EVENT_CLASS(hfi1_trace_template, - TP_PROTO(const char *function, struct va_format *vaf), - TP_ARGS(function, vaf), - 
TP_STRUCT__entry( - __string(function, function) - __dynamic_array(char, msg, MAX_MSG_LEN) - ), - TP_fast_assign( - __assign_str(function, function); - WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), - MAX_MSG_LEN, vaf->fmt, - *vaf->va) >= MAX_MSG_LEN); - ), - TP_printk("(%s) %s", - __get_str(function), - __get_str(msg)) + TP_PROTO(const char *function, struct va_format *vaf), + TP_ARGS(function, vaf), + TP_STRUCT__entry(__string(function, function) + __dynamic_array(char, msg, MAX_MSG_LEN) + ), + TP_fast_assign(__assign_str(function, function); + WARN_ON_ONCE(vsnprintf + (__get_dynamic_array(msg), + MAX_MSG_LEN, vaf->fmt, + *vaf->va) >= + MAX_MSG_LEN); + ), + TP_printk("(%s) %s", + __get_str(function), + __get_str(msg)) ); /* diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/staging/rdma/hfi1/twsi.c index d7dfdd231669..6c7f198299f6 100644 --- a/drivers/staging/rdma/hfi1/twsi.c +++ b/drivers/staging/rdma/hfi1/twsi.c @@ -131,7 +131,7 @@ static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit) } if (rise_usec <= 0) dd_dev_err(dd, "SCL interface stuck low > %d uSec\n", - SCL_WAIT_USEC); + SCL_WAIT_USEC); } i2c_wait_for_writes(dd, target); } diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 89154014e8ae..e24cb6253fd7 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -318,7 +318,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) sl = ibp->sc_to_sl[sc5]; process_becn(ppd, sl, rlid, lqpn, rqpn, - IB_CC_SVCTYPE_UC); + IB_CC_SVCTYPE_UC); } if (bth1 & HFI1_FECN_SMASK) { diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index c3f069725be6..ea8f706cf4ec 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -320,9 +320,10 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) || ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) { lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); - if (unlikely(!loopback && (lid == ppd->lid || - (lid == be16_to_cpu(IB_LID_PERMISSIVE) && - qp->ibqp.qp_type == IB_QPT_GSI)))) { + if (unlikely(!loopback && + (lid == ppd->lid || + (lid == be16_to_cpu(IB_LID_PERMISSIVE) && + qp->ibqp.qp_type == IB_QPT_GSI)))) { unsigned long flags; /* * If DMAs are in progress, we can't generate diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index b6d09267492b..ce94cbcc689d 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -728,7 +728,7 @@ free_req: } static inline u32 compute_data_length(struct user_sdma_request *req, - struct user_sdma_txreq *tx) + struct user_sdma_txreq *tx) { /* * Determine the proper size of the packet data. 
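(The realignment hunks in this patch — e.g. the twsi.c, uc.c, ud.c and user_sdma.c changes above — all apply the same checkpatch rule, "Alignment should match open parenthesis": a continuation line is indented to the column just past the opening parenthesis of the call it continues. A minimal sketch of the convention, reusing the dd_dev_err() call fixed in twsi.c:

	dd_dev_err(dd, "SCL interface stuck low > %d uSec\n",
		   SCL_WAIT_USEC);

The second argument now starts directly under the first argument's column instead of at an arbitrary indent.)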
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index c412f1c6637c..275af1909d39 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -970,7 +970,8 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd, /* The most likely matching pkey has index qp->s_pkey_index */ if (unlikely(!egress_pkey_matches_entry(pkey, - ppd->pkeys[qp->s_pkey_index]))) { + ppd->pkeys + [qp->s_pkey_index]))) { /* no match - try the entire table */ for (; i < MAX_PKEY_VALUES; i++) { if (egress_pkey_matches_entry(pkey, ppd->pkeys[i])) -- cgit v1.2.3-59-g8ed1b From e490974e675e8ddec795137c1db7f38e0308cbcd Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:22:00 -0800 Subject: staging/rdma/hfi1: Add braces on all arms of statement Add braces on all arms of statements to fix checkpatch check: CHECK: braces {} should be used on all arms of this statement Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 8 ++++---- drivers/staging/rdma/hfi1/driver.c | 16 ++++++++-------- drivers/staging/rdma/hfi1/file_ops.c | 9 ++++++--- drivers/staging/rdma/hfi1/init.c | 3 ++- drivers/staging/rdma/hfi1/mad.c | 23 ++++++++++++----------- drivers/staging/rdma/hfi1/pcie.c | 3 ++- drivers/staging/rdma/hfi1/qp.c | 13 ++++++++----- drivers/staging/rdma/hfi1/rc.c | 25 ++++++++++++++----------- drivers/staging/rdma/hfi1/sdma.c | 3 ++- drivers/staging/rdma/hfi1/twsi.c | 4 ++-- drivers/staging/rdma/hfi1/uc.c | 31 ++++++++++++++++--------------- drivers/staging/rdma/hfi1/ud.c | 23 ++++++++++++++--------- drivers/staging/rdma/hfi1/user_sdma.c | 6 ++++-- drivers/staging/rdma/hfi1/verbs.c | 11 ++++++----- 14 files changed, 100 insertions(+), 78 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index ce61883d146c..0a774656cb74 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13829,9 +13829,9 @@ int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey) int ret = 0; u64 reg; - if (ctxt < dd->num_rcv_contexts) + if (ctxt < dd->num_rcv_contexts) { rcd = dd->rcd[ctxt]; - else { + } else { ret = -EINVAL; goto done; } @@ -13857,9 +13857,9 @@ int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt) int ret = 0; u64 reg; - if (ctxt < dd->num_rcv_contexts) + if (ctxt < dd->num_rcv_contexts) { rcd = dd->rcd[ctxt]; - else { + } else { ret = -EINVAL; goto done; } diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 50a3b5adab0a..090b7010c34f 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -302,9 +302,9 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, goto drop; /* Check for GRH */ - if (lnh == HFI1_LRH_BTH) + if (lnh == HFI1_LRH_BTH) { ohdr = &hdr->u.oth; - else if (lnh == HFI1_LRH_GRH) { + } else if (lnh == HFI1_LRH_GRH) { u32 vtf; ohdr = &hdr->u.l.oth; @@ -314,9 +314,9 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) goto drop; rcv_flags |= HFI1_HAS_GRH; - } else + } else { goto drop; - + } /* Get the destination QP number. 
*/ qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) { @@ -618,14 +618,14 @@ static void __prescan_rxq(struct hfi1_packet *packet) hfi1_get_msgheader(dd, rhf_addr); lnh = be16_to_cpu(hdr->lrh[0]) & 3; - if (lnh == HFI1_LRH_BTH) + if (lnh == HFI1_LRH_BTH) { ohdr = &hdr->u.oth; - else if (lnh == HFI1_LRH_GRH) { + } else if (lnh == HFI1_LRH_GRH) { ohdr = &hdr->u.l.oth; grh = &hdr->u.l.grh; - } else + } else { goto next; /* just in case */ - + } bth1 = be32_to_cpu(ohdr->bth[1]); is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 1bd1545d083d..7846f31759e0 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -399,8 +399,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, ret = sc_enable(sc); hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt->ctxt); - } else + } else { ret = sc_restart(sc); + } if (!ret) sc_return_credits(sc); break; @@ -1409,8 +1410,9 @@ static unsigned int poll_next(struct file *fp, set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags); hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt); pollflag = 0; - } else + } else { pollflag = POLLIN | POLLRDNORM; + } spin_unlock_irq(&dd->uctxt_lock); return pollflag; @@ -1488,8 +1490,9 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt, if (uctxt->rcvhdrtail_kvaddr) clear_rcvhdrtail(uctxt); rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB; - } else + } else { rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS; + } hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt); /* always; new head should be equal to new tail; see above */ bail: diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index a7210593e4fd..cb75fc7bb090 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -1713,8 +1713,9 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) rcd->egrbufs.buffers[j].len)) { j++; offset = 0; - } else + } else { offset += new_size; + } } rcd->egrbufs.rcvtid_size = new_size; } diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 7619b752789e..305b7ae0daae 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -135,15 +135,16 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) struct ib_ah *ah; ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid); - if (IS_ERR(ah)) + if (IS_ERR(ah)) { ret = PTR_ERR(ah); - else { + } else { send_buf->ah = ah; ibp->rvp.sm_ah = ibah_to_rvtah(ah); ret = 0; } - } else + } else { ret = -EINVAL; + } } else { send_buf->ah = &ibp->rvp.sm_ah->ibah; ret = 0; @@ -769,9 +770,9 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data, p[i] = cpu_to_be16(q[i]); if (resp_len) *resp_len += size; - } else + } else { smp->status |= IB_SMP_INVALID_FIELD; - + } return reply((struct ib_mad_hdr *)smp); } @@ -977,15 +978,15 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp, break; /* FALLTHROUGH */ case IB_PORT_DOWN: - if (phys_state == IB_PORTPHYSSTATE_NOP) + if (phys_state == IB_PORTPHYSSTATE_NOP) { link_state = HLS_DN_DOWNDEF; - else if (phys_state == IB_PORTPHYSSTATE_POLLING) { + } else if (phys_state == IB_PORTPHYSSTATE_POLLING) { link_state = HLS_DN_POLL; set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE, 0, OPA_LINKDOWN_REASON_FM_BOUNCE); - } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) + } else if (phys_state == 
IB_PORTPHYSSTATE_DISABLED) { link_state = HLS_DN_DISABLE; - else { + } else { pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n", phys_state); smp->status |= IB_SMP_INVALID_FIELD; @@ -1193,9 +1194,9 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, set_link_width_downgrade_enabled(ppd, lwe); call_link_downgrade_policy = 1; } - } else + } else { smp->status |= IB_SMP_INVALID_FIELD; - + } lse = be16_to_cpu(pi->link_speed.enabled); if (lse) { if (lse & be16_to_cpu(pi->link_speed.supported)) diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index cbd61cf6549a..2b0281c3546b 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -123,8 +123,9 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) goto bail; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - } else + } else { ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } if (ret) { hfi1_early_err(&pdev->dev, "Unable to set DMA consistent mask: %d\n", ret); diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 90246737a3a1..4f40c987303b 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -326,12 +326,15 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp) x = (min + max) / 2; if (credit_table[x] == credits) break; - if (credit_table[x] > credits) + if (credit_table[x] > credits) { max = x; - else if (min == x) - break; - else - min = x; + } else { + if (min == x) { + break; + } else { + min = x; + } + } } aeth |= x << HFI1_AETH_CREDIT_SHIFT; } diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 28ff638cd371..70a6e63eefcd 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -505,9 +505,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) len = pmtu; break; } - if (wqe->wr.opcode == IB_WR_SEND) + if (wqe->wr.opcode == IB_WR_SEND) { qp->s_state = OP(SEND_ONLY); - else { + } else { qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; @@ -542,9 +542,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) len = pmtu; break; } - if (wqe->wr.opcode == IB_WR_RDMA_WRITE) + if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { qp->s_state = OP(RDMA_WRITE_ONLY); - else { + } else { qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); /* Immediate data comes after RETH */ @@ -672,9 +672,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) middle = HFI1_CAP_IS_KSET(SDMA_AHG); break; } - if (wqe->wr.opcode == IB_WR_SEND) + if (wqe->wr.opcode == IB_WR_SEND) { qp->s_state = OP(SEND_LAST); - else { + } else { qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; @@ -712,9 +712,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) middle = HFI1_CAP_IS_KSET(SDMA_AHG); break; } - if (wqe->wr.opcode == IB_WR_RDMA_WRITE) + if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { qp->s_state = OP(RDMA_WRITE_LAST); - else { + } else { qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; @@ -1013,10 +1013,12 @@ static void restart_rc(struct rvt_qp *qp, u32 psn, int wait) hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); return; - } else /* need to handle delayed completion */ + } else { /* need to handle delayed 
completion */ return; - } else + } + } else { qp->s_retry--; + } ibp = to_iport(qp->ibqp.device, qp->port_num); if (wqe->wr.opcode == IB_WR_RDMA_READ) @@ -1612,8 +1614,9 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp, val = ((u64)be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]); - } else + } else { val = 0; + } if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || opcode != OP(RDMA_READ_RESPONSE_FIRST)) goto ack_done; diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index 74086eabbb25..f2f76b371e8b 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -2009,8 +2009,9 @@ static int sdma_check_progress( ret = wait->sleep(sde, wait, tx, seq); if (ret == -EAGAIN) sde->desc_avail = sdma_descq_freecnt(sde); - } else + } else { ret = -EBUSY; + } return ret; } diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/staging/rdma/hfi1/twsi.c index 6c7f198299f6..1cc07485723e 100644 --- a/drivers/staging/rdma/hfi1/twsi.c +++ b/drivers/staging/rdma/hfi1/twsi.c @@ -119,9 +119,9 @@ static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit) * Allow for slow slaves by simple * delay for falling edge, sampling on rise. */ - if (!bit) + if (!bit) { udelay(2); - else { + } else { int rise_usec; for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index e24cb6253fd7..81b2dc70d570 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -139,9 +139,9 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) len = pmtu; break; } - if (wqe->wr.opcode == IB_WR_SEND) + if (wqe->wr.opcode == IB_WR_SEND) { qp->s_state = OP(SEND_ONLY); - else { + } else { qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ @@ -168,9 +168,9 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) len = pmtu; break; } - if (wqe->wr.opcode == IB_WR_RDMA_WRITE) + if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { qp->s_state = OP(RDMA_WRITE_ONLY); - else { + } else { qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the RETH */ @@ -199,9 +199,9 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) middle = HFI1_CAP_IS_KSET(SDMA_AHG); break; } - if (wqe->wr.opcode == IB_WR_SEND) + if (wqe->wr.opcode == IB_WR_SEND) { qp->s_state = OP(SEND_LAST); - else { + } else { qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; @@ -224,9 +224,9 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) middle = HFI1_CAP_IS_KSET(SDMA_AHG); break; } - if (wqe->wr.opcode == IB_WR_RDMA_WRITE) + if (wqe->wr.opcode == IB_WR_RDMA_WRITE) { qp->s_state = OP(RDMA_WRITE_LAST); - else { + } else { qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ @@ -353,8 +353,9 @@ inv: qp->r_state == OP(SEND_MIDDLE)) { set_bit(RVT_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; - } else + } else { rvt_put_ss(&qp->r_sge); + } qp->r_state = OP(SEND_LAST); switch (opcode) { case OP(SEND_FIRST): @@ -410,9 +411,9 @@ inv: case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): send_first: - if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) + if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) { qp->r_sge = qp->s_rdma_read_sge; - else { + } else { ret = hfi1_rvt_get_rwqe(qp, 0); if (ret < 0) goto op_err; @@ -523,9 +524,9 @@ rdma_first: qp->r_sge.sge.length = 0; 
qp->r_sge.sge.sge_length = 0; } - if (opcode == OP(RDMA_WRITE_ONLY)) + if (opcode == OP(RDMA_WRITE_ONLY)) { goto rdma_last; - else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) { + } else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) { wc.ex.imm_data = ohdr->u.rc.imm_data; goto rdma_last_imm; } @@ -555,9 +556,9 @@ rdma_last_imm: tlen -= (hdrsize + pad + 4); if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; - if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) + if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) { rvt_put_ss(&qp->s_rdma_read_sge); - else { + } else { ret = hfi1_rvt_get_rwqe(qp, 1); if (ret < 0) goto op_err; diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index ea8f706cf4ec..65157a4f9f5c 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -163,9 +163,9 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) /* * Get the next work request entry to find where to put the data. */ - if (qp->r_flags & RVT_R_REUSE_SGE) + if (qp->r_flags & RVT_R_REUSE_SGE) { qp->r_flags &= ~RVT_R_REUSE_SGE; - else { + } else { int ret; ret = hfi1_rvt_get_rwqe(qp, 0); @@ -190,8 +190,9 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) hfi1_copy_sge(&qp->r_sge, &ah_attr->grh, sizeof(struct ib_grh), 1, 0); wc.wc_flags |= IB_WC_GRH; - } else + } else { hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); + } ssge.sg_list = swqe->sg_list + 1; ssge.sge = *swqe->sg_list; ssge.num_sge = swqe->wr.num_sge; @@ -383,8 +384,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) qp->s_hdrwords++; ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; - } else + } else { bth0 = IB_OPCODE_UD_SEND_ONLY << 24; + } sc5 = ibp->sl_to_sc[ah_attr->sl]; lrh0 |= (ah_attr->sl & 0xf) << 4; if (qp->ibqp.qp_type == IB_QPT_SMI) { @@ -820,8 +822,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { wc.ex.imm_data = 0; wc.wc_flags = 0; - } else + } else { goto drop; + } /* * A GRH is expected to precede the data even if not @@ -832,9 +835,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) /* * Get the next work request entry to find where to put the data. */ - if (qp->r_flags & RVT_R_REUSE_SGE) + if (qp->r_flags & RVT_R_REUSE_SGE) { qp->r_flags &= ~RVT_R_REUSE_SGE; - else { + } else { int ret; ret = hfi1_rvt_get_rwqe(qp, 0); @@ -857,8 +860,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh, sizeof(struct ib_grh), 1, 0); wc.wc_flags |= IB_WC_GRH; - } else + } else { hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); + } hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1, 0); rvt_put_ss(&qp->r_sge); @@ -884,8 +888,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) } } wc.pkey_index = (unsigned)mgmt_pkey_idx; - } else + } else { wc.pkey_index = 0; + } wc.slid = be16_to_cpu(hdr->lrh[3]); sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf; diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index ce94cbcc689d..7287307b2c5e 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -765,8 +765,9 @@ static inline u32 compute_data_length(struct user_sdma_request *req, * remaining. 
*/ len = min(len, req->data_len - req->sent); - } else + } else { len = min(req->data_len - req->sent, (u32)req->info.fragsize); + } SDMA_DBG(req, "Data Length = %u", len); return len; } @@ -1337,8 +1338,9 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, INTR) >> 16); val &= cpu_to_le16(~(1U << 13)); AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val); - } else + } else { AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val); + } } trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt, diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 275af1909d39..0e650b41f10c 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -400,9 +400,9 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) /* Check for GRH */ lnh = be16_to_cpu(hdr->lrh[0]) & 3; - if (lnh == HFI1_LRH_BTH) + if (lnh == HFI1_LRH_BTH) { packet->ohdr = &hdr->u.oth; - else if (lnh == HFI1_LRH_GRH) { + } else if (lnh == HFI1_LRH_GRH) { u32 vtf; packet->ohdr = &hdr->u.l.oth; @@ -412,8 +412,9 @@ void hfi1_ib_rcv(struct hfi1_packet *packet) if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) goto drop; packet->rcv_flags |= HFI1_HAS_GRH; - } else + } else { goto drop; + } trace_input_ibhdr(rcd->dd, hdr); @@ -528,9 +529,9 @@ static void verbs_sdma_complete( struct rvt_qp *qp = tx->qp; spin_lock(&qp->s_lock); - if (tx->wqe) + if (tx->wqe) { hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS); - else if (qp->ibqp.qp_type == IB_QPT_RC) { + } else if (qp->ibqp.qp_type == IB_QPT_RC) { struct hfi1_ib_header *hdr; hdr = &tx->phdr.hdr; -- cgit v1.2.3-59-g8ed1b From edddfca00eecd0949a9adccf8dd490478f641cbc Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:22:09 -0800 Subject: staging/rdma/hfi1: Remove else after break Remove else after break to fix checkpatch warning: WARNING: else is not generally useful after a break or return Reviewed-by: Dennis Dalessandro Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 4f40c987303b..a17cb732277e 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -329,11 +329,9 @@ __be32 hfi1_compute_aeth(struct rvt_qp *qp) if (credit_table[x] > credits) { max = x; } else { - if (min == x) { + if (min == x) break; - } else { - min = x; - } + min = x; } } aeth |= x << HFI1_AETH_CREDIT_SHIFT; -- cgit v1.2.3-59-g8ed1b From 05d6ac1d8268915593480a34926f386970a9d720 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sun, 14 Feb 2016 20:22:17 -0800 Subject: staging/rdma/hfi1: Fix header Fix the header by moving the copyright notice out of the license text and to the top of the header. Also, update the copyright date. 
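For illustration, each of the 55 headers changes along the same pattern (a condensed sketch; the real files carry the full license and disclaimer text):

	/* Before: copyright line repeated inside each license summary */
	/*
	 *
	 * This file is provided under a dual BSD/GPLv2 license. ...
	 *
	 * GPL LICENSE SUMMARY
	 *
	 * Copyright(c) 2015 Intel Corporation.
	 * ...
	 *
	 * BSD LICENSE
	 *
	 * Copyright(c) 2015 Intel Corporation.
	 * ...
	 */

	/* After: a single notice at the top of the header, date updated */
	/*
	 * Copyright(c) 2015, 2016 Intel Corporation.
	 *
	 * This file is provided under a dual BSD/GPLv2 license. ...
	 *
	 * GPL LICENSE SUMMARY
	 * ...
	 *
	 * BSD LICENSE
	 * ...
	 */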
Reviewed-by: Dennis Dalessandro Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/affinity.c | 5 +---- drivers/staging/rdma/hfi1/affinity.h | 5 +---- drivers/staging/rdma/hfi1/aspm.h | 5 +---- drivers/staging/rdma/hfi1/chip.c | 5 +---- drivers/staging/rdma/hfi1/chip.h | 5 +---- drivers/staging/rdma/hfi1/chip_registers.h | 5 +---- drivers/staging/rdma/hfi1/common.h | 5 +---- drivers/staging/rdma/hfi1/debugfs.c | 5 +---- drivers/staging/rdma/hfi1/debugfs.h | 5 +---- drivers/staging/rdma/hfi1/device.c | 5 +---- drivers/staging/rdma/hfi1/device.h | 5 +---- drivers/staging/rdma/hfi1/diag.c | 5 +---- drivers/staging/rdma/hfi1/dma.c | 5 +---- drivers/staging/rdma/hfi1/driver.c | 5 +---- drivers/staging/rdma/hfi1/efivar.c | 5 +---- drivers/staging/rdma/hfi1/efivar.h | 5 +---- drivers/staging/rdma/hfi1/eprom.c | 5 +---- drivers/staging/rdma/hfi1/eprom.h | 5 +---- drivers/staging/rdma/hfi1/file_ops.c | 5 +---- drivers/staging/rdma/hfi1/firmware.c | 5 +---- drivers/staging/rdma/hfi1/hfi.h | 5 +---- drivers/staging/rdma/hfi1/init.c | 5 +---- drivers/staging/rdma/hfi1/intr.c | 5 +---- drivers/staging/rdma/hfi1/iowait.h | 5 +---- drivers/staging/rdma/hfi1/mad.c | 5 +---- drivers/staging/rdma/hfi1/mad.h | 5 +---- drivers/staging/rdma/hfi1/opa_compat.h | 5 +---- drivers/staging/rdma/hfi1/pcie.c | 5 +---- drivers/staging/rdma/hfi1/pio.c | 5 +---- drivers/staging/rdma/hfi1/pio.h | 5 +---- drivers/staging/rdma/hfi1/pio_copy.c | 5 +---- drivers/staging/rdma/hfi1/platform.c | 5 +---- drivers/staging/rdma/hfi1/platform.h | 5 +---- drivers/staging/rdma/hfi1/qp.c | 5 +---- drivers/staging/rdma/hfi1/qp.h | 5 +---- drivers/staging/rdma/hfi1/qsfp.c | 5 +---- drivers/staging/rdma/hfi1/qsfp.h | 5 +---- drivers/staging/rdma/hfi1/rc.c | 5 +---- drivers/staging/rdma/hfi1/ruc.c | 5 +---- drivers/staging/rdma/hfi1/sdma.c | 5 +---- drivers/staging/rdma/hfi1/sdma.h | 5 +---- drivers/staging/rdma/hfi1/sysfs.c | 5 +---- drivers/staging/rdma/hfi1/trace.c | 5 +---- drivers/staging/rdma/hfi1/trace.h | 5 +---- drivers/staging/rdma/hfi1/twsi.c | 5 +---- drivers/staging/rdma/hfi1/twsi.h | 5 +---- drivers/staging/rdma/hfi1/uc.c | 5 +---- drivers/staging/rdma/hfi1/ud.c | 5 +---- drivers/staging/rdma/hfi1/user_exp_rcv.c | 5 +---- drivers/staging/rdma/hfi1/user_exp_rcv.h | 5 +---- drivers/staging/rdma/hfi1/user_pages.c | 5 +---- drivers/staging/rdma/hfi1/user_sdma.c | 5 +---- drivers/staging/rdma/hfi1/user_sdma.h | 5 +---- drivers/staging/rdma/hfi1/verbs.c | 5 +---- drivers/staging/rdma/hfi1/verbs.h | 5 +---- 55 files changed, 55 insertions(+), 220 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/staging/rdma/hfi1/affinity.c index 59b29725ea11..2cb8ca77f876 100644 --- a/drivers/staging/rdma/hfi1/affinity.c +++ b/drivers/staging/rdma/hfi1/affinity.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. 
- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/staging/rdma/hfi1/affinity.h index 2bdac9680e8e..b287e4963024 100644 --- a/drivers/staging/rdma/hfi1/affinity.h +++ b/drivers/staging/rdma/hfi1/affinity.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/staging/rdma/hfi1/aspm.h index 67fce1d0c7f0..3aac80296dcc 100644 --- a/drivers/staging/rdma/hfi1/aspm.h +++ b/drivers/staging/rdma/hfi1/aspm.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 0a774656cb74..93bf465dccbe 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 0b7055b14d17..84681390a44c 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -1,14 +1,13 @@ #ifndef _CHIP_H #define _CHIP_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
@@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h index 23898ebbad24..770f05c9b8de 100644 --- a/drivers/staging/rdma/hfi1/chip_registers.h +++ b/drivers/staging/rdma/hfi1/chip_registers.h @@ -2,14 +2,13 @@ #define DEF_CHIP_REG /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -21,8 +20,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h index dcf8edf910b5..e9b6bb322025 100644 --- a/drivers/staging/rdma/hfi1/common.h +++ b/drivers/staging/rdma/hfi1/common.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index 0b0fd8a70ccf..4fd58e374bb7 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -1,13 +1,12 @@ #ifdef CONFIG_DEBUG_FS /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -19,8 +18,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/staging/rdma/hfi1/debugfs.h index 92d6fe146714..b6fb6814f1b8 100644 --- a/drivers/staging/rdma/hfi1/debugfs.h +++ b/drivers/staging/rdma/hfi1/debugfs.h @@ -1,14 +1,13 @@ #ifndef _HFI1_DEBUGFS_H #define _HFI1_DEBUGFS_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. 
- * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c index 58472e5ac4e5..c05c39da83b1 100644 --- a/drivers/staging/rdma/hfi1/device.c +++ b/drivers/staging/rdma/hfi1/device.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h index 2850ff739d81..5bb3e83cf2da 100644 --- a/drivers/staging/rdma/hfi1/device.h +++ b/drivers/staging/rdma/hfi1/device.h @@ -1,14 +1,13 @@ #ifndef _HFI1_DEVICE_H #define _HFI1_DEVICE_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index b8faee0b676c..6546e91f85b7 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/staging/rdma/hfi1/dma.c index afe572dfeb10..7e8dab892848 100644 --- a/drivers/staging/rdma/hfi1/dma.c +++ b/drivers/staging/rdma/hfi1/dma.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. 
When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 090b7010c34f..76ed5f7784a0 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/staging/rdma/hfi1/efivar.c index 7dc5bae220e0..5fe39241b635 100644 --- a/drivers/staging/rdma/hfi1/efivar.c +++ b/drivers/staging/rdma/hfi1/efivar.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/staging/rdma/hfi1/efivar.h index 070706225c51..94e9e70de568 100644 --- a/drivers/staging/rdma/hfi1/efivar.h +++ b/drivers/staging/rdma/hfi1/efivar.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c index f36d06bc2817..87114af3a38e 100644 --- a/drivers/staging/rdma/hfi1/eprom.c +++ b/drivers/staging/rdma/hfi1/eprom.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. 
* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/staging/rdma/hfi1/eprom.h index 5a61ba3ba810..d41f0b1afb15 100644 --- a/drivers/staging/rdma/hfi1/eprom.h +++ b/drivers/staging/rdma/hfi1/eprom.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 7846f31759e0..e4490aecf262 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 52a3e8c95a07..d5befd1afdbb 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. 
- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 07df5153703c..92154822de5a 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1,14 +1,13 @@ #ifndef _HFI1_KERNEL_H #define _HFI1_KERNEL_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index cb75fc7bb090..2def53895d50 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c index 46eeeca59bbc..65348d16ab2f 100644 --- a/drivers/staging/rdma/hfi1/intr.c +++ b/drivers/staging/rdma/hfi1/intr.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/staging/rdma/hfi1/iowait.h index 2cb3f0422752..2ec6ef38d389 100644 --- a/drivers/staging/rdma/hfi1/iowait.h +++ b/drivers/staging/rdma/hfi1/iowait.h @@ -1,14 +1,13 @@ #ifndef _HFI1_IOWAIT_H #define _HFI1_IOWAIT_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. 
- * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 305b7ae0daae..78931fccbac0 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/staging/rdma/hfi1/mad.h index 9ebaaf939d34..55ee08675333 100644 --- a/drivers/staging/rdma/hfi1/mad.h +++ b/drivers/staging/rdma/hfi1/mad.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/staging/rdma/hfi1/opa_compat.h index 30f77077e30b..6ef3c1cbdcd7 100644 --- a/drivers/staging/rdma/hfi1/opa_compat.h +++ b/drivers/staging/rdma/hfi1/opa_compat.h @@ -1,14 +1,13 @@ #ifndef _LINUX_H #define _LINUX_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 2b0281c3546b..7855962a8ba5 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. 
When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index a483c0aad4cc..859cb4601547 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h index d80909a60df9..0026976ce4f6 100644 --- a/drivers/staging/rdma/hfi1/pio.h +++ b/drivers/staging/rdma/hfi1/pio.h @@ -1,14 +1,13 @@ #ifndef _PIO_H #define _PIO_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/staging/rdma/hfi1/pio_copy.c index 998e7bc89036..228e9fb76e08 100644 --- a/drivers/staging/rdma/hfi1/pio_copy.c +++ b/drivers/staging/rdma/hfi1/pio_copy.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. 
- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c index 2f07becb0b96..4777414352d0 100644 --- a/drivers/staging/rdma/hfi1/platform.c +++ b/drivers/staging/rdma/hfi1/platform.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/staging/rdma/hfi1/platform.h index 1f41bdc61235..19620cf546d5 100644 --- a/drivers/staging/rdma/hfi1/platform.h +++ b/drivers/staging/rdma/hfi1/platform.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index a17cb732277e..9e831a162f19 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h index 7b1c57e37c6b..e7bc8d6cf681 100644 --- a/drivers/staging/rdma/hfi1/qp.h +++ b/drivers/staging/rdma/hfi1/qp.h @@ -1,14 +1,13 @@ #ifndef _QP_H #define _QP_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. 
@@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index c5e04b069ad6..e38a0eb131ea 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h index c391750bf9d2..2ad59807573f 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/staging/rdma/hfi1/qsfp.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 70a6e63eefcd..8caad18ba57e 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index e2c4f8288c3e..5d84981a6b51 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. 
- * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index f2f76b371e8b..d894f439614a 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h index 5aec18b58189..8f50c99fe711 100644 --- a/drivers/staging/rdma/hfi1/sdma.h +++ b/drivers/staging/rdma/hfi1/sdma.h @@ -1,14 +1,13 @@ #ifndef _HFI1_SDMA_H #define _HFI1_SDMA_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c index 3e3f1803a251..c7f1271190af 100644 --- a/drivers/staging/rdma/hfi1/sysfs.c +++ b/drivers/staging/rdma/hfi1/sysfs.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c index 99fd01751492..6821d7c377e5 100644 --- a/drivers/staging/rdma/hfi1/trace.c +++ b/drivers/staging/rdma/hfi1/trace.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
* * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h index dfa996715736..b8b44168e62d 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/staging/rdma/hfi1/trace.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/staging/rdma/hfi1/twsi.c index 1cc07485723e..e82e52a63d35 100644 --- a/drivers/staging/rdma/hfi1/twsi.c +++ b/drivers/staging/rdma/hfi1/twsi.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/staging/rdma/hfi1/twsi.h index 0722ac83e7dd..5b8a5b5e7eae 100644 --- a/drivers/staging/rdma/hfi1/twsi.h +++ b/drivers/staging/rdma/hfi1/twsi.h @@ -1,14 +1,13 @@ #ifndef _TWSI_H #define _TWSI_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 81b2dc70d570..5ba29affa43f 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. 
When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index 65157a4f9f5c..ae8a70f703eb 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 36b61b5b6997..fccae508a5d0 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/staging/rdma/hfi1/user_exp_rcv.h index 28ef98a45a1e..9bc8d9fba87e 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.h +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.h @@ -1,14 +1,13 @@ #ifndef _HFI1_USER_EXP_RCV_H #define _HFI1_USER_EXP_RCV_H /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -20,8 +19,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. 
- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c index 1854c0c7ce7e..3bf81086c24d 100644 --- a/drivers/staging/rdma/hfi1/user_pages.c +++ b/drivers/staging/rdma/hfi1/user_pages.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index 7287307b2c5e..14fe0790a35b 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h index 7ef31a6b6dbe..e0d0fe02d557 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.h +++ b/drivers/staging/rdma/hfi1/user_sdma.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015, 2016 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015, 2016 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 0e650b41f10c..220bdb0b70bc 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. 
- * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index dc623c6e902d..a85e6bc580b6 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -1,12 +1,11 @@ /* + * Copyright(c) 2015, 2016 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. - * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. @@ -18,8 +17,6 @@ * * BSD LICENSE * - * Copyright(c) 2015 Intel Corporation. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: -- cgit v1.2.3-59-g8ed1b From 6b5c5213e57453c228f7695d5d889aa4c84272c3 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Thu, 18 Feb 2016 11:11:59 -0800 Subject: staging/rdma/hfi1: fix 0-day syntax error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Setting CONFIG_HFI1_DEBUG_SDMA_ORDER causes a syntax error: sdma.c: In function ‘complete_tx’: sdma.c:370: error: ‘txp’ undeclared (first use in this function) sdma.c:370: error: (Each undeclared identifier is reported only once sdma.c:370: error: for each function it appears in.) Adjust code under ifdef to reference the tx properly. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/sdma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index d894f439614a..e29b5d394a5f 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -367,10 +367,10 @@ static inline void complete_tx(struct sdma_engine *sde, callback_t complete = tx->complete; #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER - trace_hfi1_sdma_out_sn(sde, txp->sn); - if (WARN_ON_ONCE(sde->head_sn != txp->sn)) + trace_hfi1_sdma_out_sn(sde, tx->sn); + if (WARN_ON_ONCE(sde->head_sn != tx->sn)) dd_dev_err(sde->dd, "expected %llu got %llu\n", - sde->head_sn, txp->sn); + sde->head_sn, tx->sn); sde->head_sn++; #endif sdma_txclean(sde->dd, tx); -- cgit v1.2.3-59-g8ed1b From 4c9e7aacb6a6334168a81b83819fb5cb088d2fb3 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Thu, 18 Feb 2016 11:12:08 -0800 Subject: staging/rdma/hfi1: Fix xmit discard error weight Count only the errors that apply to xmit discards. Update the comment to better explain the limitations of the count. 
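To make the counting scheme concrete: the fix masks the error info register down to just the egress-discard bits before taking a population count, so each applicable bit contributes exactly one discard. Below is a minimal stand-alone sketch of that idea; the mask value, helper, and counter are invented for illustration and are not the driver's own symbols.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical mask: which info bits count as egress discards */
    #define EGRESS_DISCARD_MASK_EXAMPLE 0x00000000000000ffull

    /* portable popcount, equivalent in spirit to the kernel's hweight64() */
    static int popcount64(uint64_t v)
    {
    	int n = 0;

    	while (v) {
    		v &= v - 1;	/* clear the lowest set bit */
    		n++;
    	}
    	return n;
    }

    int main(void)
    {
    	uint64_t info = 0x0000000000000503ull; /* pretend hardware value */
    	unsigned long port_discards = 0;
    	int weight, i;

    	/* count only the applicable bits, one discard per error bit */
    	weight = popcount64(info & EGRESS_DISCARD_MASK_EXAMPLE);
    	for (i = 0; i < weight; i++)
    		port_discards++;  /* stands in for __count_port_discards() */

    	printf("discards counted: %lu (raw info had %d bits set)\n",
    	       port_discards, popcount64(info));
    	return 0;
    }

With these example values the masked count is 2 while the raw register has 4 bits set; unrelated error bits no longer inflate the discard counter.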
Reviewed-by: Dennis Dalessandro Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 93bf465dccbe..6e44d52e3330 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -5566,12 +5566,28 @@ static void handle_send_egress_err_info(struct hfi1_devdata *dd, int weight, i; /* - * Count all, in case multiple bits are set. Reminder: - * since there is only one info register for many sources, - * these may be attributed to the wrong VL if they occur - * too close together. + * Count all applicable bits as individual errors and + * attribute them to the packet that triggered this handler. + * This may not be completely accurate due to limitations + * on the available hardware error information. There is + * a single information register and any number of error + * packets may have occurred and contributed to it before + * this routine is called. This means that: + * a) If multiple packets with the same error occur before + * this routine is called, earlier packets are missed. + * There is only a single bit for each error type. + * b) Errors may not be attributed to the correct VL. + * The driver is attributing all bits in the info register + * to the packet that triggered this call, but bits + * could be an accumulation of different packets with + * different VLs. + * c) A single error packet may have multiple counts attached + * to it. There is no way for the driver to know if + * multiple bits set in the info register are due to a + * single packet or multiple packets. The driver assumes + * multiple packets. */ - weight = hweight64(info); + weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS); for (i = 0; i < weight; i++) { __count_port_discards(ppd); if (vl >= 0 && vl < TXE_NUM_DATA_VL) -- cgit v1.2.3-59-g8ed1b From e8aa284ba0cd2a1b6bfb3181a5b3b7f0bdefbe1a Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Thu, 18 Feb 2016 11:12:16 -0800 Subject: staging/rdma/hfi1: Cleanup comments and logs in PHY code This is a set of minor fixes including comment and log message cleanups and improvements to the PHY layer code. 
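One of those PHY improvements reworks handle_qsfp_int() (first hunk below) so the log line states the decoded event -- module removed, module inserted, or the module asserting its interrupt line -- instead of naming the raw trigger bit. The shape of that dispatch, reduced to a hedged user-space sketch with invented bit names:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical interrupt source bits, in the spirit of QSFP_HFI0_* */
    #define MODPRST_N (1ull << 0)	/* module presence changed */
    #define INT_N     (1ull << 1)	/* module asserted IntN */

    static int module_present;	/* pretend cached hardware state */

    static void handle_qsfp_like_int(uint64_t reg)
    {
    	if (reg & MODPRST_N) {
    		if (!module_present)
    			printf("%s: QSFP module removed\n", __func__);
    		else
    			printf("%s: QSFP module inserted\n", __func__);
    	}
    	if (reg & INT_N)
    		printf("%s: Interrupt received from QSFP module\n", __func__);
    }

    int main(void)
    {
    	handle_qsfp_like_int(MODPRST_N);	 /* presence change: removal */
    	module_present = 1;
    	handle_qsfp_like_int(MODPRST_N | INT_N); /* insertion plus IntN */
    	return 0;
    }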
Reviewed-by: Dean Luick Signed-off-by: Easwar Hariharan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 11 +++++++---- drivers/staging/rdma/hfi1/qsfp.c | 4 ++-- 2 files changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 6e44d52e3330..483b37ae698e 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -5946,10 +5946,10 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); if (reg & QSFP_HFI0_MODPRST_N) { - dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n", - __func__); - if (!qsfp_mod_present(ppd)) { + dd_dev_info(dd, "%s: QSFP module removed\n", + __func__); + ppd->driver_link_ready = 0; /* * Cable removed, reset all our information about the @@ -5989,6 +5989,9 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) queue_work(ppd->hfi1_wq, &ppd->link_down_work); } } else { + dd_dev_info(dd, "%s: QSFP module inserted\n", + __func__); + spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.cache_valid = 0; ppd->qsfp_info.cache_refresh_required = 1; @@ -6009,7 +6012,7 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) } if (reg & QSFP_HFI0_INT_N) { - dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n", + dd_dev_info(dd, "%s: Interrupt received from QSFP module\n", __func__); spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.check_interrupt_flags = 1; diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index e38a0eb131ea..07330b030ee0 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -187,7 +187,7 @@ done: /* * Write page n, offset m of QSFP memory as defined by SFF 8636 - * in the cache by writing @addr = ((256 * n) + m) + * by writing @addr = ((256 * n) + m) */ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) @@ -253,7 +253,7 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, /* * Access page n, offset m of QSFP memory as defined by SFF 8636 - * in the cache by reading @addr = ((256 * n) + m) + * by reading @addr = ((256 * n) + m) */ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) -- cgit v1.2.3-59-g8ed1b From ed6f653fe430ed4912aebec10a1b9d57813fe44c Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Thu, 18 Feb 2016 11:12:25 -0800 Subject: staging/rdma/hfi1: Fix debugfs access race Debugfs access races with the driver being ready. Make sure the driver is ready before debugfs files appear, and that the debugfs files are gone before the driver starts tearing down.
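The fix is purely one of ordering: the debugfs entries must not exist while the device is in a half-initialized state at either end of its lifetime. A hedged sketch of the pattern against the stock debugfs API; the my_* names are placeholders, not driver symbols.

    #include <linux/debugfs.h>

    static struct dentry *my_dbg_root;	/* hypothetical per-device dentry */

    /* call at the tail of probe, after init and IB registration succeed */
    static void my_debugfs_init(void)
    {
    	my_dbg_root = debugfs_create_dir("my_dev0", NULL);
    }

    /* call at the head of remove, before IB unregistration and teardown */
    static void my_debugfs_exit(void)
    {
    	debugfs_remove_recursive(my_dbg_root);
    	my_dbg_root = NULL;
    }

Keeping creation last in probe and removal first in remove mirrors the hunks below: the files are only visible during the window in which the driver is fully ready to service them.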
Reviewed-by: Mike Marciniszyn Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/init.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 2def53895d50..371ed293677d 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -983,7 +983,6 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) list_del(&dd->list); spin_unlock_irqrestore(&hfi1_devs_lock, flags); free_platform_config(dd); - hfi1_dbg_ibdev_exit(&dd->verbs_dev); rcu_barrier(); /* wait for rcu callbacks to complete */ free_percpu(dd->int_counter); free_percpu(dd->rcv_limit); @@ -1088,7 +1087,6 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) &pdev->dev, "Could not alloc cpulist info, cpu affinity might be wrong\n"); } - hfi1_dbg_ibdev_init(&dd->verbs_dev); return dd; bail: @@ -1445,8 +1443,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * we still create devices, so diags, etc. can be used * to determine cause of problem. */ - if (!initfail && !ret) + if (!initfail && !ret) { dd->flags |= HFI1_INITTED; + /* create debufs files after init and ib register */ + hfi1_dbg_ibdev_init(&dd->verbs_dev); + } j = hfi1_device_create(dd); if (j) @@ -1487,6 +1488,8 @@ static void remove_one(struct pci_dev *pdev) { struct hfi1_devdata *dd = pci_get_drvdata(pdev); + /* close debugfs files before ib unregister */ + hfi1_dbg_ibdev_exit(&dd->verbs_dev); /* unregister from IB core */ hfi1_unregister_ib_device(dd); -- cgit v1.2.3-59-g8ed1b From 354d9c952d9db01d561abd55fdfa09ccc67039f9 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Thu, 18 Feb 2016 11:12:34 -0800 Subject: staging/rdma/hfi1: Disclose more information when i2c fails Improve logging messages when there are i2c failures. Clean i2c read error handling. Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qsfp.c | 42 +++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 22 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index 07330b030ee0..7e76b93f8f94 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -102,7 +102,8 @@ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, ret = hfi1_twsi_reset(ppd->dd, target); if (ret) { hfi1_dev_porterr(ppd->dd, ppd->port, - "I2C write interface reset failed\n"); + "I2C chain %d write interface reset failed\n", + target); goto done; } @@ -121,15 +122,14 @@ static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, { struct hfi1_devdata *dd = ppd->dd; int ret, cnt, pass = 0; - int stuck = 0; - u8 *buff = bp; + int orig_offset = offset; cnt = 0; while (cnt < len) { int rlen = len - cnt; ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset, - buff + cnt, rlen); + bp + cnt, rlen); /* Some QSFP's fail first try. 
Retry as experiment */ if (ret && cnt == 0 && ++pass < I2C_MAX_RETRY) continue; @@ -145,14 +145,11 @@ static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, ret = cnt; exit: - if (stuck) - dd_dev_err(dd, "I2C interface bus stuck non-idle\n"); - - if (pass >= I2C_MAX_RETRY && ret) + if (ret < 0) { hfi1_dev_porterr(dd, ppd->port, - "I2C failed even retrying\n"); - else if (pass) - hfi1_dev_porterr(dd, ppd->port, "I2C retries: %d\n", pass); + "I2C chain %d read failed, addr 0x%x, offset 0x%x, len %d\n", + target, i2c_addr, orig_offset, len); + } /* Must wait min 20us between qsfp i2c transactions */ udelay(20); @@ -174,7 +171,8 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, ret = hfi1_twsi_reset(ppd->dd, target); if (ret) { hfi1_dev_porterr(ppd->dd, ppd->port, - "I2C read interface reset failed\n"); + "I2C chain %d read interface reset failed\n", + target); goto done; } @@ -206,7 +204,8 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, ret = hfi1_twsi_reset(ppd->dd, target); if (ret) { hfi1_dev_porterr(ppd->dd, ppd->port, - "QSFP write interface reset failed\n"); + "QSFP chain %d write interface reset failed\n", + target); mutex_unlock(&ppd->dd->qsfp_i2c_mutex); return ret; } @@ -221,10 +220,9 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1); if (ret != 1) { - hfi1_dev_porterr( - ppd->dd, - ppd->port, - "can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret); + hfi1_dev_porterr(ppd->dd, ppd->port, + "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n", + target, ret); ret = -EIO; break; } @@ -272,7 +270,8 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, ret = hfi1_twsi_reset(ppd->dd, target); if (ret) { hfi1_dev_porterr(ppd->dd, ppd->port, - "QSFP read interface reset failed\n"); + "QSFP chain %d read interface reset failed\n", + target); mutex_unlock(&ppd->dd->qsfp_i2c_mutex); return ret; } @@ -286,10 +285,9 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE, QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1); if (ret != 1) { - hfi1_dev_porterr( - ppd->dd, - ppd->port, - "can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret); + hfi1_dev_porterr(ppd->dd, ppd->port, + "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n", + target, ret); ret = -EIO; break; } -- cgit v1.2.3-59-g8ed1b From 7b47622d784311bff8218d03754fbf20529c1a71 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Thu, 18 Feb 2016 11:12:51 -0800 Subject: staging/rdma/hfi1: Guard i2c access against cp An attempt to cp or cat /sys/kernel/debug/hfi1/hfi1_0/i2c1 produces this message: hfi1 0000:81:00.0: hfi1_0: IB0:1 I2C failed even retrying Fix the issue by explicitly rejecting a simple cat/cp with an -EINVAL error return. 
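The guard works because of how these debugfs files encode their addressing: the target i2c address lives in the upper bits of the file position, so a plain cp or cat -- which always starts reading at offset 0 -- necessarily presents i2c address 0. Rejecting address 0 therefore converts that misuse into a clean -EINVAL. A small stand-alone sketch of the decode step used by the hunks below:

    #include <stdint.h>
    #include <stdio.h>

    /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
    static int decode_i2c_pos(uint64_t ppos, unsigned *i2c_addr,
    			  unsigned *offset)
    {
    	*i2c_addr = (ppos >> 16) & 0xffff;
    	*offset = ppos & 0xffff;

    	/* explicitly reject invalid address 0 to catch cp and cat */
    	if (*i2c_addr == 0)
    		return -1;	/* the driver returns -EINVAL here */
    	return 0;
    }

    int main(void)
    {
    	unsigned addr, off;

    	if (decode_i2c_pos(0, &addr, &off))
    		printf("offset 0 rejected (the cp/cat case)\n");
    	if (!decode_i2c_pos(((uint64_t)0x50 << 16) | 0x100, &addr, &off))
    		printf("i2c addr 0x%x, offset 0x%x accepted\n", addr, off);
    	return 0;
    }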
Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/debugfs.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index 4fd58e374bb7..07c16b343ba0 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -446,6 +446,16 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, rcu_read_lock(); ppd = private2ppd(file); + /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ + i2c_addr = (*ppos >> 16) & 0xffff; + offset = *ppos & 0xffff; + + /* explicitly reject invalid address 0 to catch cp and cat */ + if (i2c_addr == 0) { + ret = -EINVAL; + goto _return; + } + buff = kmalloc(count, GFP_KERNEL); if (!buff) { ret = -ENOMEM; @@ -458,10 +468,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, goto _free; } - /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ - i2c_addr = (*ppos >> 16) & 0xffff; - offset = *ppos & 0xffff; - total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count); if (total_written < 0) { ret = total_written; @@ -507,16 +513,22 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, rcu_read_lock(); ppd = private2ppd(file); + /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ + i2c_addr = (*ppos >> 16) & 0xffff; + offset = *ppos & 0xffff; + + /* explicitly reject invalid address 0 to catch cp and cat */ + if (i2c_addr == 0) { + ret = -EINVAL; + goto _return; + } + buff = kmalloc(count, GFP_KERNEL); if (!buff) { ret = -ENOMEM; goto _return; } - /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */ - i2c_addr = (*ppos >> 16) & 0xffff; - offset = *ppos & 0xffff; - total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count); if (total_read < 0) { ret = total_read; -- cgit v1.2.3-59-g8ed1b From 582e05c3deeaf56ed04df62ad9f1fa6e88199bd9 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Thu, 18 Feb 2016 11:13:01 -0800 Subject: staging/rdma/hfi1: Fix counter read for cp A cp or cat of /sys/kernel/debug/hfi1/hfi1_0/port1counters produces the following message: hfi1 0000:81:00.0: hfi1_0: index not supported hfi1 0000:81:00.0: hfi1_0: read_cntrs does not support indexing Fix by removing the file position logic and the associated messages and make the file positioning the responsibility of the caller. The port counter read function argument is changed to the per port data structure since the counters are relative to the port and not the device. 
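After this change the counter files follow the standard debugfs idiom: the provider hands back a pointer to an in-memory snapshot plus its size, and simple_read_from_buffer() alone interprets and advances *ppos, so partial and repeated reads (exactly what cp and cat issue) just work. A hedged kernel-style sketch; my_read_cntrs() and my_cntrs are illustrative stand-ins, not the driver's functions:

    #include <linux/fs.h>
    #include <linux/types.h>

    static u64 my_cntrs[4];	/* pretend counter snapshot */

    /* provider: no position logic, just the whole buffer and its size */
    static u32 my_read_cntrs(u64 **cntrp)
    {
    	*cntrp = my_cntrs;
    	return sizeof(my_cntrs);
    }

    static ssize_t my_counters_read(struct file *file, char __user *buf,
    				size_t count, loff_t *ppos)
    {
    	u64 *counters;
    	size_t avail = my_read_cntrs(&counters);

    	/* the VFS helper handles *ppos, short reads, and EOF */
    	return simple_read_from_buffer(buf, count, ppos, counters, avail);
    }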
Reviewed-by: Sebastian Sanchez Signed-off-by: Dean Luick Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 30 +++++------------------------- drivers/staging/rdma/hfi1/chip.h | 6 ++---- drivers/staging/rdma/hfi1/debugfs.c | 11 ++++------- 3 files changed, 11 insertions(+), 36 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 483b37ae698e..fe73ebf077e4 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -11407,28 +11407,19 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) dd->rcvhdrtail_dummy_physaddr); } -u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep, - u64 **cntrp) +u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) { int ret; u64 val = 0; if (namep) { ret = dd->cntrnameslen; - if (pos != 0) { - dd_dev_err(dd, "read_cntrs does not support indexing"); - return 0; - } *namep = dd->cntrnames; } else { const struct cntr_entry *entry; int i, j; ret = (dd->ndevcntrs) * sizeof(u64); - if (pos != 0) { - dd_dev_err(dd, "read_cntrs does not support indexing"); - return 0; - } /* Get the start of the block of counters */ *cntrp = dd->cntrs; @@ -11487,30 +11478,19 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep, /* * Used by sysfs to create files for hfi stats to read */ -u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port, - char **namep, u64 **cntrp) +u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp) { int ret; u64 val = 0; if (namep) { - ret = dd->portcntrnameslen; - if (pos != 0) { - dd_dev_err(dd, "index not supported"); - return 0; - } - *namep = dd->portcntrnames; + ret = ppd->dd->portcntrnameslen; + *namep = ppd->dd->portcntrnames; } else { const struct cntr_entry *entry; - struct hfi1_pportdata *ppd; int i, j; - ret = (dd->nportcntrs) * sizeof(u64); - if (pos != 0) { - dd_dev_err(dd, "indexing not supported"); - return 0; - } - ppd = (struct hfi1_pportdata *)(dd + 1 + port); + ret = ppd->dd->nportcntrs * sizeof(u64); *cntrp = ppd->cntrs; for (i = 0; i < PORT_CNTR_LAST; i++) { diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 84681390a44c..e9a41ed39642 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -1297,10 +1297,8 @@ void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, u32 type, unsigned long pa, u16 order); void hfi1_quiet_serdes(struct hfi1_pportdata *ppd); void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt); -u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep, - u64 **cntrp); -u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port, - char **namep, u64 **cntrp); +u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp); +u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp); u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd); int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which); int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val); diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index 07c16b343ba0..99845bc19437 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -336,7 +336,7 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf, rcu_read_lock(); dd = private2dd(file); - avail = hfi1_read_cntrs(dd, *ppos, NULL, &counters); + avail = 
hfi1_read_cntrs(dd, NULL, &counters); rval = simple_read_from_buffer(buf, count, ppos, counters, avail); rcu_read_unlock(); return rval; @@ -353,7 +353,7 @@ static ssize_t dev_names_read(struct file *file, char __user *buf, rcu_read_lock(); dd = private2dd(file); - avail = hfi1_read_cntrs(dd, *ppos, &names, NULL); + avail = hfi1_read_cntrs(dd, &names, NULL); rval = simple_read_from_buffer(buf, count, ppos, names, avail); rcu_read_unlock(); return rval; @@ -380,8 +380,7 @@ static ssize_t portnames_read(struct file *file, char __user *buf, rcu_read_lock(); dd = private2dd(file); - /* port number n/a here since names are constant */ - avail = hfi1_read_portcntrs(dd, *ppos, 0, &names, NULL); + avail = hfi1_read_portcntrs(dd->pport, &names, NULL); rval = simple_read_from_buffer(buf, count, ppos, names, avail); rcu_read_unlock(); return rval; @@ -393,14 +392,12 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf, { u64 *counters; size_t avail; - struct hfi1_devdata *dd; struct hfi1_pportdata *ppd; ssize_t rval; rcu_read_lock(); ppd = private2ppd(file); - dd = ppd->dd; - avail = hfi1_read_portcntrs(dd, *ppos, ppd->port - 1, NULL, &counters); + avail = hfi1_read_portcntrs(ppd, NULL, &counters); rval = simple_read_from_buffer(buf, count, ppos, counters, avail); rcu_read_unlock(); return rval; -- cgit v1.2.3-59-g8ed1b From aa0ad411e51763f95afdae11d6ee684915faecef Mon Sep 17 00:00:00 2001 From: Jianxin Xiong Date: Fri, 26 Feb 2016 13:33:13 -0800 Subject: staging/rdma/hfi1: Fix header size calculation for RC/UC QPs with GRH enabled There is a header size counter in both the QP structure and the txreq structure. The counter in the txreq structure is not updated properly for RC and UC queue pairs with GRH enabled, causing SDMA sends to fail. This patch fixes the RC and UC path.
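The underlying rule is an ordering one: hfi1_make_ruc_header() can grow the queue pair's header word count (a GRH adds 40 bytes, i.e. 10 dwords), so the txreq's copy of the size is only correct if taken afterwards, as the hunks below now do. A toy model of the bug and the fix; the struct names and starting sizes here are illustrative only:

    #include <stdio.h>

    #define GRH_DWORDS 10	/* an IB Global Route Header is 40 bytes */

    struct qp_model { unsigned s_hdrwords; int use_grh; };
    struct txreq_model { unsigned hdr_dwords; };

    /* stands in for hfi1_make_ruc_header(): may enlarge the header */
    static void make_header(struct qp_model *qp)
    {
    	if (qp->use_grh)
    		qp->s_hdrwords += GRH_DWORDS;
    }

    int main(void)
    {
    	struct qp_model qp = { .s_hdrwords = 16, .use_grh = 1 };
    	struct txreq_model tx;

    	/* buggy order: snapshot taken before the header is built */
    	tx.hdr_dwords = qp.s_hdrwords + 2;
    	make_header(&qp);
    	printf("snapshot before build: %u dwords (misses the GRH)\n",
    	       tx.hdr_dwords);

    	/* fixed order: snapshot taken after; +2 dwords for the PBC */
    	tx.hdr_dwords = qp.s_hdrwords + 2;
    	printf("snapshot after build:  %u dwords\n", tx.hdr_dwords);
    	return 0;
    }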
Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Reviewed-by: Mike Marciniszyn Signed-off-by: Jianxin Xiong Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 8 ++++---- drivers/staging/rdma/hfi1/uc.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 8caad18ba57e..1ce0e08378b4 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -358,11 +358,11 @@ normal: } qp->s_rdma_ack_cnt++; qp->s_hdrwords = hwords; - /* pbc */ - ps->s_txreq->hdr_dwords = hwords + 2; ps->s_txreq->sde = priv->s_sde; qp->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps); + /* pbc */ + ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; return 1; bail: @@ -763,8 +763,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) } qp->s_len -= len; qp->s_hdrwords = hwords; - /* pbc */ - ps->s_txreq->hdr_dwords = hwords + 2; ps->s_txreq->sde = priv->s_sde; qp->s_cur_sge = ss; qp->s_cur_size = len; @@ -775,6 +773,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) bth2, middle, ps); + /* pbc */ + ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; return 1; done_free_tx: diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c index 81b2dc70d570..df773d433297 100644 --- a/drivers/staging/rdma/hfi1/uc.c +++ b/drivers/staging/rdma/hfi1/uc.c @@ -239,13 +239,13 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) } qp->s_len -= len; qp->s_hdrwords = hwords; - /* pbc */ - ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; ps->s_txreq->sde = priv->s_sde; qp->s_cur_sge = &qp->s_sge; qp->s_cur_size = len; hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), mask_psn(qp->s_psn++), middle, ps); + /* pbc */ + ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2; return 1; done_free_tx: -- cgit v1.2.3-59-g8ed1b From bf400235f392eabf60c865c95da823727cb00def Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Fri, 26 Feb 2016 13:33:18 -0800 Subject: staging/rdma/hfi1: Avoid using upstream component if it is not accessible When the hfi1 device is assigned to a VM (e.g., KVM), the hfi1 driver has no access to the upstream component and therefore cannot use it to perform some operations, such as secondary bus reset. As a result, the hfi1 driver cannot perform the PCIe Gen3 transition. Instead, those operations should be done in the host environment, preferably during Option ROM initialization. Similarly, the hfi1 driver cannot support ASPM and tune the PCIe capability under this circumstance. Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Kaike Wan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/aspm.h | 19 +++++++++++++++++-- drivers/staging/rdma/hfi1/pcie.c | 22 +++++++++++++++++++--- 2 files changed, 36 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/staging/rdma/hfi1/aspm.h index 3aac80296dcc..0d58fe3b49b5 100644 --- a/drivers/staging/rdma/hfi1/aspm.h +++ b/drivers/staging/rdma/hfi1/aspm.h @@ -72,6 +72,13 @@ static inline bool aspm_hw_l1_supported(struct hfi1_devdata *dd) struct pci_dev *parent = dd->pcidev->bus->self; u32 up, dn; + /* + * If the driver does not have access to the upstream component, + * it cannot support ASPM L1 at all.
+ */ + if (!parent) + return false; + pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &dn); dn = ASPM_L1_SUPPORTED(dn); @@ -98,6 +105,13 @@ static inline void aspm_hw_enable_l1(struct hfi1_devdata *dd) { struct pci_dev *parent = dd->pcidev->bus->self; + /* + * If the driver does not have access to the upstream component, + * it cannot support ASPM L1 at all. + */ + if (!parent) + return; + /* Enable ASPM L1 first in upstream component and then downstream */ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC, @@ -114,8 +128,9 @@ static inline void aspm_hw_disable_l1(struct hfi1_devdata *dd) /* Disable ASPM L1 first in downstream component and then upstream */ pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC, 0x0); - pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPMC, 0x0); + if (parent) + pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0x0); } static inline void aspm_enable(struct hfi1_devdata *dd) diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 7855962a8ba5..1adfa8bfaf2b 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -362,6 +362,7 @@ static void update_lbus_info(struct hfi1_devdata *dd) int pcie_speeds(struct hfi1_devdata *dd) { u32 linkcap; + struct pci_dev *parent = dd->pcidev->bus->self; if (!pci_is_pcie(dd->pcidev)) { dd_dev_err(dd, "Can't find PCI Express capability!\n"); @@ -382,7 +383,7 @@ int pcie_speeds(struct hfi1_devdata *dd) /* * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed */ - if (dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) { + if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) { dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n"); dd->link_gen3_capable = 0; } @@ -471,6 +472,12 @@ static void tune_pcie_caps(struct hfi1_devdata *dd) } /* Find out supported and configured values for parent (root) */ parent = dd->pcidev->bus->self; + /* + * The driver cannot perform the tuning if it does not have + * access to the upstream component. + */ + if (!parent) + return; if (!pci_is_root_bus(parent->bus)) { dd_dev_info(dd, "Parent not root\n"); return; @@ -939,7 +946,7 @@ static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname) */ int do_pcie_gen3_transition(struct hfi1_devdata *dd) { - struct pci_dev *parent; + struct pci_dev *parent = dd->pcidev->bus->self; u64 fw_ctrl; u64 reg, therm; u32 reg32, fs, lf; @@ -981,6 +988,16 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd) return 0; } + /* + * The driver cannot do the transition if it has no access to the + * upstream component + */ + if (!parent) { + dd_dev_info(dd, "%s: No upstream, Can't do gen3 transition\n", + __func__); + return 0; + } + /* * Do the Gen3 transition. Steps are those of the PCIe Gen3 * recipe. @@ -1157,7 +1174,6 @@ retry: * that it is Gen3 capable earlier. */ dd_dev_info(dd, "%s: setting parent target link speed\n", __func__); - parent = dd->pcidev->bus->self; pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2); dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__, (u32)lnkctl2); -- cgit v1.2.3-59-g8ed1b From 24487dd39cb24c23560c2dc726c6d3375f42a697 Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Fri, 26 Feb 2016 13:33:23 -0800 Subject: staging/rdma/hfi1: Check interrupt registers mapping This patch tests the interrupt registers when the driver has no access to its upstream component. 
In this case, it is highly likely that it is running in a virtual machine (eg, Qemu-kvm guest). If the interrupt registers are not mapped properly by the virtual machine monitor, an error message will be printed and the probing will be terminated. This will help the user identify the issue. On the other hand, if the driver is running in a host or has access to its upstream component in some other VM, it will do nothing. Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Kaike Wan Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 56 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index fe73ebf077e4..7799652773d5 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13943,6 +13943,50 @@ static int obtain_boardname(struct hfi1_devdata *dd) return 0; } +/* + * Check the interrupt registers to make sure that they are mapped correctly. + * It is intended to help user identify any mismapping by VMM when the driver + * is running in a VM. This function should only be called before interrupt + * is set up properly. + * + * Return 0 on success, -EINVAL on failure. + */ +static int check_int_registers(struct hfi1_devdata *dd) +{ + u64 reg; + u64 all_bits = ~(u64)0; + u64 mask; + + /* Clear CceIntMask[0] to avoid raising any interrupts */ + mask = read_csr(dd, CCE_INT_MASK); + write_csr(dd, CCE_INT_MASK, 0ull); + reg = read_csr(dd, CCE_INT_MASK); + if (reg) + goto err_exit; + + /* Clear all interrupt status bits */ + write_csr(dd, CCE_INT_CLEAR, all_bits); + reg = read_csr(dd, CCE_INT_STATUS); + if (reg) + goto err_exit; + + /* Set all interrupt status bits */ + write_csr(dd, CCE_INT_FORCE, all_bits); + reg = read_csr(dd, CCE_INT_STATUS); + if (reg != all_bits) + goto err_exit; + + /* Restore the interrupt mask */ + write_csr(dd, CCE_INT_CLEAR, all_bits); + write_csr(dd, CCE_INT_MASK, mask); + + return 0; +err_exit: + write_csr(dd, CCE_INT_MASK, mask); + dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n"); + return -EINVAL; +} + /** * Allocate and initialize the device structure for the hfi. * @dev: the pci_dev for hfi1_ib device @@ -13967,6 +14011,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, "RTL FPGA emulation", "Functional simulator" }; + struct pci_dev *parent = pdev->bus->self; dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * sizeof(struct hfi1_pportdata)); @@ -14045,6 +14090,17 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) & CCE_REVISION_CHIP_REV_MINOR_MASK; + /* + * Check interrupt registers mapping if the driver has no access to + * the upstream component. In this case, it is likely that the driver + * is running in a VM. + */ + if (!parent) { + ret = check_int_registers(dd); + if (ret) + goto bail_cleanup; + } + /* * obtain the hardware ID - NOT related to unit, which is a * software enumeration -- cgit v1.2.3-59-g8ed1b From 409b146225cdefcc76d9956e323e84e510208884 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Fri, 26 Feb 2016 13:33:28 -0800 Subject: staging/rdma/hfi1: Fix reporting of LED status in Get(LedInfo) and Get(PortInfo) The LedInfo SMA attribute is redefined to control the LED beaconing state machine instead of the LED directly. 
In accordance, we now return the state of LED beaconing, represented by whether the beaconing timer is active, instead of the state of the LED itself for SMA queries Get(LedInfo) and Get(PortInfo). While we are at it, we fix the beaconing timer control code so that the state of the timer is accurately updated. Reviewed-by: Ira Weiny Reviewed-by: Mike Marciniszyn Signed-off-by: Easwar Hariharan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/driver.c | 13 +++++++++++-- drivers/staging/rdma/hfi1/mad.c | 24 +++++++++++++++++++----- 2 files changed, 30 insertions(+), 7 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 76ed5f7784a0..45818646eb99 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -1169,6 +1169,12 @@ void shutdown_led_override(struct hfi1_pportdata *ppd) { struct hfi1_devdata *dd = ppd->dd; + /* + * This pairs with the memory barrier implied by the atomic_dec in + * hfi1_set_led_override to ensure that we read the correct state of + * LED beaconing represented by led_override_timer_active + */ + smp_mb(); if (atomic_read(&ppd->led_override_timer_active)) { del_timer_sync(&ppd->led_override_timer); atomic_set(&ppd->led_override_timer_active, 0); @@ -1199,11 +1205,14 @@ static void run_led_override(unsigned long opaque) * don't re-fire the timer if user asked for it to be off; we let * it fire one more time after they turn it off to simplify */ - if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) + if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) { mod_timer(&ppd->led_override_timer, jiffies + timeout); - else + } else { /* Hand control of the LED to the DC for normal operation */ write_csr(dd, DCC_CFG_LED_CNTRL, 0); + /* Record that we did not re-fire the timer */ + atomic_dec(&ppd->led_override_timer_active); + } } /* diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 78931fccbac0..5925798db4d1 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -516,6 +516,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, struct opa_port_info *pi = (struct opa_port_info *)data; u8 mtu; u8 credit_rate; + u8 is_beaconing_active; u32 state; u32 num_ports = OPA_AM_NPORT(am); u32 start_of_sm_config = OPA_AM_START_SM_CFG(am); @@ -581,6 +582,14 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4; pi->port_states.ledenable_offlinereason |= ppd->is_sm_config_started << 5; + /* + * This pairs with the memory barrier implied by the atomic_dec in + * hfi1_set_led_override to ensure that we read the correct state of + * LED beaconing represented by led_override_timer_active + */ + smp_mb(); + is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active); + pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6; pi->port_states.ledenable_offlinereason |= ppd->offline_disabled_reason; #else @@ -3578,19 +3587,24 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, u32 *resp_len) { struct hfi1_devdata *dd = dd_from_ibdev(ibdev); + struct hfi1_pportdata *ppd = dd->pport; struct opa_led_info *p = (struct opa_led_info *)data; u32 nport = OPA_AM_NPORT(am); - u64 reg; + u32 is_beaconing_active; if (nport != 1) { smp->status |= IB_SMP_INVALID_FIELD; return reply((struct ib_mad_hdr *)smp); } - reg = 
read_csr(dd, DCC_CFG_LED_CNTRL); - if ((reg & DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK) && - ((reg & DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK) == 0xf)) - p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK); + /* + * This pairs with the memory barrier implied by the atomic_dec in + * hfi1_set_led_override to ensure that we read the correct state of + * LED beaconing represented by led_override_timer_active + */ + smp_mb(); + is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active); + p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT); if (resp_len) *resp_len += sizeof(struct opa_led_info); -- cgit v1.2.3-59-g8ed1b From 79d0c088801b221330ee3b75cd10912003e3c6dd Mon Sep 17 00:00:00 2001 From: Jubin John Date: Fri, 26 Feb 2016 13:33:33 -0800 Subject: staging/rdma/hfi1: Fix memory leaks Fix 3 memory leaks reported by the LeakCheck tool in the KEDR framework. The following resources were allocated memory during their respective initializations but not freed during cleanup: 1. SDMA map elements 2. PIO map elements 3. HW send context to SW index map This patch fixes the memory leaks by freeing the allocated memory in the cleanup path. Reviewed-by: Dean Luick Reviewed-by: Dennis Dalessandro Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/init.c | 2 ++ drivers/staging/rdma/hfi1/pio.c | 2 +- drivers/staging/rdma/hfi1/sdma.c | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 371ed293677d..37b3ce8377b6 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -1324,6 +1324,8 @@ static void cleanup_device_data(struct hfi1_devdata *dd) dd->num_send_contexts = 0; kfree(dd->send_contexts); dd->send_contexts = NULL; + kfree(dd->hw_to_sw); + dd->hw_to_sw = NULL; kfree(dd->boardname); vfree(dd->events); vfree(dd->status); diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 859cb4601547..361b43d47c76 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -1881,7 +1881,7 @@ void free_pio_map(struct hfi1_devdata *dd) /* Free PIO map if allocated */ if (rcu_access_pointer(dd->pio_map)) { spin_lock_irq(&dd->pio_map_lock); - kfree(rcu_access_pointer(dd->pio_map)); + pio_map_free(rcu_access_pointer(dd->pio_map)); RCU_INIT_POINTER(dd->pio_map, NULL); spin_unlock_irq(&dd->pio_map_lock); synchronize_rcu(); diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c index e29b5d394a5f..abb8ebc1fcac 100644 --- a/drivers/staging/rdma/hfi1/sdma.c +++ b/drivers/staging/rdma/hfi1/sdma.c @@ -966,7 +966,7 @@ static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines) sde->tx_ring = NULL; } spin_lock_irq(&dd->sde_map_lock); - kfree(rcu_access_pointer(dd->sdma_map)); + sdma_map_free(rcu_access_pointer(dd->sdma_map)); RCU_INIT_POINTER(dd->sdma_map, NULL); spin_unlock_irq(&dd->sde_map_lock); synchronize_rcu(); -- cgit v1.2.3-59-g8ed1b From 42d6ec19c918cb5bc6d14769e24240dce8f81687 Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Sat, 5 Mar 2016 08:49:24 -0800 Subject: IB/hfi1: Add the break statement that was removed in an earlier patch The break statement was unintentionally removed in this patch commit 41ca419abc0ca7ee65d765408cdc1a7fed2897a3 ("staging/rdma/hfi1: Remove hfi1 MR and hfi1 specific qp type") Reviewed-by: Mike Marciniszyn Signed-off-by: Harish Chegondi Signed-off-by: Doug Ledford --- 
drivers/staging/rdma/hfi1/ruc.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index 5d84981a6b51..aa53859503ee 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -470,6 +470,7 @@ again: goto inv_err; do_write: if (wqe->length == 0) + break; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length, wqe->rdma_wr.remote_addr, wqe->rdma_wr.rkey, -- cgit v1.2.3-59-g8ed1b From bf640096e670a35e3a7ba1336216664f89a2bcf1 Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Sat, 5 Mar 2016 08:49:29 -0800 Subject: IB/hfi1: Move constant to the right in bitwise operations Implement changes recommended by the Coccinelle tool to move constant to the right in bitwise operations -bash-4.2$ make coccicheck MODE=report M=drivers/infiniband/hw/hfi1/ drivers/infiniband/hw/hfi1/pio.c:765:4-16: Move constant to right. drivers/infiniband/hw/hfi1/rc.c:2503:19-29: Move constant to right. drivers/infiniband/hw/hfi1/chip.c:9813:11-22: Move constant to right. drivers/infiniband/hw/hfi1/chip.c:14468:29-40: Move constant to right. Reviewed-by: Jubin John Reviewed-by: Dennis Dalessandro Signed-off-by: Harish Chegondi Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 6 +++--- drivers/staging/rdma/hfi1/pio.c | 4 ++-- drivers/staging/rdma/hfi1/rc.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 7799652773d5..263c88274514 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -9810,7 +9810,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) if (do_transition) { ret = set_physical_link_state(dd, - PLS_OFFLINE | (rem_reason << 8)); + (rem_reason << 8) | PLS_OFFLINE); if (ret != HCMD_SUCCESS) { dd_dev_err(dd, @@ -14465,8 +14465,8 @@ static void handle_temp_err(struct hfi1_devdata *dd) */ ppd->driver_link_ready = 0; ppd->link_enabled = 0; - set_physical_link_state(dd, PLS_OFFLINE | - (OPA_LINKDOWN_REASON_SMA_DISABLED << 8)); + set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) | + PLS_OFFLINE); /* * Step 2: Shutdown LCB and 8051 * After shutdown, do not restore DC_CFG_RESET value. 
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index 361b43d47c76..e888e214356b 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -762,8 +762,8 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, /* set the default partition key */ write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), - (DEFAULT_PKEY & - SC(CHECK_PARTITION_KEY_VALUE_MASK)) << + (SC(CHECK_PARTITION_KEY_VALUE_MASK) & + DEFAULT_PKEY) << SC(CHECK_PARTITION_KEY_VALUE_SHIFT)); /* per context type checks */ diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 1ce0e08378b4..351f136c7caa 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -2500,7 +2500,7 @@ send_last: return; rnr_nak: - qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; + qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK; qp->r_ack_psn = qp->r_psn; /* Queue RNR NAK for later */ rc_defered_ack(rcd, qp); -- cgit v1.2.3-59-g8ed1b From 2b8b34a948d063c4e803105ec0a3e8d27bd97c19 Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Sat, 5 Mar 2016 08:49:34 -0800 Subject: IB/hfi1: Replace kmalloc and memcpy with a kmemdup This change was recommended by Coccinelle tool when I ran the command: -bash-4.2$ make coccicheck MODE=patch M=drivers/infiniband/hw/hfi1/ Reviewed-by: Jubin John Reviewed-by: Mike Marciniszyn Reviewed-by: Dennis Dalessandro Signed-off-by: Harish Chegondi Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/efivar.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/staging/rdma/hfi1/efivar.c index 5fe39241b635..3f014f96f9e0 100644 --- a/drivers/staging/rdma/hfi1/efivar.c +++ b/drivers/staging/rdma/hfi1/efivar.c @@ -125,13 +125,12 @@ static int read_efi_var(const char *name, unsigned long *size, * temporary buffer. Now allocate a correctly sized * buffer. */ - data = kmalloc(temp_size, GFP_KERNEL); + data = kmemdup(temp_buffer, temp_size, GFP_KERNEL); if (!data) { ret = -ENOMEM; goto fail; } - memcpy(data, temp_buffer, temp_size); *size = temp_size; *return_data = data; -- cgit v1.2.3-59-g8ed1b From 3afb6f637e8edd68cee35b5c432c176f57fd712f Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:49:39 -0800 Subject: IB/hfi1: Remove ASIC block clear The ASIC block is shared between two HFIs. Individual devices should not initialize registers there. Retain the power-on values. Individual users set registers as needed with one exception. Clear sbus fast mode on "slow" calls. Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 88 ------------------------------------ drivers/staging/rdma/hfi1/firmware.c | 3 ++ 2 files changed, 3 insertions(+), 88 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 263c88274514..0874287dcc04 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -12934,91 +12934,6 @@ static void reset_cce_csrs(struct hfi1_devdata *dd) write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0); } -/* set ASIC CSRs to chip reset defaults */ -static void reset_asic_csrs(struct hfi1_devdata *dd) -{ - int i; - - /* - * If the HFIs are shared between separate nodes or VMs, - * then more will need to be done here. 
One idea is a module - * parameter that returns early, letting the first power-on or - * a known first load do the reset and blocking all others. - */ - - if (!(dd->flags & HFI1_DO_INIT_ASIC)) - return; - - if (dd->icode != ICODE_FPGA_EMULATION) { - /* emulation does not have an SBus - leave these alone */ - /* - * All writes to ASIC_CFG_SBUS_REQUEST do something. - * Notes: - * o The reset is not zero if aimed at the core. See the - * SBus documentation for details. - * o If the SBus firmware has been updated (e.g. by the BIOS), - * will the reset revert that? - */ - /* ASIC_CFG_SBUS_REQUEST leave alone */ - write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0); - } - /* ASIC_SBUS_RESULT read-only */ - write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0); - for (i = 0; i < ASIC_NUM_SCRATCH; i++) - write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0); - write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */ - - /* We might want to retain this state across FLR if we ever use it */ - write_csr(dd, ASIC_CFG_DRV_STR, 0); - - /* ASIC_CFG_THERM_POLL_EN leave alone */ - /* ASIC_STS_THERM read-only */ - /* ASIC_CFG_RESET leave alone */ - - write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0); - /* ASIC_PCIE_SD_HOST_STATUS read-only */ - write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0); - write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0); - /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */ - write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */ - /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */ - /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */ - for (i = 0; i < 16; i++) - write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0); - - /* ASIC_GPIO_IN read-only */ - write_csr(dd, ASIC_GPIO_OE, 0); - write_csr(dd, ASIC_GPIO_INVERT, 0); - write_csr(dd, ASIC_GPIO_OUT, 0); - write_csr(dd, ASIC_GPIO_MASK, 0); - /* ASIC_GPIO_STATUS read-only */ - write_csr(dd, ASIC_GPIO_CLEAR, ~0ull); - /* ASIC_GPIO_FORCE leave alone */ - - /* ASIC_QSFP1_IN read-only */ - write_csr(dd, ASIC_QSFP1_OE, 0); - write_csr(dd, ASIC_QSFP1_INVERT, 0); - write_csr(dd, ASIC_QSFP1_OUT, 0); - write_csr(dd, ASIC_QSFP1_MASK, 0); - /* ASIC_QSFP1_STATUS read-only */ - write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull); - /* ASIC_QSFP1_FORCE leave alone */ - - /* ASIC_QSFP2_IN read-only */ - write_csr(dd, ASIC_QSFP2_OE, 0); - write_csr(dd, ASIC_QSFP2_INVERT, 0); - write_csr(dd, ASIC_QSFP2_OUT, 0); - write_csr(dd, ASIC_QSFP2_MASK, 0); - /* ASIC_QSFP2_STATUS read-only */ - write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull); - /* ASIC_QSFP2_FORCE leave alone */ - - write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR); - /* this also writes a NOP command, clearing paging mode */ - write_csr(dd, ASIC_EEP_ADDR_CMD, 0); - write_csr(dd, ASIC_EEP_DATA, 0); -} - /* set MISC CSRs to chip reset defaults */ static void reset_misc_csrs(struct hfi1_devdata *dd) { @@ -13428,14 +13343,11 @@ static void init_chip(struct hfi1_devdata *dd) hfi1_pcie_flr(dd); restore_pci_variables(dd); } - - reset_asic_csrs(dd); } else { dd_dev_info(dd, "Resetting CSRs with writes\n"); reset_cce_csrs(dd); reset_txe_csrs(dd); reset_rxe_csrs(dd); - reset_asic_csrs(dd); reset_misc_csrs(dd); } /* clear the DC reset */ diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index d5befd1afdbb..ca4e48988b70 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -1170,6 +1170,9 @@ int sbus_request_slow(struct hfi1_devdata *dd, { u64 reg, count = 0; + /* make sure fast mode is clear */ + clear_sbus_fast_mode(dd); + sbus_request(dd, receiver_addr, data_addr, command, 
data_in); write_csr(dd, ASIC_CFG_SBUS_EXECUTE, ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK); -- cgit v1.2.3-59-g8ed1b From 78eb129d47f553e6f0607c393ebf4e9851edd73e Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:49:45 -0800 Subject: IB/hfi1: Add shared ASIC structure Create a shared structure to exist between devices that share the same ASIC. Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 28 +++++++++++++++++++++++++--- drivers/staging/rdma/hfi1/hfi.h | 9 +++++++++ drivers/staging/rdma/hfi1/init.c | 20 ++++++++++++++++++++ 3 files changed, 54 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 0874287dcc04..686cadf449b9 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13800,15 +13800,20 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd) ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT)) /* + * Information can be shared between the two HFIs on the same ASIC + * in the same OS. This function finds the peer device and sets + * up a shared structure. + * * Certain chip functions need to be initialized only once per asic * instead of per-device. This function finds the peer device and * checks whether that chip initialization needs to be done by this * device. */ -static void asic_should_init(struct hfi1_devdata *dd) +static int init_asic_data(struct hfi1_devdata *dd) { unsigned long flags; struct hfi1_devdata *tmp, *peer = NULL; + int ret = 0; spin_lock_irqsave(&hfi1_devs_lock, flags); /* Find our peer device */ @@ -13826,7 +13831,22 @@ static void asic_should_init(struct hfi1_devdata *dd) */ if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC)) dd->flags |= HFI1_DO_INIT_ASIC; + + if (peer) { + dd->asic_data = peer->asic_data; + } else { + dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); + if (!dd->asic_data) { + ret = -ENOMEM; + goto done; + } + mutex_init(&dd->asic_data->asic_resource_mutex); + } + dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ + +done: spin_unlock_irqrestore(&hfi1_devs_lock, flags); + return ret; } /* @@ -14076,8 +14096,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, /* needs to be done before we look for the peer device */ read_guid(dd); - /* should this device init the ASIC block? */ - asic_should_init(dd); + /* set up shared ASIC data with peer device */ + ret = init_asic_data(dd); + if (ret) + goto bail_cleanup; /* obtain chip sizes, reset chip CSRs */ init_chip(dd); diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 92154822de5a..e71a1c2fbfac 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -805,6 +805,12 @@ struct hfi1_temp { u8 triggers; /* temperature triggers */ }; +/* common data between shared ASIC HFIs */ +struct hfi1_asic_data { + struct hfi1_devdata *dds[2]; /* back pointers */ + struct mutex asic_resource_mutex; +}; + /* device data struct now contains only "general per-device" info. * fields related to a physical IB port are in a hfi1_pportdata struct. 
*/ @@ -880,6 +886,9 @@ struct hfi1_devdata { wait_queue_head_t sdma_unfreeze_wq; atomic_t sdma_unfreeze_count; + /* common data between shared ASIC HFIs in this OS */ + struct hfi1_asic_data *asic_data; + /* hfi1_pportdata, points to array of (physical) port-specific * data structs, indexed by pidx (0..n-1) */ diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 37b3ce8377b6..260a8e19beb7 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -974,6 +974,25 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) kfree(rcd); } +/* + * Release our hold on the shared asic data. If we are the last one, + * free the structure. Must be holding hfi1_devs_lock. + */ +static void release_asic_data(struct hfi1_devdata *dd) +{ + int other; + + if (!dd->asic_data) + return; + dd->asic_data->dds[dd->hfi1_id] = NULL; + other = dd->hfi1_id ? 0 : 1; + if (!dd->asic_data->dds[other]) { + /* we are the last holder, free it */ + kfree(dd->asic_data); + } + dd->asic_data = NULL; +} + void hfi1_free_devdata(struct hfi1_devdata *dd) { unsigned long flags; @@ -981,6 +1000,7 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) spin_lock_irqsave(&hfi1_devs_lock, flags); idr_remove(&hfi1_unit_table, dd->unit); list_del(&dd->list); + release_asic_data(dd); spin_unlock_irqrestore(&hfi1_devs_lock, flags); free_platform_config(dd); rcu_barrier(); /* wait for rcu callbacks to complete */ -- cgit v1.2.3-59-g8ed1b From a2ee27a4552505db5967630abcc3a90340e0d824 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:49:50 -0800 Subject: IB/hfi1: Add ASIC resource reservation functions The ASIC block is a shared hardware resource between two devices on the chip. Add functions to acquire and release these resources in a way that is safe for both multiple users on the same OS and multiple users on different OSes, while holding the hardware mutex as little as possible. Reservations are noted in a scratch register in the shared region. There are two types of reservations: per-HFI dynamic and permanent. Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 2 + drivers/staging/rdma/hfi1/chip.h | 30 ++++++ drivers/staging/rdma/hfi1/firmware.c | 187 +++++++++++++++++++++++++++++++++++ 3 files changed, 219 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 686cadf449b9..98ebee4aac22 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13368,6 +13368,7 @@ static void init_chip(struct hfi1_devdata *dd) */ write_csr(dd, ASIC_QSFP1_OUT, 0x1f); write_csr(dd, ASIC_QSFP2_OUT, 0x1f); + init_chip_resources(dd); } static void init_early_variables(struct hfi1_devdata *dd) @@ -13794,6 +13795,7 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd) free_cntrs(dd); free_rcverr(dd); clean_up_interrupts(dd); + finish_chip_resources(dd); } #define HFI_BASE_GUID(dev) \ diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index e9a41ed39642..dc684bc11d27 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -639,6 +639,36 @@ int load_firmware(struct hfi1_devdata *dd); void dispose_firmware(void); int acquire_hw_mutex(struct hfi1_devdata *dd); void release_hw_mutex(struct hfi1_devdata *dd); + +/* + * Bitmask of dynamic access for ASIC block chip resources. 
Each HFI has its + * own range of bits for the resource so it can clear its own bits on + * starting and exiting. If either HFI has the resource bit set, the + * resource is in use. The separate bit ranges are: + * HFI0 bits 7:0 + * HFI1 bits 15:8 + */ +#define CR_SBUS 0x01 /* SBUS, THERM, and PCIE registers */ +#define CR_EPROM 0x02 /* EEP, GPIO registers */ +#define CR_I2C1 0x04 /* QSFP1_OE register */ +#define CR_I2C2 0x08 /* QSFP2_OE register */ +#define CR_DYN_SHIFT 8 /* dynamic flag shift */ +#define CR_DYN_MASK ((1ull << CR_DYN_SHIFT) - 1) + +/* + * Bitmask of static ASIC states these are outside of the dynamic ASIC + * block chip resources above. These are to be set once and never cleared. + * Must be holding the SBus dynamic flag when setting. + */ +#define CR_THERM_INIT 0x010000 + +int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait); +void release_chip_resource(struct hfi1_devdata *dd, u32 resource); +bool check_chip_resource(struct hfi1_devdata *dd, u32 resource, + const char *func); +void init_chip_resources(struct hfi1_devdata *dd); +void finish_chip_resources(struct hfi1_devdata *dd); + void fabric_serdes_reset(struct hfi1_devdata *dd); int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result); diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index ca4e48988b70..140dd8646cd0 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -1385,6 +1385,193 @@ void release_hw_mutex(struct hfi1_devdata *dd) write_csr(dd, ASIC_CFG_MUTEX, 0); } +/* return the given resource bit(s) as a mask for the given HFI */ +static inline u64 resource_mask(u32 hfi1_id, u32 resource) +{ + return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0); +} + +static void fail_mutex_acquire_message(struct hfi1_devdata *dd, + const char *func) +{ + dd_dev_err(dd, + "%s: hardware mutex stuck - suggest rebooting the machine\n", + func); +} + +/* + * Acquire access to a chip resource. + * + * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed. + */ +static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource) +{ + u64 scratch0, all_bits, my_bit; + int ret; + + if (resource & CR_DYN_MASK) { + /* a dynamic resource is in use if either HFI has set the bit */ + all_bits = resource_mask(0, resource) | + resource_mask(1, resource); + my_bit = resource_mask(dd->hfi1_id, resource); + } else { + /* non-dynamic resources are not split between HFIs */ + all_bits = resource; + my_bit = resource; + } + + /* lock against other callers within the driver wanting a resource */ + mutex_lock(&dd->asic_data->asic_resource_mutex); + + ret = acquire_hw_mutex(dd); + if (ret) { + fail_mutex_acquire_message(dd, __func__); + ret = -EIO; + goto done; + } + + scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); + if (scratch0 & all_bits) { + ret = -EBUSY; + } else { + write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit); + /* force write to be visible to other HFI on another OS */ + (void)read_csr(dd, ASIC_CFG_SCRATCH); + } + + release_hw_mutex(dd); + +done: + mutex_unlock(&dd->asic_data->asic_resource_mutex); + return ret; +} + +/* + * Acquire access to a chip resource, wait up to mswait milliseconds for + * the resource to become available. + * + * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex + * acquire failed. 
+ */ +int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait) +{ + unsigned long timeout; + int ret; + + timeout = jiffies + msecs_to_jiffies(mswait); + while (1) { + ret = __acquire_chip_resource(dd, resource); + if (ret != -EBUSY) + return ret; + /* resource is busy, check our timeout */ + if (time_after_eq(jiffies, timeout)) + return -EBUSY; + usleep_range(80, 120); /* arbitrary delay */ + } +} + +/* + * Release access to a chip resource + */ +void release_chip_resource(struct hfi1_devdata *dd, u32 resource) +{ + u64 scratch0, bit; + + /* only dynamic resources should ever be cleared */ + if (!(resource & CR_DYN_MASK)) { + dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__, + resource); + return; + } + bit = resource_mask(dd->hfi1_id, resource); + + /* lock against other callers within the driver wanting a resource */ + mutex_lock(&dd->asic_data->asic_resource_mutex); + + if (acquire_hw_mutex(dd)) { + fail_mutex_acquire_message(dd, __func__); + goto done; + } + + scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); + if ((scratch0 & bit) != 0) { + scratch0 &= ~bit; + write_csr(dd, ASIC_CFG_SCRATCH, scratch0); + /* force write to be visible to other HFI on another OS */ + (void)read_csr(dd, ASIC_CFG_SCRATCH); + } else { + dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n", + __func__, dd->hfi1_id, resource); + } + + release_hw_mutex(dd); + +done: + mutex_unlock(&dd->asic_data->asic_resource_mutex); +} + +/* + * Return true if resource is set, false otherwise. Print a warning + * if not set and a function is supplied. + */ +bool check_chip_resource(struct hfi1_devdata *dd, u32 resource, + const char *func) +{ + u64 scratch0, bit; + + if (resource & CR_DYN_MASK) + bit = resource_mask(dd->hfi1_id, resource); + else + bit = resource; + + scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); + if ((scratch0 & bit) == 0) { + if (func) + dd_dev_warn(dd, + "%s: id %d, resource 0x%x, not acquired!\n", + func, dd->hfi1_id, resource); + return false; + } + return true; +} + +static void clear_chip_resources(struct hfi1_devdata *dd, const char *func) +{ + u64 scratch0; + + /* lock against other callers within the driver wanting a resource */ + mutex_lock(&dd->asic_data->asic_resource_mutex); + + if (acquire_hw_mutex(dd)) { + fail_mutex_acquire_message(dd, func); + goto done; + } + + /* clear all dynamic access bits for this HFI */ + scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); + scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK); + write_csr(dd, ASIC_CFG_SCRATCH, scratch0); + /* force write to be visible to other HFI on another OS */ + (void)read_csr(dd, ASIC_CFG_SCRATCH); + + release_hw_mutex(dd); + +done: + mutex_unlock(&dd->asic_data->asic_resource_mutex); +} + +void init_chip_resources(struct hfi1_devdata *dd) +{ + /* clear any holds left by us */ + clear_chip_resources(dd, __func__); +} + +void finish_chip_resources(struct hfi1_devdata *dd) +{ + /* clear any holds left by us */ + clear_chip_resources(dd, __func__); +} + void set_sbus_fast_mode(struct hfi1_devdata *dd) { write_csr(dd, ASIC_CFG_SBUS_EXECUTE, -- cgit v1.2.3-59-g8ed1b From 60c708285c0d3877a78dd8ede24238bc68c09651 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:49:55 -0800 Subject: IB/hfi1: Change EPROM handling to use resource reservation Change EPROM handling to use the new ASIC resource reservation system. 
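To make the reservation encoding concrete, the dynamic bits work out as
follows for the EPROM resource. This is a worked example derived from the
CR_* constants in chip.h above, not part of the patch itself:

	/*
	 * ASIC_CFG_SCRATCH encoding, with CR_EPROM = 0x02 and
	 * CR_DYN_SHIFT = 8:
	 *
	 *   resource_mask(0, CR_EPROM) == 0x0002   HFI0 holds the EPROM
	 *   resource_mask(1, CR_EPROM) == 0x0200   HFI1 holds the EPROM
	 *
	 * A dynamic resource is free only when both HFIs' bits are clear,
	 * so the acquire path tests scratch0 against 0x0202 before
	 * setting its own bit.
	 */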
Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/eprom.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c index 87114af3a38e..bd8771570f81 100644 --- a/drivers/staging/rdma/hfi1/eprom.c +++ b/drivers/staging/rdma/hfi1/eprom.c @@ -102,9 +102,11 @@ #define EPROM_WP_N BIT_ULL(14) /* EPROM write line */ /* - * Use the EP mutex to guard against other callers from within the driver. + * How long to wait for the EPROM to become available, in ms. + * The spec 32 Mb EPROM takes around 40s to erase then write. + * Double it for safety. */ -static DEFINE_MUTEX(eprom_mutex); +#define EPROM_TIMEOUT 80000 /* ms */ /* * Turn on external enable line that allows writing on the flash. @@ -371,14 +373,9 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) if (!dd->eprom_available) return -EOPNOTSUPP; - /* lock against other callers touching the ASIC block */ - mutex_lock(&eprom_mutex); - - /* lock against the other HFI on another OS */ - ret = acquire_hw_mutex(dd); + ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); if (ret) { - dd_dev_err(dd, - "%s: unable to acquire hw mutex, no EPROM support\n", + dd_dev_err(dd, "%s: unable to acquire EPROM resource\n", __func__); goto done_asic; } @@ -428,9 +425,8 @@ int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd) break; } - release_hw_mutex(dd); + release_chip_resource(dd, CR_EPROM); done_asic: - mutex_unlock(&eprom_mutex); return ret; } @@ -441,23 +437,18 @@ int eprom_init(struct hfi1_devdata *dd) { int ret = 0; - /* only the discrete chip has an EPROM, nothing to do */ + /* only the discrete chip has an EPROM */ if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0) return 0; - /* lock against other callers */ - mutex_lock(&eprom_mutex); - /* - * Lock against the other HFI on another OS - the mutex above - * would have caught anything in this driver. It is OK if - * both OSes reset the EPROM - as long as they don't do it at - * the same time. + * It is OK if both HFIs reset the EPROM as long as they don't + * do it at the same time. */ - ret = acquire_hw_mutex(dd); + ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT); if (ret) { dd_dev_err(dd, - "%s: unable to acquire hw mutex, no EPROM support\n", + "%s: unable to acquire EPROM resource, no EPROM support\n", __func__); goto done_asic; } @@ -474,8 +465,7 @@ int eprom_init(struct hfi1_devdata *dd) write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID); dd->eprom_available = true; - release_hw_mutex(dd); + release_chip_resource(dd, CR_EPROM); done_asic: - mutex_unlock(&eprom_mutex); return ret; } -- cgit v1.2.3-59-g8ed1b From 576531fde8473333322905ea09fd5cfd14ce91ef Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:50:01 -0800 Subject: IB/hfi1: Change SBus handling to use resource reservation The SBus resource includes SBUS, PCIE, and THERM registers. Change SBus handling to use the new ASIC resource reservation system. 
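Like the EPROM conversion, the SBus conversion leans on two layers of
protection. A simplified sketch of the acquire path from firmware.c above,
with error handling elided:

	mutex_lock(&dd->asic_data->asic_resource_mutex); /* vs. this OS */
	acquire_hw_mutex(dd);                            /* vs. the peer OS */
	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	if (!(scratch0 & all_bits)) {                    /* resource free? */
		write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
		(void)read_csr(dd, ASIC_CFG_SCRATCH);    /* flush the write */
	}
	release_hw_mutex(dd);   /* the scratch bit, not the mutex, now guards */
	mutex_unlock(&dd->asic_data->asic_resource_mutex);

The hardware mutex is held only long enough to flip a bit, which is what
allows its timeout to be reduced later in this series.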
Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 9 +++++-- drivers/staging/rdma/hfi1/chip.h | 3 +++ drivers/staging/rdma/hfi1/firmware.c | 46 +++++++++++++++++++----------------- drivers/staging/rdma/hfi1/pcie.c | 13 ++++++---- 4 files changed, 42 insertions(+), 29 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 98ebee4aac22..269c9775f7c6 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -14324,7 +14324,12 @@ static int thermal_init(struct hfi1_devdata *dd) !(dd->flags & HFI1_DO_INIT_ASIC)) return ret; - acquire_hw_mutex(dd); + ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); + if (ret) { + THERM_FAILURE(dd, ret, "Acquire SBus"); + return ret; + } + dd_dev_info(dd, "Initializing thermal sensor\n"); /* Disable polling of thermal readings */ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0); @@ -14372,7 +14377,7 @@ static int thermal_init(struct hfi1_devdata *dd) /* Enable polling of thermal readings */ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); done: - release_hw_mutex(dd); + release_chip_resource(dd, CR_SBUS); return ret; } diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index dc684bc11d27..311e6e843399 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -669,6 +669,9 @@ bool check_chip_resource(struct hfi1_devdata *dd, u32 resource, void init_chip_resources(struct hfi1_devdata *dd); void finish_chip_resources(struct hfi1_devdata *dd); +/* ms wait time for access to an SBus resoure */ +#define SBUS_TIMEOUT 4000 /* long enough for a FW download and SBR */ + void fabric_serdes_reset(struct hfi1_devdata *dd); int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result); diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 140dd8646cd0..1ea1ad84c784 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -1125,15 +1125,23 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags) */ void fabric_serdes_reset(struct hfi1_devdata *dd) { + int ret; + if (!fw_fabric_serdes_load) return; + ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); + if (ret) { + dd_dev_err(dd, + "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n"); + return; + } + set_sbus_fast_mode(dd); + if (is_ax(dd)) { /* A0 serdes do not work with a re-download */ u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; - acquire_hw_mutex(dd); - set_sbus_fast_mode(dd); /* place SerDes in reset and disable SPICO */ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011); /* wait 100 refclk cycles @ 156.25MHz => 640ns */ @@ -1142,26 +1150,20 @@ void fabric_serdes_reset(struct hfi1_devdata *dd) sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010); /* turn SPICO enable on */ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002); - clear_sbus_fast_mode(dd); - release_hw_mutex(dd); - return; + } else { + turn_off_spicos(dd, SPICO_FABRIC); + /* + * No need for firmware retry - what to download has already + * been decided. + * No need to pay attention to the load return - the only + * failure is a validation failure, which has already been + * checked by the initial download. 
+ */ + (void)load_fabric_serdes_firmware(dd, &fw_fabric); } - acquire_hw_mutex(dd); - set_sbus_fast_mode(dd); - - turn_off_spicos(dd, SPICO_FABRIC); - /* - * No need for firmware retry - what to download has already been - * decided. - * No need to pay attention to the load return - the only failure - * is a validation failure, which has already been checked by the - * initial download. - */ - (void)load_fabric_serdes_firmware(dd, &fw_fabric); - clear_sbus_fast_mode(dd); - release_hw_mutex(dd); + release_chip_resource(dd, CR_SBUS); } /* Access to the SBus in this routine should probably be serialized */ @@ -1598,7 +1600,7 @@ int load_firmware(struct hfi1_devdata *dd) int ret; if (fw_fabric_serdes_load) { - ret = acquire_hw_mutex(dd); + ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); if (ret) return ret; @@ -1614,7 +1616,7 @@ int load_firmware(struct hfi1_devdata *dd) } while (retry_firmware(dd, ret)); clear_sbus_fast_mode(dd); - release_hw_mutex(dd); + release_chip_resource(dd, CR_SBUS); if (ret) return ret; } @@ -1995,7 +1997,7 @@ int get_platform_config_field(struct hfi1_devdata *dd, * Download the firmware needed for the Gen3 PCIe SerDes. An update * to the SBus firmware is needed before updating the PCIe firmware. * - * Note: caller must be holding the HW mutex. + * Note: caller must be holding the SBus resource. */ int load_pcie_firmware(struct hfi1_devdata *dd) { diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 1adfa8bfaf2b..42a409f16449 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -773,7 +773,7 @@ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs, /* * Steps to be done after the PCIe firmware is downloaded and * before the SBR for the Pcie Gen3. - * The hardware mutex is already being held. + * The SBus resource is already being held. */ static void pcie_post_steps(struct hfi1_devdata *dd) { @@ -1012,10 +1012,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd) goto done_no_mutex; } - /* hold the HW mutex across the firmware download and SBR */ - ret = acquire_hw_mutex(dd); - if (ret) + /* hold the SBus resource across the firmware download and SBR */ + ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); + if (ret) { + dd_dev_err(dd, "%s: unable to acquire SBus resource\n", + __func__); return ret; + } /* make sure thermal polling is not causing interrupts */ therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN); @@ -1324,7 +1327,7 @@ done: dd_dev_info(dd, "%s: Re-enable therm polling\n", __func__); } - release_hw_mutex(dd); + release_chip_resource(dd, CR_SBUS); done_no_mutex: /* return no error if it is OK to be at current speed */ if (ret && !return_error) { -- cgit v1.2.3-59-g8ed1b From 765a6fac9132da203347525032bb40b1e9055104 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:50:06 -0800 Subject: IB/hfi1: Change QSFP functions to use resource reservation Remove the mutex guarding each operation in favor the ASIC resource acquire/release. Push the resource acquire/release, above each operation call to allow exclusive access across multiple operations. 
Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 20 +++++-- drivers/staging/rdma/hfi1/chip.h | 3 + drivers/staging/rdma/hfi1/debugfs.c | 22 +++++-- drivers/staging/rdma/hfi1/hfi.h | 14 ++++- drivers/staging/rdma/hfi1/init.c | 1 - drivers/staging/rdma/hfi1/platform.c | 26 ++++++--- drivers/staging/rdma/hfi1/qsfp.c | 108 +++++++++++++++++++++++------------ drivers/staging/rdma/hfi1/qsfp.h | 4 ++ 8 files changed, 142 insertions(+), 56 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 269c9775f7c6..d3a9b9f3b4f5 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -6267,8 +6267,8 @@ void handle_8051_request(struct work_struct *work) cdr_ctrl_byte &= ~(1 << i); } } - qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS, - &cdr_ctrl_byte, 1); + one_qsfp_write(ppd, dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS, + &cdr_ctrl_byte, 1); hreq_response(dd, HREQ_SUCCESS, data); refresh_qsfp_cache(ppd, &ppd->qsfp_info); break; @@ -9290,8 +9290,8 @@ void qsfp_event(struct work_struct *work) if (qd->check_interrupt_flags) { u8 qsfp_interrupt_status[16] = {0,}; - if (qsfp_read(ppd, dd->hfi1_id, 6, - &qsfp_interrupt_status[0], 16) != 16) { + if (one_qsfp_read(ppd, dd->hfi1_id, 6, + &qsfp_interrupt_status[0], 16) != 16) { dd_dev_info(dd, "%s: Failed to read status of QSFP module\n", __func__); @@ -9845,7 +9845,17 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) if (ppd->port_type == PORT_TYPE_QSFP && ppd->qsfp_info.limiting_active && qsfp_mod_present(ppd)) { - set_qsfp_tx(ppd, 0); + int ret; + + ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT); + if (ret == 0) { + set_qsfp_tx(ppd, 0); + release_chip_resource(dd, qsfp_resource(dd)); + } else { + /* not fatal, but should warn */ + dd_dev_err(dd, + "Unable to acquire lock to turn off QSFP TX\n"); + } } /* diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 311e6e843399..9313963b4cf4 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -672,6 +672,9 @@ void finish_chip_resources(struct hfi1_devdata *dd); /* ms wait time for access to an SBus resoure */ #define SBUS_TIMEOUT 4000 /* long enough for a FW download and SBR */ +/* ms wait time for a qsfp (i2c) chain to become available */ +#define QSFP_WAIT 20000 /* long enough for FW update to the F4 uc */ + void fabric_serdes_reset(struct hfi1_devdata *dd); int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result); diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index 99845bc19437..665666cc2f04 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -465,16 +465,22 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, goto _free; } + ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); + if (ret) + goto _free; + total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count); if (total_written < 0) { ret = total_written; - goto _free; + goto _release; } *ppos += total_written; ret = total_written; + _release: + release_chip_resource(ppd->dd, i2c_target(target)); _free: kfree(buff); _return: @@ -526,10 +532,14 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, goto _return; } + ret = acquire_chip_resource(ppd->dd, 
i2c_target(target), 0); + if (ret) + goto _free; + total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count); if (total_read < 0) { ret = total_read; - goto _free; + goto _release; } *ppos += total_read; @@ -537,11 +547,13 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, ret = copy_to_user(buf, buff, total_read); if (ret > 0) { ret = -EFAULT; - goto _free; + goto _release; } ret = total_read; + _release: + release_chip_resource(ppd->dd, i2c_target(target)); _free: kfree(buff); _return: @@ -592,7 +604,7 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf, goto _free; } - total_written = qsfp_write(ppd, target, *ppos, buff, count); + total_written = one_qsfp_write(ppd, target, *ppos, buff, count); if (total_written < 0) { ret = total_written; goto _free; @@ -646,7 +658,7 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf, goto _return; } - total_read = qsfp_read(ppd, target, *ppos, buff, count); + total_read = one_qsfp_read(ppd, target, *ppos, buff, count); if (total_read < 0) { ret = total_read; goto _free; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index e71a1c2fbfac..108015c09239 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1048,8 +1048,6 @@ struct hfi1_devdata { struct platform_config platform_config; struct platform_config_cache pcfg_cache; - /* control high-level access to qsfp */ - struct mutex qsfp_i2c_mutex; struct diag_client *diag_client; spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */ @@ -1938,6 +1936,18 @@ static inline void setextled(struct hfi1_devdata *dd, u32 on) write_csr(dd, DCC_CFG_LED_CNTRL, 0x10); } +/* return the i2c resource given the target */ +static inline u32 i2c_target(u32 target) +{ + return target ? 
CR_I2C2 : CR_I2C1; +} + +/* return the i2c chain chip resource that this HFI uses for QSFP */ +static inline u32 qsfp_resource(struct hfi1_devdata *dd) +{ + return i2c_target(dd->hfi1_id); +} + int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp); #endif /* _HFI1_KERNEL_H */ diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 260a8e19beb7..f21933ca93ce 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -1065,7 +1065,6 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra) spin_lock_init(&dd->sc_init_lock); spin_lock_init(&dd->dc8051_lock); spin_lock_init(&dd->dc8051_memlock); - mutex_init(&dd->qsfp_i2c_mutex); seqlock_init(&dd->sc2vl_lock); spin_lock_init(&dd->sde_map_lock); spin_lock_init(&dd->pio_map_lock); diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c index 4777414352d0..0a1d074583e4 100644 --- a/drivers/staging/rdma/hfi1/platform.c +++ b/drivers/staging/rdma/hfi1/platform.c @@ -601,23 +601,30 @@ static void apply_tunings( static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, u32 *ptr_rx_preset, u32 *ptr_total_atten) { - int ret = 0; + int ret; u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled; u8 *cache = ppd->qsfp_info.cache; + ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT); + if (ret) { + dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n", + __func__, (int)ppd->dd->hfi1_id); + return ret; + } + ppd->qsfp_info.limiting_active = 1; ret = set_qsfp_tx(ppd, 0); if (ret) - return ret; + goto bail_unlock; ret = qual_power(ppd); if (ret) - return ret; + goto bail_unlock; ret = qual_bitrate(ppd); if (ret) - return ret; + goto bail_unlock; if (ppd->qsfp_info.reset_needed) { reset_qsfp(ppd); @@ -629,7 +636,7 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, ret = set_qsfp_high_power(ppd); if (ret) - return ret; + goto bail_unlock; if (cache[QSFP_EQ_INFO_OFFS] & 0x4) { ret = get_platform_config_field( @@ -639,7 +646,7 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, ptr_tx_preset, 4); if (ret) { *ptr_tx_preset = OPA_INVALID_INDEX; - return ret; + goto bail_unlock; } } else { ret = get_platform_config_field( @@ -649,7 +656,7 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, ptr_tx_preset, 4); if (ret) { *ptr_tx_preset = OPA_INVALID_INDEX; - return ret; + goto bail_unlock; } } @@ -658,7 +665,7 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4); if (ret) { *ptr_rx_preset = OPA_INVALID_INDEX; - return ret; + goto bail_unlock; } if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G)) @@ -677,6 +684,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset, apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset); ret = set_qsfp_tx(ppd, 1); + +bail_unlock: + release_chip_resource(ppd->dd, qsfp_resource(ppd->dd)); return ret; } diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c index 7e76b93f8f94..9ed1963010fe 100644 --- a/drivers/staging/rdma/hfi1/qsfp.c +++ b/drivers/staging/rdma/hfi1/qsfp.c @@ -59,7 +59,7 @@ #define I2C_MAX_RETRY 4 /* - * Unlocked i2c write. Must hold dd->qsfp_i2c_mutex. + * Raw i2c write. No set-up or lock checking. 
*/ static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) @@ -88,15 +88,16 @@ static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, return cnt; } +/* + * Caller must hold the i2c chain resource. + */ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) { - struct hfi1_devdata *dd = ppd->dd; int ret; - ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex); - if (ret) - return ret; + if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__)) + return -EACCES; /* make sure the TWSI bus is in a sane state */ ret = hfi1_twsi_reset(ppd->dd, target); @@ -104,18 +105,14 @@ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, hfi1_dev_porterr(ppd->dd, ppd->port, "I2C chain %d write interface reset failed\n", target); - goto done; + return ret; } - ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len); - -done: - mutex_unlock(&dd->qsfp_i2c_mutex); - return ret; + return __i2c_write(ppd, target, i2c_addr, offset, bp, len); } /* - * Unlocked i2c read. Must hold dd->qsfp_i2c_mutex. + * Raw i2c read. No set-up or lock checking. */ static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) @@ -157,15 +154,16 @@ exit: return ret; } +/* + * Caller must hold the i2c chain resource. + */ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) { - struct hfi1_devdata *dd = ppd->dd; int ret; - ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex); - if (ret) - return ret; + if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__)) + return -EACCES; /* make sure the TWSI bus is in a sane state */ ret = hfi1_twsi_reset(ppd->dd, target); @@ -173,19 +171,17 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, hfi1_dev_porterr(ppd->dd, ppd->port, "I2C chain %d read interface reset failed\n", target); - goto done; + return ret; } - ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len); - -done: - mutex_unlock(&dd->qsfp_i2c_mutex); - return ret; + return __i2c_read(ppd, target, i2c_addr, offset, bp, len); } /* * Write page n, offset m of QSFP memory as defined by SFF 8636 * by writing @addr = ((256 * n) + m) + * + * Caller must hold the i2c chain resource. */ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) @@ -196,9 +192,8 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int ret; u8 page; - ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex); - if (ret) - return ret; + if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__)) + return -EACCES; /* make sure the TWSI bus is in a sane state */ ret = hfi1_twsi_reset(ppd->dd, target); @@ -206,7 +201,6 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, hfi1_dev_porterr(ppd->dd, ppd->port, "QSFP chain %d write interface reset failed\n", target); - mutex_unlock(&ppd->dd->qsfp_i2c_mutex); return ret; } @@ -242,16 +236,36 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, addr += ret; } - mutex_unlock(&ppd->dd->qsfp_i2c_mutex); - if (ret < 0) return ret; return count; } +/* + * Perform a stand-alone single QSFP write. Acquire the resource, do the + * read, then release the resource. 
+ */ +int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, + int len) +{ + struct hfi1_devdata *dd = ppd->dd; + u32 resource = qsfp_resource(dd); + int ret; + + ret = acquire_chip_resource(dd, resource, QSFP_WAIT); + if (ret) + return ret; + ret = qsfp_write(ppd, target, addr, bp, len); + release_chip_resource(dd, resource); + + return ret; +} + /* * Access page n, offset m of QSFP memory as defined by SFF 8636 * by reading @addr = ((256 * n) + m) + * + * Caller must hold the i2c chain resource. */ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len) @@ -262,9 +276,8 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int ret; u8 page; - ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex); - if (ret) - return ret; + if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__)) + return -EACCES; /* make sure the TWSI bus is in a sane state */ ret = hfi1_twsi_reset(ppd->dd, target); @@ -272,7 +285,6 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, hfi1_dev_porterr(ppd->dd, ppd->port, "QSFP chain %d read interface reset failed\n", target); - mutex_unlock(&ppd->dd->qsfp_i2c_mutex); return ret; } @@ -309,13 +321,31 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, addr += ret; } - mutex_unlock(&ppd->dd->qsfp_i2c_mutex); - if (ret < 0) return ret; return count; } +/* + * Perform a stand-alone single QSFP read. Acquire the resource, do the + * read, then release the resource. + */ +int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, + int len) +{ + struct hfi1_devdata *dd = ppd->dd; + u32 resource = qsfp_resource(dd); + int ret; + + ret = acquire_chip_resource(dd, resource, QSFP_WAIT); + if (ret) + return ret; + ret = qsfp_read(ppd, target, addr, bp, len); + release_chip_resource(dd, resource); + + return ret; +} + /* * This function caches the QSFP memory range in 128 byte chunks. 
* As an example, the next byte after address 255 is byte 128 from @@ -341,9 +371,13 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) if (!qsfp_mod_present(ppd)) { ret = -ENODEV; - goto bail; + goto bail_no_release; } + ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT); + if (ret) + goto bail_no_release; + ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE); if (ret != QSFP_PAGESIZE) { dd_dev_info(ppd->dd, @@ -406,6 +440,8 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) } } + release_chip_resource(ppd->dd, qsfp_resource(ppd->dd)); + spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); ppd->qsfp_info.cache_valid = 1; ppd->qsfp_info.cache_refresh_required = 0; @@ -414,6 +450,8 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp) return 0; bail: + release_chip_resource(ppd->dd, qsfp_resource(ppd->dd)); +bail_no_release: memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128)); return ret; } diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h index 2ad59807573f..831fe4cf1345 100644 --- a/drivers/staging/rdma/hfi1/qsfp.h +++ b/drivers/staging/rdma/hfi1/qsfp.h @@ -235,3 +235,7 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len); int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, int len); +int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, + int len); +int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, + int len); -- cgit v1.2.3-59-g8ed1b From a453698b52dbfb248d23331450c638eaa3219025 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:50:11 -0800 Subject: IB/hfi1: Change thermal init to use resource reservation Use the resource reservation system to flag that the ASIC thermal has been initialized. Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index d3a9b9f3b4f5..717091375810 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -14331,7 +14331,7 @@ static int thermal_init(struct hfi1_devdata *dd) int ret = 0; if (dd->icode != ICODE_RTL_SILICON || - !(dd->flags & HFI1_DO_INIT_ASIC)) + check_chip_resource(dd, CR_THERM_INIT, NULL)) return ret; ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); @@ -14386,6 +14386,12 @@ static int thermal_init(struct hfi1_devdata *dd) /* Enable polling of thermal readings */ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); + + /* Set initialized flag */ + ret = acquire_chip_resource(dd, CR_THERM_INIT, 0); + if (ret) + THERM_FAILURE(dd, ret, "Unable to set thermal init flag"); + done: release_chip_resource(dd, CR_SBUS); return ret; -- cgit v1.2.3-59-g8ed1b From 7a8f28ca3daa61dc48046b4f1fa73139fb47bbfe Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:50:17 -0800 Subject: IB/hfi1: Remove unused HFI1_DO_INIT_ASIC flag The HFI1_DO_INIT_ASIC flag is no longer used. Remove the flag and the code that sets it. 
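Taken together with the previous patch, the per-device software flag becomes a persistent chip-resource bit that either HFI on the ASIC can observe. A condensed sketch of the resulting latch pattern; init_once_per_asic() is a made-up name, the helpers are the ones used in thermal_init() above, and the one-time work itself is elided:

static int init_once_per_asic(struct hfi1_devdata *dd)
{
	/* already latched by this HFI or its peer on the same ASIC? */
	if (check_chip_resource(dd, CR_THERM_INIT, NULL))
		return 0;

	/* ... perform the one-time initialization here ... */

	/* latch the flag; it is never released, so it persists */
	return acquire_chip_resource(dd, CR_THERM_INIT, 0);
}

Because the bit lives in a chip scratch register rather than in driver memory, the "already initialized" state survives across the two HFIs and across OS instances sharing the ASIC.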
Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 12 ------------ drivers/staging/rdma/hfi1/hfi.h | 1 - 2 files changed, 13 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index 717091375810..ac0345257b9c 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -13815,11 +13815,6 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd) * Information can be shared between the two HFIs on the same ASIC * in the same OS. This function finds the peer device and sets * up a shared structure. - * - * Certain chip functions need to be initialized only once per asic - * instead of per-device. This function finds the peer device and - * checks whether that chip initialization needs to be done by this - * device. */ static int init_asic_data(struct hfi1_devdata *dd) { @@ -13837,13 +13832,6 @@ static int init_asic_data(struct hfi1_devdata *dd) } } - /* - * "Claim" the ASIC for initialization if it hasn't been - " "claimed" yet. - */ - if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC)) - dd->flags |= HFI1_DO_INIT_ASIC; - if (peer) { dd->asic_data = peer->asic_data; } else { diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 108015c09239..035a151d2d5c 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1602,7 +1602,6 @@ static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd) #define HFI1_HAS_SDMA_TIMEOUT 0x8 #define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */ #define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */ -#define HFI1_DO_INIT_ASIC 0x100 /* This device will init the ASIC */ /* IB dword length mask in PBC (lower 11 bits); same for all chips */ #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1) -- cgit v1.2.3-59-g8ed1b From b0506f4c56d66f4a8413eaeb57212cf8166e30e9 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:50:22 -0800 Subject: IB/hfi1: Reduce hardware mutex timeout The hardware mutex is now held only long enough to set or clear flags. Reduce the timeout to something more reasonable. Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/firmware.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c index 1ea1ad84c784..3040162cb326 100644 --- a/drivers/staging/rdma/hfi1/firmware.c +++ b/drivers/staging/rdma/hfi1/firmware.c @@ -198,7 +198,7 @@ static const struct firmware *platform_config; #define RSA_ENGINE_TIMEOUT 100 /* ms */ /* hardware mutex timeout, in ms */ -#define HM_TIMEOUT 4000 /* 4 s */ +#define HM_TIMEOUT 10 /* ms */ /* 8051 memory access timeout, in us */ #define DC8051_ACCESS_TIMEOUT 100 /* us */ -- cgit v1.2.3-59-g8ed1b From ae993e7fba05c6159e1af1dc504bade46a94eb47 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:50:27 -0800 Subject: IB/hfi1: Hold i2c resource across debugfs open/close External i2c firmware updates are done in multiple steps and cannot have other things done in between. For debugfs files, acquire the resource on open and release it on close. 
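Condensed, the pairing the patch applies to each i2c and qsfp file is: pin the module and take the chain resource in .open, drop both in .release, so every read or write in between runs with the chain held. A sketch with hypothetical chain_debugfs_* names; the real helpers are in the diff that follows:

/* .open: hold the chain for the lifetime of the file descriptor */
static int chain_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
	struct hfi1_pportdata *ppd = private2ppd(fp);
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;
	ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
	if (ret)
		module_put(THIS_MODULE);	/* undo the pin on failure */
	return ret;
}

/* .release: the mirror image; always runs, so the resource cannot leak */
static int chain_debugfs_release(struct inode *in, struct file *fp, u32 target)
{
	release_chip_resource(private2ppd(fp)->dd, i2c_target(target));
	module_put(THIS_MODULE);
	return 0;
}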
Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/debugfs.c | 145 ++++++++++++++++++++++++++++++------ 1 file changed, 124 insertions(+), 21 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index 665666cc2f04..6a1bc28a9107 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "hfi.h" #include "debugfs.h" @@ -465,22 +466,16 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf, goto _free; } - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) - goto _free; - total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count); if (total_written < 0) { ret = total_written; - goto _release; + goto _free; } *ppos += total_written; ret = total_written; - _release: - release_chip_resource(ppd->dd, i2c_target(target)); _free: kfree(buff); _return: @@ -532,14 +527,10 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, goto _return; } - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) - goto _free; - total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count); if (total_read < 0) { ret = total_read; - goto _release; + goto _free; } *ppos += total_read; @@ -547,13 +538,11 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf, ret = copy_to_user(buf, buff, total_read); if (ret > 0) { ret = -EFAULT; - goto _release; + goto _free; } ret = total_read; - _release: - release_chip_resource(ppd->dd, i2c_target(target)); _free: kfree(buff); _return: @@ -604,7 +593,7 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf, goto _free; } - total_written = one_qsfp_write(ppd, target, *ppos, buff, count); + total_written = qsfp_write(ppd, target, *ppos, buff, count); if (total_written < 0) { ret = total_written; goto _free; @@ -658,7 +647,7 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf, goto _return; } - total_read = one_qsfp_read(ppd, target, *ppos, buff, count); + total_read = qsfp_read(ppd, target, *ppos, buff, count); if (total_read < 0) { ret = total_read; goto _free; @@ -695,6 +684,104 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf, return __qsfp_debugfs_read(file, buf, count, ppos, 1); } +static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target) +{ + struct hfi1_pportdata *ppd; + int ret; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + ppd = private2ppd(fp); + + ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); + if (ret) /* failed - release the module */ + module_put(THIS_MODULE); + + return ret; +} + +static int i2c1_debugfs_open(struct inode *in, struct file *fp) +{ + return __i2c_debugfs_open(in, fp, 0); +} + +static int i2c2_debugfs_open(struct inode *in, struct file *fp) +{ + return __i2c_debugfs_open(in, fp, 1); +} + +static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target) +{ + struct hfi1_pportdata *ppd; + + ppd = private2ppd(fp); + + release_chip_resource(ppd->dd, i2c_target(target)); + module_put(THIS_MODULE); + + return 0; +} + +static int i2c1_debugfs_release(struct inode *in, struct file *fp) +{ + return __i2c_debugfs_release(in, fp, 0); +} + +static int i2c2_debugfs_release(struct inode *in, struct file *fp) +{ + return 
__i2c_debugfs_release(in, fp, 1); +} + +static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target) +{ + struct hfi1_pportdata *ppd; + int ret; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + ppd = private2ppd(fp); + + ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); + if (ret) /* failed - release the module */ + module_put(THIS_MODULE); + + return ret; +} + +static int qsfp1_debugfs_open(struct inode *in, struct file *fp) +{ + return __qsfp_debugfs_open(in, fp, 0); +} + +static int qsfp2_debugfs_open(struct inode *in, struct file *fp) +{ + return __qsfp_debugfs_open(in, fp, 1); +} + +static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target) +{ + struct hfi1_pportdata *ppd; + + ppd = private2ppd(fp); + + release_chip_resource(ppd->dd, i2c_target(target)); + module_put(THIS_MODULE); + + return 0; +} + +static int qsfp1_debugfs_release(struct inode *in, struct file *fp) +{ + return __qsfp_debugfs_release(in, fp, 0); +} + +static int qsfp2_debugfs_release(struct inode *in, struct file *fp) +{ + return __qsfp_debugfs_release(in, fp, 1); +} + #define DEBUGFS_OPS(nm, readroutine, writeroutine) \ { \ .name = nm, \ @@ -705,6 +792,18 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf, }, \ } +#define DEBUGFS_XOPS(nm, readf, writef, openf, releasef) \ +{ \ + .name = nm, \ + .ops = { \ + .read = readf, \ + .write = writef, \ + .llseek = generic_file_llseek, \ + .open = openf, \ + .release = releasef \ + }, \ +} + static const struct counter_info cntr_ops[] = { DEBUGFS_OPS("counter_names", dev_names_read, NULL), DEBUGFS_OPS("counters", dev_counters_read, NULL), @@ -713,11 +812,15 @@ static const struct counter_info cntr_ops[] = { static const struct counter_info port_cntr_ops[] = { DEBUGFS_OPS("port%dcounters", portcntrs_debugfs_read, NULL), - DEBUGFS_OPS("i2c1", i2c1_debugfs_read, i2c1_debugfs_write), - DEBUGFS_OPS("i2c2", i2c2_debugfs_read, i2c2_debugfs_write), + DEBUGFS_XOPS("i2c1", i2c1_debugfs_read, i2c1_debugfs_write, + i2c1_debugfs_open, i2c1_debugfs_release), + DEBUGFS_XOPS("i2c2", i2c2_debugfs_read, i2c2_debugfs_write, + i2c2_debugfs_open, i2c2_debugfs_release), DEBUGFS_OPS("qsfp_dump%d", qsfp_debugfs_dump, NULL), - DEBUGFS_OPS("qsfp1", qsfp1_debugfs_read, qsfp1_debugfs_write), - DEBUGFS_OPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write), + DEBUGFS_XOPS("qsfp1", qsfp1_debugfs_read, qsfp1_debugfs_write, + qsfp1_debugfs_open, qsfp1_debugfs_release), + DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write, + qsfp2_debugfs_open, qsfp2_debugfs_release), }; void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd) -- cgit v1.2.3-59-g8ed1b From c9c8ea3d47ebe025c3bca692e729f4c2e634c9a8 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:50:33 -0800 Subject: IB/hfi1: Add ASIC flag view/clear Different OSes using parts of the same hardware may leave cross-device flags set. Export a debugfs file to view and clear these flags if needed. 
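For illustration, the intended workflow from user space is to read the file for a decoded dump, then write back a hex mask of the bits to clear. A minimal sketch; the debugfs path and the 0x1 mask are assumptions, not part of this patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path; check the hfi1 debugfs tree on the target system */
	const char *path = "/sys/kernel/debug/hfi1/hfi1_0/asic_flags";
	char buf[4096];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf) - 1);	/* decoded flag dump */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	/* clear a stale flag; use a mask reported by the dump above */
	write(fd, "0x1", 3);

	close(fd);
	return 0;
}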
Reviewed-by: Mitko Haralanov Reviewed-by: Easwar Hariharan Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/debugfs.c | 125 ++++++++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c index 6a1bc28a9107..dbab9d9cc288 100644 --- a/drivers/staging/rdma/hfi1/debugfs.c +++ b/drivers/staging/rdma/hfi1/debugfs.c @@ -404,6 +404,130 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf, return rval; } +static void check_dyn_flag(u64 scratch0, char *p, int size, int *used, + int this_hfi, int hfi, u32 flag, const char *what) +{ + u32 mask; + + mask = flag << (hfi ? CR_DYN_SHIFT : 0); + if (scratch0 & mask) { + *used += scnprintf(p + *used, size - *used, + " 0x%08x - HFI%d %s in use, %s device\n", + mask, hfi, what, + this_hfi == hfi ? "this" : "other"); + } +} + +static ssize_t asic_flags_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hfi1_pportdata *ppd; + struct hfi1_devdata *dd; + u64 scratch0; + char *tmp; + int ret = 0; + int size; + int used; + int i; + + rcu_read_lock(); + ppd = private2ppd(file); + dd = ppd->dd; + size = PAGE_SIZE; + used = 0; + tmp = kmalloc(size, GFP_KERNEL); + if (!tmp) { + rcu_read_unlock(); + return -ENOMEM; + } + + scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); + used += scnprintf(tmp + used, size - used, + "Resource flags: 0x%016llx\n", scratch0); + + /* check permanent flag */ + if (scratch0 & CR_THERM_INIT) { + used += scnprintf(tmp + used, size - used, + " 0x%08x - thermal monitoring initialized\n", + (u32)CR_THERM_INIT); + } + + /* check each dynamic flag on each HFI */ + for (i = 0; i < 2; i++) { + check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i, + CR_SBUS, "SBus"); + check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i, + CR_EPROM, "EPROM"); + check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i, + CR_I2C1, "i2c chain 1"); + check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i, + CR_I2C2, "i2c chain 2"); + } + used += scnprintf(tmp + used, size - used, "Write bits to clear\n"); + + ret = simple_read_from_buffer(buf, count, ppos, tmp, used); + rcu_read_unlock(); + kfree(tmp); + return ret; +} + +static ssize_t asic_flags_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hfi1_pportdata *ppd; + struct hfi1_devdata *dd; + char *buff; + int ret; + unsigned long long value; + u64 scratch0; + u64 clear; + + rcu_read_lock(); + ppd = private2ppd(file); + dd = ppd->dd; + + buff = kmalloc(count + 1, GFP_KERNEL); + if (!buff) { + ret = -ENOMEM; + goto do_return; + } + + ret = copy_from_user(buff, buf, count); + if (ret > 0) { + ret = -EFAULT; + goto do_free; + } + + /* zero terminate and read the expected integer */ + buff[count] = 0; + ret = kstrtoull(buff, 0, &value); + if (ret) + goto do_free; + clear = value; + + /* obtain exclusive access */ + mutex_lock(&dd->asic_data->asic_resource_mutex); + acquire_hw_mutex(dd); + + scratch0 = read_csr(dd, ASIC_CFG_SCRATCH); + scratch0 &= ~clear; + write_csr(dd, ASIC_CFG_SCRATCH, scratch0); + /* force write to be visible to other HFI on another OS */ + (void)read_csr(dd, ASIC_CFG_SCRATCH); + + release_hw_mutex(dd); + mutex_unlock(&dd->asic_data->asic_resource_mutex); + + /* return the number of bytes written */ + ret = count; + + do_free: + kfree(buff); + do_return: + rcu_read_unlock(); + return ret; +} + /* * read the 
per-port QSFP data for ppd */ @@ -821,6 +945,7 @@ static const struct counter_info port_cntr_ops[] = { qsfp1_debugfs_open, qsfp1_debugfs_release), DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write, qsfp2_debugfs_open, qsfp2_debugfs_release), + DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write), }; void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd) -- cgit v1.2.3-59-g8ed1b From 8fefef125ed4b9347068d782aa5439f3da3dca32 Mon Sep 17 00:00:00 2001 From: Jubin John Date: Sat, 5 Mar 2016 08:50:38 -0800 Subject: IB/hfi1: Handle host handshake timeout Host handshake timeout can occur during the verify capability state. This is an LNI-related failure and should be handled in the same way as other LNI failures. Reviewed-by: Dean Luick Reviewed-by: Easwar Hariharan Reviewed-by: Mike Marciniszyn Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/chip.c | 3 ++- drivers/staging/rdma/hfi1/chip.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index ac0345257b9c..c29860c05ed4 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -963,7 +963,8 @@ static struct flag_table dc8051_info_err_flags[] = { FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ), FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1), FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2), - FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT) + FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT), + FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT) }; /* diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h index 9313963b4cf4..4f3b878e43eb 100644 --- a/drivers/staging/rdma/hfi1/chip.h +++ b/drivers/staging/rdma/hfi1/chip.h @@ -253,12 +253,13 @@ #define FAILED_LNI_VERIFY_CAP1 BIT(9) #define FAILED_LNI_VERIFY_CAP2 BIT(10) #define FAILED_LNI_CONFIGLT BIT(11) +#define HOST_HANDSHAKE_TIMEOUT BIT(12) #define FAILED_LNI (FAILED_LNI_POLLING | FAILED_LNI_DEBOUNCE \ | FAILED_LNI_ESTBCOMM | FAILED_LNI_OPTEQ \ | FAILED_LNI_VERIFY_CAP1 \ | FAILED_LNI_VERIFY_CAP2 \ - | FAILED_LNI_CONFIGLT) + | FAILED_LNI_CONFIGLT | HOST_HANDSHAKE_TIMEOUT) /* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG - host message flags */ #define HOST_REQ_DONE BIT(0) -- cgit v1.2.3-59-g8ed1b From 528ee9fbf0244406a76cb5e37406eef303b09a46 Mon Sep 17 00:00:00 2001 From: Dean Luick Date: Sat, 5 Mar 2016 08:50:43 -0800 Subject: IB/hfi1: Add adaptive cacheless verbs copy The kernel memcpy is faster than a cacheless copy. However, if too much of the L3 cache is overwritten by one-time copies, then overall bandwidth suffers. Implement an adaptive scheme where full page copies are tracked and, if the number of unique entries is larger than a threshold, verbs will use a cacheless copy. Tracked entries are gradually cleaned, allowing memcpy to resume once the larger copies have stopped. 
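As a worked example of the sizing math in hfi1_wss_init() below, assume a 25 MiB LLC, 4 KiB pages, and the default 80% threshold (the assumed LLC size stands in for the value the driver reads from the boot CPU, in KiB):

#include <stdio.h>

int main(void)
{
	long page_size = 4096;
	long llc_size = 25600L * 1024;		/* 25 MiB LLC, assumed */
	long table_size = 1;

	while (table_size < llc_size)		/* roundup_pow_of_two() */
		table_size <<= 1;		/* -> 32 MiB */

	long llc_bits = llc_size / page_size;	/* 6400 pages fit in LLC */
	long table_bits = table_size / page_size; /* 8192 one-bit slots */
	long pages_mask = table_bits - 1;	/* 0x1fff */
	long num_entries = table_bits / 64;	/* 128 unsigned longs */
	long threshold = llc_bits * 80 / 100;	/* 5120 pages */

	printf("mask=%#lx entries=%ld threshold=%ld pages\n",
	       pages_mask, num_entries, threshold);
	return 0;
}

With these numbers the copy path goes cacheless once roughly 20 MiB of distinct pages have been touched by large copies within one cleaning window.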
Reviewed-by: Dennis Dalessandro Reviewed-by: Mike Marciniszyn Signed-off-by: Dean Luick Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/init.c | 6 ++ drivers/staging/rdma/hfi1/verbs.c | 185 +++++++++++++++++++++++++++++++++++++- drivers/staging/rdma/hfi1/verbs.h | 22 +++++ 3 files changed, 211 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index f21933ca93ce..deabb0812023 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -1242,6 +1242,9 @@ static int __init hfi1_mod_init(void) idr_init(&hfi1_unit_table); hfi1_dbg_init(); + ret = hfi1_wss_init(); + if (ret < 0) + goto bail_wss; ret = pci_register_driver(&hfi1_pci_driver); if (ret < 0) { pr_err("Unable to register driver: error %d\n", -ret); @@ -1250,6 +1253,8 @@ static int __init hfi1_mod_init(void) goto bail; /* all OK */ bail_dev: + hfi1_wss_exit(); +bail_wss: hfi1_dbg_exit(); idr_destroy(&hfi1_unit_table); dev_cleanup(); @@ -1265,6 +1270,7 @@ module_init(hfi1_mod_init); static void __exit hfi1_mod_cleanup(void) { pci_unregister_driver(&hfi1_pci_driver); + hfi1_wss_exit(); hfi1_dbg_exit(); hfi1_cpulist_count = 0; kfree(hfi1_cpulist); diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 220bdb0b70bc..82097571aa85 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -125,6 +125,13 @@ unsigned short piothreshold; module_param(piothreshold, ushort, S_IRUGO); MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio"); +#define COPY_CACHELESS 1 +#define COPY_ADAPTIVE 2 +static unsigned int sge_copy_mode; +module_param(sge_copy_mode, uint, S_IRUGO); +MODULE_PARM_DESC(sge_copy_mode, + "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS"); + static void verbs_sdma_complete( struct sdma_txreq *cookie, int status); @@ -137,6 +144,159 @@ static int pio_wait(struct rvt_qp *qp, /* Length of buffer to create verbs txreq cache name */ #define TXREQ_NAME_LEN 24 +static uint wss_threshold; +module_param(wss_threshold, uint, S_IRUGO); +MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy"); +static uint wss_clean_period = 256; +module_param(wss_clean_period, uint, S_IRUGO); +MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned"); + +/* memory working set size */ +struct hfi1_wss { + unsigned long *entries; + atomic_t total_count; + atomic_t clean_counter; + atomic_t clean_entry; + + int threshold; + int num_entries; + long pages_mask; +}; + +static struct hfi1_wss wss; + +int hfi1_wss_init(void) +{ + long llc_size; + long llc_bits; + long table_size; + long table_bits; + + /* check for a valid percent range - default to 80 if none or invalid */ + if (wss_threshold < 1 || wss_threshold > 100) + wss_threshold = 80; + /* reject a wildly large period */ + if (wss_clean_period > 1000000) + wss_clean_period = 256; + /* reject a zero period */ + if (wss_clean_period == 0) + wss_clean_period = 1; + + /* + * Calculate the table size - the next power of 2 larger than the + * LLC size. LLC size is in KiB. 
+ */ + llc_size = wss_llc_size() * 1024; + table_size = roundup_pow_of_two(llc_size); + + /* one bit per page in rounded up table */ + llc_bits = llc_size / PAGE_SIZE; + table_bits = table_size / PAGE_SIZE; + wss.pages_mask = table_bits - 1; + wss.num_entries = table_bits / BITS_PER_LONG; + + wss.threshold = (llc_bits * wss_threshold) / 100; + if (wss.threshold == 0) + wss.threshold = 1; + + atomic_set(&wss.clean_counter, wss_clean_period); + + wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries), + GFP_KERNEL); + if (!wss.entries) { + hfi1_wss_exit(); + return -ENOMEM; + } + + return 0; +} + +void hfi1_wss_exit(void) +{ + /* coded to handle partially initialized and repeat callers */ + kfree(wss.entries); + wss.entries = NULL; +} + +/* + * Advance the clean counter. When the clean period has expired, + * clean an entry. + * + * This is implemented in atomics to avoid locking. Because multiple + * variables are involved, it can be racy which can lead to slightly + * inaccurate information. Since this is only a heuristic, this is + * OK. Any inaccuracies will clean themselves out as the counter + * advances. That said, it is unlikely the entry clean operation will + * race - the next possible racer will not start until the next clean + * period. + * + * The clean counter is implemented as a decrement to zero. When zero + * is reached an entry is cleaned. + */ +static void wss_advance_clean_counter(void) +{ + int entry; + int weight; + unsigned long bits; + + /* become the cleaner if we decrement the counter to zero */ + if (atomic_dec_and_test(&wss.clean_counter)) { + /* + * Set, not add, the clean period. This avoids an issue + * where the counter could decrement below the clean period. + * Doing a set can result in lost decrements, slowing the + * clean advance. Since this is a heuristic, this possible + * slowdown is OK. + * + * An alternative is to loop, advancing the counter by a + * clean period until the result is > 0. However, this could + * lead to several threads keeping another in the clean loop. + * This could be mitigated by limiting the number of times + * we stay in the loop. + */ + atomic_set(&wss.clean_counter, wss_clean_period); + + /* + * Uniquely grab the entry to clean and move to next. + * The current entry is always the lower bits of + * wss.clean_entry. The table size, wss.num_entries, + * is always a power-of-2. + */ + entry = (atomic_inc_return(&wss.clean_entry) - 1) + & (wss.num_entries - 1); + + /* clear the entry and count the bits */ + bits = xchg(&wss.entries[entry], 0); + weight = hweight64((u64)bits); + /* only adjust the contended total count if needed */ + if (weight) + atomic_sub(weight, &wss.total_count); + } +} + +/* + * Insert the given address into the working set array. + */ +static void wss_insert(void *address) +{ + u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask; + u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */ + u32 nr = page & (BITS_PER_LONG - 1); + + if (!test_and_set_bit(nr, &wss.entries[entry])) + atomic_inc(&wss.total_count); + + wss_advance_clean_counter(); +} + +/* + * Is the working set larger than the threshold? + */ +static inline int wss_exceeds_threshold(void) +{ + return atomic_read(&wss.total_count) >= wss.threshold; +} + /* * Translate ib_wr_opcode into ib_wc_opcode. 
*/ @@ -258,7 +418,26 @@ void hfi1_copy_sge( struct rvt_sge *sge = &ss->sge; int in_last = 0; int i; + int cacheless_copy = 0; + if (sge_copy_mode == COPY_CACHELESS) { + cacheless_copy = length >= PAGE_SIZE; + } else if (sge_copy_mode == COPY_ADAPTIVE) { + if (length >= PAGE_SIZE) { + /* + * NOTE: this *assumes*: + * o The first vaddr is the dest. + * o If multiple pages, then vaddr is sequential. + */ + wss_insert(sge->vaddr); + if (length >= (2 * PAGE_SIZE)) + wss_insert(sge->vaddr + PAGE_SIZE); + + cacheless_copy = wss_exceeds_threshold(); + } else { + wss_advance_clean_counter(); + } + } if (copy_last) { if (length > 8) { length -= 8; @@ -277,10 +456,12 @@ again: if (len > sge->sge_length) len = sge->sge_length; WARN_ON_ONCE(len == 0); - if (in_last) { - /* enforce byte transer ordering */ + if (unlikely(in_last)) { + /* enforce byte transfer ordering */ for (i = 0; i < len; i++) ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i]; + } else if (cacheless_copy) { + cacheless_memcpy(sge->vaddr, data, len); } else { memcpy(sge->vaddr, data, len); } diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h index a85e6bc580b6..6c4670fffdbb 100644 --- a/drivers/staging/rdma/hfi1/verbs.h +++ b/drivers/staging/rdma/hfi1/verbs.h @@ -475,6 +475,28 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, u64 pbc); +int hfi1_wss_init(void); +void hfi1_wss_exit(void); + +/* platform specific: return the lowest level cache (llc) size, in KiB */ +static inline int wss_llc_size(void) +{ + /* assume that the boot CPU value is universal for all CPUs */ + return boot_cpu_data.x86_cache_size; +} + +/* platform specific: cacheless copy */ +static inline void cacheless_memcpy(void *dst, void *src, size_t n) +{ + /* + * Use the only available X64 cacheless copy. Add a __user cast + * to quiet sparse. The src argument is already in the kernel so + * there are no security issues. The extra fault recovery machinery + * is not invoked. + */ + __copy_user_nocache(dst, (void __user *)src, n, 0); +} + extern const enum ib_wc_opcode ib_hfi1_wc_opcode[]; extern const u8 hdr_len_by_opcode[]; -- cgit v1.2.3-59-g8ed1b From 831464ce4b74eaec723bad51ea48fe3879732f66 Mon Sep 17 00:00:00 2001 From: Kaike Wan Date: Sat, 5 Mar 2016 08:50:49 -0800 Subject: IB/hfi1: Don't call cond_resched in atomic mode when sending packets This patch fixes a problem where the driver might reschedule in atomic mode when sending packets. The call to cond_resched() in hfi1_do_send() can occur in atomic mode, so a check is required to avoid the warning message: "kernel: BUG: scheduling while atomic: swapper/2/0/0x10000100." 
Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Kaike Wan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/ruc.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c index aa53859503ee..08813cdbd475 100644 --- a/drivers/staging/rdma/hfi1/ruc.c +++ b/drivers/staging/rdma/hfi1/ruc.c @@ -906,8 +906,11 @@ void hfi1_do_send(struct rvt_qp *qp) *ps.ppd->dd->send_schedule); return; } - cond_resched(); - this_cpu_inc(*ps.ppd->dd->send_schedule); + if (!irqs_disabled()) { + cond_resched(); + this_cpu_inc( + *ps.ppd->dd->send_schedule); + } timeout = jiffies + (timeout_int) / 8; } spin_lock_irqsave(&qp->s_lock, flags); -- cgit v1.2.3-59-g8ed1b From 2243472e9d98c3ca0cb735f96ad48a7b59bdb34d Mon Sep 17 00:00:00 2001 From: Easwar Hariharan Date: Mon, 7 Mar 2016 11:35:03 -0800 Subject: IB/hfi1: Improve LED beaconing The current LED beaconing code is unclear and uses the timer handler to turn off the timer. This patch simplifies the code by removing the special semantics of timeon = timeoff = 0 being interpreted as a request to turn off the beaconing. Reviewed-by: Ira Weiny Reviewed-by: Dennis Dalessandro Signed-off-by: Easwar Hariharan Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/driver.c | 59 ++++++++++++++++---------------------- drivers/staging/rdma/hfi1/hfi.h | 10 ++----- drivers/staging/rdma/hfi1/mad.c | 20 ++++++------- 3 files changed, 38 insertions(+), 51 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 45818646eb99..914beedb556b 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -1170,18 +1170,20 @@ void shutdown_led_override(struct hfi1_pportdata *ppd) struct hfi1_devdata *dd = ppd->dd; /* - * This pairs with the memory barrier implied by the atomic_dec in - * hfi1_set_led_override to ensure that we read the correct state of - * LED beaconing represented by led_override_timer_active + * This pairs with the memory barrier in hfi1_start_led_override to + * ensure that we read the correct state of LED beaconing represented + * by led_override_timer_active */ - smp_mb(); + smp_rmb(); if (atomic_read(&ppd->led_override_timer_active)) { del_timer_sync(&ppd->led_override_timer); atomic_set(&ppd->led_override_timer_active, 0); + /* Ensure the atomic_set is visible to all CPUs */ + smp_wmb(); } - /* Shut off LEDs after we are sure timer is not running */ - setextled(dd, 0); + /* Hand control of the LED to the DC for normal operation */ + write_csr(dd, DCC_CFG_LED_CNTRL, 0); } static void run_led_override(unsigned long opaque) @@ -1195,59 +1197,48 @@ static void run_led_override(unsigned long opaque) return; phase_idx = ppd->led_override_phase & 1; + setextled(dd, phase_idx); timeout = ppd->led_override_vals[phase_idx]; + /* Set up for next phase */ ppd->led_override_phase = !ppd->led_override_phase; - /* - * don't re-fire the timer if user asked for it to be off; we let - * it fire one more time after they turn it off to simplify - */ - if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) { - mod_timer(&ppd->led_override_timer, jiffies + timeout); - } else { - /* Hand control of the LED to the DC for normal operation */ - write_csr(dd, DCC_CFG_LED_CNTRL, 0); - /* Record that we did not re-fire the timer */ - atomic_dec(&ppd->led_override_timer_active); - 
} + mod_timer(&ppd->led_override_timer, jiffies + timeout); } /* * To have the LED blink in a particular pattern, provide timeon and timeoff - * in milliseconds. To turn off custom blinking and return to normal operation, - * provide timeon = timeoff = 0. + * in milliseconds. + * To turn off custom blinking and return to normal operation, use + * shutdown_led_override() */ -void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, - unsigned int timeoff) +void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, + unsigned int timeoff) { - struct hfi1_devdata *dd = ppd->dd; - - if (!(dd->flags & HFI1_INITTED)) + if (!(ppd->dd->flags & HFI1_INITTED)) return; /* Convert to jiffies for direct use in timer */ ppd->led_override_vals[0] = msecs_to_jiffies(timeoff); ppd->led_override_vals[1] = msecs_to_jiffies(timeon); - ppd->led_override_phase = 1; /* Arbitrarily start from LED on phase */ + + /* Arbitrarily start from LED on phase */ + ppd->led_override_phase = 1; /* * If the timer has not already been started, do so. Use a "quick" - * timeout so the function will be called soon, to look at our request. + * timeout so the handler will be called soon to look at our request. */ - if (atomic_inc_return(&ppd->led_override_timer_active) == 1) { - /* Need to start timer */ + if (!timer_pending(&ppd->led_override_timer)) { setup_timer(&ppd->led_override_timer, run_led_override, (unsigned long)ppd); - ppd->led_override_timer.expires = jiffies + 1; add_timer(&ppd->led_override_timer); - } else { - if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) - mod_timer(&ppd->led_override_timer, jiffies + 1); - atomic_dec(&ppd->led_override_timer_active); + atomic_set(&ppd->led_override_timer_active, 1); + /* Ensure the atomic_set is visible to all CPUs */ + smp_wmb(); } } diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 035a151d2d5c..572288308406 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1623,13 +1623,9 @@ void hfi1_free_devdata(struct hfi1_devdata *); void cc_state_reclaim(struct rcu_head *rcu); struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra); -void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, - unsigned int timeoff); -/* - * Only to be used for driver unload or device reset where we cannot allow - * the timer to fire even the one extra time, else use hfi1_set_led_override - * with timeon = timeoff = 0 - */ +/* LED beaconing functions */ +void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon, + unsigned int timeoff); void shutdown_led_override(struct hfi1_pportdata *ppd); #define HFI1_CREDIT_RETURN_RATE (100) diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 5925798db4d1..0ec748e7e7b6 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -583,11 +583,11 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data, pi->port_states.ledenable_offlinereason |= ppd->is_sm_config_started << 5; /* - * This pairs with the memory barrier implied by the atomic_dec in - * hfi1_set_led_override to ensure that we read the correct state of - * LED beaconing represented by led_override_timer_active + * This pairs with the memory barrier in hfi1_start_led_override to + * ensure that we read the correct state of LED beaconing represented + * by led_override_timer_active */ - smp_mb(); + smp_rmb(); is_beaconing_active = 
!!atomic_read(&ppd->led_override_timer_active); pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6; pi->port_states.ledenable_offlinereason |= @@ -3598,11 +3598,11 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, } /* - * This pairs with the memory barrier implied by the atomic_dec in - * hfi1_set_led_override to ensure that we read the correct state of - * LED beaconing represented by led_override_timer_active + * This pairs with the memory barrier in hfi1_start_led_override to + * ensure that we read the correct state of LED beaconing represented + * by led_override_timer_active */ - smp_mb(); + smp_rmb(); is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active); p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT); @@ -3627,9 +3627,9 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data, } if (on) - hfi1_set_led_override(dd->pport, 2000, 1500); + hfi1_start_led_override(dd->pport, 2000, 1500); else - hfi1_set_led_override(dd->pport, 0, 0); + shutdown_led_override(dd->pport); return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len); } -- cgit v1.2.3-59-g8ed1b From ef086c0d5dd9a151578c72b6f257e5b0e77d65eb Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Mar 2016 11:35:08 -0800 Subject: IB/hfi1: Report pid in qp_stats to aid debug Tracking user/QP ownership is needed to debug issues with user ULPs like OpenMPI. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/infiniband/sw/rdmavt/qp.c | 1 + drivers/staging/rdma/hfi1/qp.c | 5 +++-- include/rdma/rdmavt_qp.h | 1 + 3 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index ef82abf2d89e..de34474b0dfb 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -786,6 +786,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, goto bail_ip; } } + qp->pid = current->pid; } spin_lock(&rdi->n_qps_lock); diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 9e831a162f19..6f8571518ad1 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -678,7 +678,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) wqe = rvt_get_swqe_ptr(qp, qp->s_last); send_context = qp_to_send_context(qp, priv->s_sc); seq_printf(s, - "N %d %s QP%x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u CQ %u %u\n", + "N %d %s QP%x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u CQ %u %u PID %d\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, @@ -712,7 +712,8 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) send_context, send_context ? 
send_context->sw_index : 0, ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head, - ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail); + ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail, + qp->pid); } void qp_comm_est(struct rvt_qp *qp) diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index f2f4df023aaa..497e59065c2c 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -251,6 +251,7 @@ struct rvt_qp { enum ib_mtu path_mtu; int srate_mbps; /* s_srate (below) converted to Mbit/s */ + pid_t pid; /* pid for user mode QPs */ u32 remote_qpn; u32 qkey; /* QKEY for this QP (for UD or RD) */ u32 s_size; /* send work queue size */ -- cgit v1.2.3-59-g8ed1b From ef6d8c4ec86f03b1e40791a804c746e5efacaf86 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Mar 2016 11:35:14 -0800 Subject: IB/hfi1: Fix issues with qp_stats print The changes are to aid in correlating trace information with QPs between the trace and qp_stats output. Such changes include adding a space after QP and clarifying that the second QP is actually the remote QP. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 6f8571518ad1..59ee12a96006 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -678,7 +678,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) wqe = rvt_get_swqe_ptr(qp, qp->s_last); send_context = qp_to_send_context(qp, priv->s_sc); seq_printf(s, - "N %d %s QP%x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) QP%x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u CQ %u %u PID %d\n", + "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n", iter->n, qp_idle(qp) ? "I" : "B", qp->ibqp.qp_num, -- cgit v1.2.3-59-g8ed1b From 1db78eeebee7cde877194ddc8691f192e6279609 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Mar 2016 11:35:19 -0800 Subject: IB/hfi1: Add unique trace point for pio and sdma send This allows for separately enabling pio and sdma tracepoints to cut the volume of trace information. 
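For example, only the new sdma event can then be enabled through tracefs. A minimal sketch; the events/<system>/<event>/enable layout is the standard tracefs convention, but the exact directory names here are assumptions not shown in this hunk:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* assumed tracefs path for the new sdma_output_ibhdr event */
	const char *p = "/sys/kernel/debug/tracing/events/"
			"hfi1_ibhdrs/sdma_output_ibhdr/enable";
	int fd = open(p, O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* pio and ack events stay disabled */
	close(fd);
	return 0;
}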
Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/rc.c | 2 +- drivers/staging/rdma/hfi1/trace.h | 10 +++++++++- drivers/staging/rdma/hfi1/verbs.c | 8 ++++---- 3 files changed, 14 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c index 351f136c7caa..0d7e1017f3cb 100644 --- a/drivers/staging/rdma/hfi1/rc.c +++ b/drivers/staging/rdma/hfi1/rc.c @@ -881,7 +881,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, goto queue_ack; } - trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr); + trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr); /* write the pbc and data */ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords); diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h index b8b44168e62d..4d91c18fa694 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/staging/rdma/hfi1/trace.h @@ -530,7 +530,15 @@ DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr, TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr), TP_ARGS(dd, hdr)); -DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr, +DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr, + TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr), + TP_ARGS(dd, hdr)); + +DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr, + TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr), + TP_ARGS(dd, hdr)); + +DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr, TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr), TP_ARGS(dd, hdr)); diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 82097571aa85..e605e09d8084 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -891,8 +891,8 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, if (unlikely(ret)) goto bail_build; } - trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), - &ps->s_txreq->phdr.hdr); + trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device), - &ps->s_txreq->phdr.hdr); + &ps->s_txreq->phdr.hdr); ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq); if (unlikely(ret == -ECOMM)) goto bail_ecomm; @@ -1067,8 +1067,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, } } - trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), - &ps->s_txreq->phdr.hdr); + trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device), + &ps->s_txreq->phdr.hdr); pio_bail: if (qp->s_wqe) { -- cgit v1.2.3-59-g8ed1b From 5326dfbf005ca8589d709209a81d145c5b87b23d Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Mar 2016 11:35:24 -0800 Subject: IB/hfi1: Fix ordering of trace for accuracy The positioning of the sdma ibhdr trace was causing an extra trace message when the tx send returned -EBUSY. Move the trace to just before the return and handle negative return values to avoid any trace. 
Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index e605e09d8084..467e6c349c68 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -891,11 +891,14 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps, if (unlikely(ret)) goto bail_build; } + ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq); + if (unlikely(ret < 0)) { + if (ret == -ECOMM) + goto bail_ecomm; + return ret; + } trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ps->s_txreq->phdr.hdr); - ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq); - if (unlikely(ret == -ECOMM)) - goto bail_ecomm; return ret; bail_ecomm: -- cgit v1.2.3-59-g8ed1b From 60df29581f67e06791a176641c774515ec1634e5 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Mar 2016 11:35:30 -0800 Subject: IB/hfi1: Fix PIO wakeup timing hole There is a timing hole if there had been greater than PIO_WAIT_BATCH_SIZE waiters. This code will dispatch the first batch but leave the others in the queue. If the restarted waiters don't in turn wait on a buffer, there is a hang. Fix by forcing a return when the QP queue is non-empty. Reviewed-by: Vennila Megavannan Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/pio.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c index e888e214356b..c6849ce9e5eb 100644 --- a/drivers/staging/rdma/hfi1/pio.c +++ b/drivers/staging/rdma/hfi1/pio.c @@ -1545,7 +1545,7 @@ static void sc_piobufavail(struct send_context *sc) struct iowait *wait; if (n == ARRAY_SIZE(qps)) - goto full; + break; wait = list_first_entry(list, struct iowait, list); qp = iowait_to_qp(wait); priv = qp->priv; @@ -1554,12 +1554,14 @@ static void sc_piobufavail(struct send_context *sc) qps[n++] = qp; } /* - * Counting: only call wantpiobuf_intr() if there were waiters and they - * are now all gone. + * If there had been waiters and there are more, + * ensure that we redo the force to avoid a potential hang. 
*/ - if (n) + if (n) { hfi1_sc_wantpiobuf_intr(sc, 0); -full: + if (!list_empty(list)) + hfi1_sc_wantpiobuf_intr(sc, 1); + } write_sequnlock_irqrestore(&dev->iowait_lock, flags); for (i = 0; i < n; i++) -- cgit v1.2.3-59-g8ed1b From cef504c5c019ea4f59cf3a69e7341b2b34091cda Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Mar 2016 11:35:35 -0800 Subject: IB/hfi1: Fix panic in adaptive pio The following panic occurs while running ib_send_bw -a with adaptive pio turned on: [ 8551.143596] BUG: unable to handle kernel NULL pointer dereference at (null) [ 8551.152986] IP: [] pio_wait.isra.21+0x34/0x190 [hfi1] [ 8551.160926] PGD 80db21067 PUD 80bb45067 PMD 0 [ 8551.166431] Oops: 0000 [#1] SMP [ 8551.276725] task: ffff880816bf15c0 ti: ffff880812ac0000 task.ti: ffff880812ac0000 [ 8551.285705] RIP: 0010:[] pio_wait.isra.21+0x34/0x190 [hfi1] [ 8551.296462] RSP: 0018:ffff880812ac3b58 EFLAGS: 00010282 [ 8551.303029] RAX: 000000000000002d RBX: 0000000000000000 RCX: 0000000000000800 [ 8551.311633] RDX: ffff880812ac3c08 RSI: 0000000000000000 RDI: ffff8800b6665e40 [ 8551.320228] RBP: ffff880812ac3ba0 R08: 0000000000001000 R09: ffffffffa09039a0 [ 8551.328820] R10: ffff880817a0c000 R11: 0000000000000000 R12: ffff8800b6665e40 [ 8551.337406] R13: ffff880817a0c000 R14: ffff8800b6665800 R15: ffff8800b6665e40 [ 8551.355640] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 8551.362674] CR2: 0000000000000000 CR3: 000000080abe8000 CR4: 00000000001406e0 [ 8551.371262] Stack: [ 8551.374119] ffff880812ac3bf0 ffff88080cf54010 ffff880800000800 ffff880812ac3c08 [ 8551.383036] ffff8800b6665800 ffff8800b6665e40 0000000000000202 ffffffffa08e7b80 [ 8551.391941] 00000001007de431 ffff880812ac3bc8 ffffffffa0904645 ffff8800b6665800 [ 8551.400859] Call Trace: [ 8551.404214] [] ? hfi1_del_timers_sync+0x30/0x30 [hfi1] [ 8551.412417] [] hfi1_verbs_send+0x215/0x330 [hfi1] [ 8551.420154] [] hfi1_do_send+0x166/0x350 [hfi1] [ 8551.427618] [] rvt_post_send+0x533/0x6a0 [rdmavt] [ 8551.435367] [] ib_uverbs_post_send+0x30f/0x530 [ib_uverbs] [ 8551.443999] [] ib_uverbs_write+0x117/0x380 [ib_uverbs] [ 8551.452269] [] ? sock_recvmsg+0x3b/0x50 [ 8551.459071] [] ? sock_read_iter+0x92/0xe0 [ 8551.466068] [] __vfs_write+0x37/0x100 [ 8551.472692] [] ? rw_verify_area+0x52/0xd0 [ 8551.479682] [] vfs_write+0xa2/0x1a0 [ 8551.486089] [] ? do_audit_syscall_entry+0x66/0x70 [ 8551.493891] [] SyS_write+0x55/0xc0 [ 8551.500220] [] entry_SYSCALL_64_fastpath+0x12/0x71 [ 8551.531284] RIP [] pio_wait.isra.21+0x34/0x190 [hfi1] [ 8551.539508] RSP [ 8551.544110] CR2: 0000000000000000 The priv s_sendcontext pointer was not setup properly. Fix with this patch by using the s_sendcontext and eliminating its send engine use. 
Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/qp.c | 2 ++ drivers/staging/rdma/hfi1/verbs.c | 6 +----- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 59ee12a96006..29a5ad28019b 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c @@ -220,6 +220,7 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_AV) { priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); + priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); } if (attr_mask & IB_QP_PATH_MIG_STATE && @@ -228,6 +229,7 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr, qp->s_flags |= RVT_S_AHG_CLEAR; priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); + priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc); } } diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 467e6c349c68..7acaa25e03e0 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1005,12 +1005,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps, /* vl15 special case taken care of in ud.c */ sc5 = priv->s_sc; - sc = qp_to_send_context(qp, sc5); + sc = ps->s_txreq->psc; - if (!sc) { - ret = -EINVAL; - goto bail; - } if (likely(pbc == 0)) { u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5); /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */ -- cgit v1.2.3-59-g8ed1b From 47177f1bac9ca2b65eefdc9b0b63d0505bd4e11e Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Mar 2016 11:35:41 -0800 Subject: IB/hfi1: Fix adaptive pio packet corruption The adaptive pio heuristic missed a case that causes a corrupted packet on the wire: if SDMA egress had been chosen for a pio-able packet and then encountered a ring space wait, the packet is queued. The sge cursor had been incremented as part of the packet build out for SDMA. After the send engine restart, the heuristic might now choose pio based on the sdma count being zero and start the mmio copy using the already incremented sge cursor. Fix this by forcing SDMA egress when the SDMA descriptor has already been built. Additionally, the code to wait for a QP's pio count to reach zero when switching to SDMA was missing. Add it. There is also an issue with UD QPs, in that the different SLs can pick a different egress send context. For now, just ensure that UD/GSI always go through SDMA. 
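Condensed, the post-fix RC/UC egress decision amounts to the guards sketched below; can_use_pio() is a made-up name, and the real logic lives in get_send_routine() in the diff that follows:

/*
 * PIO is chosen only when every guard holds; in particular, a txreq
 * whose SDMA descriptors are already built must stay on SDMA because
 * its sge cursor has already been advanced.
 */
static bool can_use_pio(struct rvt_qp *qp, struct verbs_txreq *tx,
			u32 only_opcode_mask)
{
	struct hfi1_qp_priv *priv = qp->priv;

	return piothreshold &&
	       qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
	       (BIT(get_opcode(&tx->phdr.hdr) & 0x1f) & only_opcode_mask) &&
	       iowait_sdma_pending(&priv->s_iowait) == 0 &&
	       !sdma_txreq_built(&tx->txreq);
}

The symmetric drain sits in hfi1_verbs_send(): before a packet is handed to SDMA, any PIO still pending for the QP is allowed to finish via pio_wait() with RVT_S_WAIT_PIO_DRAIN.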
Reviewed-by: Vennila Megavannan Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 7acaa25e03e0..62755af693a2 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1179,10 +1179,11 @@ bad: * and size */ static inline send_routine get_send_routine(struct rvt_qp *qp, - struct hfi1_ib_header *h) + struct verbs_txreq *tx) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_qp_priv *priv = qp->priv; + struct hfi1_ib_header *h = &tx->phdr.hdr; if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA))) return dd->process_pio_send; @@ -1191,21 +1192,21 @@ static inline send_routine get_send_routine(struct rvt_qp *qp, return dd->process_pio_send; case IB_QPT_GSI: case IB_QPT_UD: - if (piothreshold && qp->s_cur_size <= piothreshold) - return dd->process_pio_send; break; case IB_QPT_RC: if (piothreshold && qp->s_cur_size <= min(piothreshold, qp->pmtu) && (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) && - iowait_sdma_pending(&priv->s_iowait) == 0) + iowait_sdma_pending(&priv->s_iowait) == 0 && + !sdma_txreq_built(&tx->txreq)) return dd->process_pio_send; break; case IB_QPT_UC: if (piothreshold && qp->s_cur_size <= min(piothreshold, qp->pmtu) && (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) && - iowait_sdma_pending(&priv->s_iowait) == 0) + iowait_sdma_pending(&priv->s_iowait) == 0 && + !sdma_txreq_built(&tx->txreq)) return dd->process_pio_send; break; default: @@ -1225,10 +1226,11 @@ static inline send_routine get_send_routine(struct rvt_qp *qp, int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) { struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_qp_priv *priv = qp->priv; send_routine sr; int ret; - sr = get_send_routine(qp, &ps->s_txreq->phdr.hdr); + sr = get_send_routine(qp, ps->s_txreq); ret = egress_pkey_check(dd->pport, &ps->s_txreq->phdr.hdr, qp); if (unlikely(ret)) { /* @@ -1250,6 +1252,11 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) } return -EINVAL; } + if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait)) + return pio_wait(qp, + ps->s_txreq->psc, + ps, + RVT_S_WAIT_PIO_DRAIN); return sr(qp, ps, 0); } -- cgit v1.2.3-59-g8ed1b From d0e859c32801f6793790d71dc41a9330da0da371 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Mon, 7 Mar 2016 11:35:46 -0800 Subject: IB/hfi1: Enable adaptive pio by default Set the piothreshold to the agreed upon default of 256B. Reviewed-by: Jubin John Signed-off-by: Mike Marciniszyn Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 62755af693a2..89f2aad45c1b 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -121,7 +121,7 @@ unsigned int hfi1_max_srq_wrs = 0x1FFFF; module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO); MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); -unsigned short piothreshold; +unsigned short piothreshold = 256; module_param(piothreshold, ushort, S_IRUGO); MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. 
pio"); -- cgit v1.2.3-59-g8ed1b From 06e0ffa69312ce33484bf5c63aa5fc576fde13a8 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:14:20 -0800 Subject: IB/hfi1: Re-factor MMU notification code The MMU notification code added to the expected receive side has been re-factored and split into it's own file. This was done in order to make the code more general and, therefore, usable by other parts of the driver. The caching behavior remains the same. However, the handling of the RB tree (insertion, deletions, and searching) as well as the MMU invalidation processing is now handled by functions in the mmu_rb.[ch] files. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/Makefile | 2 +- drivers/staging/rdma/hfi1/file_ops.c | 1 + drivers/staging/rdma/hfi1/hfi.h | 14 +- drivers/staging/rdma/hfi1/mmu_rb.c | 304 ++++++++++++++++++++++++++++ drivers/staging/rdma/hfi1/mmu_rb.h | 73 +++++++ drivers/staging/rdma/hfi1/user_exp_rcv.c | 336 ++++++++----------------------- 6 files changed, 471 insertions(+), 259 deletions(-) create mode 100644 drivers/staging/rdma/hfi1/mmu_rb.c create mode 100644 drivers/staging/rdma/hfi1/mmu_rb.h (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile index 9b117062d52e..8dc59382ee96 100644 --- a/drivers/staging/rdma/hfi1/Makefile +++ b/drivers/staging/rdma/hfi1/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \ eprom.o file_ops.o firmware.o \ - init.o intr.o mad.o pcie.o pio.o pio_copy.o platform.o \ + init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \ verbs_txreq.o diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index e4490aecf262..e460261f94b7 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -58,6 +58,7 @@ #include "user_exp_rcv.h" #include "eprom.h" #include "aspm.h" +#include "mmu_rb.h" #undef pr_fmt #define pr_fmt(fmt) DRIVER_NAME ": " fmt diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 572288308406..78c8e24b1970 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1179,6 +1179,7 @@ struct hfi1_devdata { #define PT_EAGER 1 #define PT_INVALID 2 +struct tid_rb_node; struct mmu_rb_node; /* Private data for file operations */ @@ -1189,20 +1190,17 @@ struct hfi1_filedata { struct hfi1_user_sdma_pkt_q *pq; /* for cpu affinity; -1 if none */ int rec_cpu_num; - struct mmu_notifier mn; struct rb_root tid_rb_root; - struct mmu_rb_node **entry_to_rb; + struct tid_rb_node **entry_to_rb; spinlock_t tid_lock; /* protect tid_[limit,used] counters */ u32 tid_limit; u32 tid_used; - spinlock_t rb_lock; /* protect tid_rb_root RB tree */ u32 *invalid_tids; u32 invalid_tid_idx; - spinlock_t invalid_lock; /* protect the invalid_tids array */ - int (*mmu_rb_insert)(struct hfi1_filedata *, struct rb_root *, - struct mmu_rb_node *); - void (*mmu_rb_remove)(struct hfi1_filedata *, struct rb_root *, - struct mmu_rb_node *); + /* protect invalid_tids array and invalid_tid_idx */ + spinlock_t invalid_lock; + int (*mmu_rb_insert)(struct rb_root *, struct mmu_rb_node *); + void (*mmu_rb_remove)(struct rb_root *, struct mmu_rb_node *); }; 
extern struct list_head hfi1_dev_list; diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c new file mode 100644 index 000000000000..779ebafd4f4d --- /dev/null +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -0,0 +1,304 @@ +/* + * Copyright(c) 2016 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#include +#include +#include + +#include "mmu_rb.h" +#include "trace.h" + +struct mmu_rb_handler { + struct list_head list; + struct mmu_notifier mn; + struct rb_root *root; + spinlock_t lock; /* protect the RB tree */ + struct mmu_rb_ops *ops; +}; + +static LIST_HEAD(mmu_rb_handlers); +static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */ + +static struct mmu_rb_handler *find_mmu_handler(struct rb_root *); +static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, + unsigned long); +static inline void mmu_notifier_range_start(struct mmu_notifier *, + struct mm_struct *, + unsigned long, unsigned long); +static void mmu_notifier_mem_invalidate(struct mmu_notifier *, + unsigned long, unsigned long); +static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, + unsigned long, unsigned long); + +static struct mmu_notifier_ops mn_opts = { + .invalidate_page = mmu_notifier_page, + .invalidate_range_start = mmu_notifier_range_start, +}; + +int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) +{ + struct mmu_rb_handler *handlr; + + if (!ops->compare || !ops->invalidate) + return -EINVAL; + + handlr = kmalloc(sizeof(*handlr), GFP_KERNEL); + if (!handlr) + return -ENOMEM; + + handlr->root = root; + handlr->ops = ops; + INIT_HLIST_NODE(&handlr->mn.hlist); + spin_lock_init(&handlr->lock); + handlr->mn.ops = &mn_opts; + spin_lock(&mmu_rb_lock); + list_add_tail(&handlr->list, &mmu_rb_handlers); + spin_unlock(&mmu_rb_lock); + + return mmu_notifier_register(&handlr->mn, current->mm); +} + +void hfi1_mmu_rb_unregister(struct rb_root *root) +{ + struct mmu_rb_handler *handler = find_mmu_handler(root); + + spin_lock(&mmu_rb_lock); + list_del(&handler->list); + spin_unlock(&mmu_rb_lock); + + if (!RB_EMPTY_ROOT(root)) { + struct rb_node *node; + struct mmu_rb_node *rbnode; + + while ((node = rb_first(root))) { + rbnode = rb_entry(node, struct mmu_rb_node, node); + if (handler->ops->remove) + handler->ops->remove(root, rbnode); + rb_erase(node, root); + kfree(rbnode); + } + } + + if (current->mm) + mmu_notifier_unregister(&handler->mn, current->mm); + kfree(handler); +} + +int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) +{ + struct rb_node **new, *parent = NULL; + struct mmu_rb_handler *handler = find_mmu_handler(root); + struct mmu_rb_node *this; + int res, ret = 0; + + if (!handler) + return -EINVAL; + + new = &handler->root->rb_node; + spin_lock(&handler->lock); + while (*new) { + this = container_of(*new, struct mmu_rb_node, node); + res = handler->ops->compare(this, mnode->addr, mnode->len); + parent = *new; + + if (res < 0) { + new = &((*new)->rb_left); + } else if (res > 0) { + new = &((*new)->rb_right); + } else { + ret = 1; + goto unlock; + } + } + + if (handler->ops->insert) { + ret = handler->ops->insert(root, mnode); + if (ret) + goto unlock; + } + + rb_link_node(&mnode->node, parent, new); + rb_insert_color(&mnode->node, root); +unlock: + spin_unlock(&handler->lock); + return ret; +} + +/* Caller must hold handler lock */ +static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, + unsigned long addr, + unsigned long len) +{ + struct rb_node *node = handler->root->rb_node; + struct mmu_rb_node *mnode; + int res; + + while (node) { + mnode = container_of(node, struct mmu_rb_node, node); + res = handler->ops->compare(mnode, addr, len); + + if (res < 0) + node = node->rb_left; + else if (res > 0) + node = node->rb_right; + else + return mnode; + } + return NULL; +} + +static void
__mmu_rb_remove(struct mmu_rb_handler *handler, + struct mmu_rb_node *node) +{ + /* Validity of handler and node pointers has been checked by caller. */ + if (handler->ops->remove) + handler->ops->remove(handler->root, node); + rb_erase(&node->node, handler->root); +} + +struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, + unsigned long len) +{ + struct mmu_rb_handler *handler = find_mmu_handler(root); + struct mmu_rb_node *node; + + if (!handler) + return ERR_PTR(-EINVAL); + + spin_lock(&handler->lock); + node = __mmu_rb_search(handler, addr, len); + spin_unlock(&handler->lock); + + return node; +} + +void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) +{ + struct mmu_rb_handler *handler = find_mmu_handler(root); + + if (!handler || !node) + return; + + spin_lock(&handler->lock); + __mmu_rb_remove(handler, node); + spin_unlock(&handler->lock); +} + +static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) +{ + struct mmu_rb_handler *handler; + + spin_lock(&mmu_rb_lock); + list_for_each_entry(handler, &mmu_rb_handlers, list) { + if (handler->root == root) + goto unlock; + } + handler = NULL; +unlock: + spin_unlock(&mmu_rb_lock); + return handler; +} + +static inline void mmu_notifier_page(struct mmu_notifier *mn, + struct mm_struct *mm, unsigned long addr) +{ + mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE); +} + +static inline void mmu_notifier_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + mmu_notifier_mem_invalidate(mn, start, end); +} + +static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, + unsigned long start, unsigned long end) +{ + struct mmu_rb_handler *handler = + container_of(mn, struct mmu_rb_handler, mn); + struct rb_root *root = handler->root; + struct mmu_rb_node *node; + unsigned long addr = start; + + spin_lock(&handler->lock); + while (addr < end) { + /* + * There is no good way to provide a reasonable length to the + * search function at this point. Using the remaining length in + * the invalidation range is not the right thing to do. + * We have to rely on the fact that the insertion algorithm + * takes care of any overlap or length restrictions by using the + * actual size of each node. Therefore, we can use a page as an + * arbitrary, non-zero value. + */ + node = __mmu_rb_search(handler, addr, PAGE_SIZE); + + if (!node) { + /* + * Didn't find a node at this address. However, the + * range could be bigger than what we have registered + * so we have to keep looking. + */ + addr += PAGE_SIZE; + continue; + } + if (handler->ops->invalidate(root, node)) + __mmu_rb_remove(handler, node); + + /* + * The next address to be looked up is computed based + * on the node's starting address. This is due to the + * fact that the range where we start might be in the + * middle of the node's buffer so simply incrementing + * the address by the node's size would result is a + * bad address. + */ + addr = node->addr + node->len; + } + spin_unlock(&handler->lock); +} diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h new file mode 100644 index 000000000000..9fe1076ab39d --- /dev/null +++ b/drivers/staging/rdma/hfi1/mmu_rb.h @@ -0,0 +1,73 @@ +/* + * Copyright(c) 2016 Intel Corporation. + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * - Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef _HFI1_MMU_RB_H +#define _HFI1_MMU_RB_H + +#include "hfi.h" + +struct mmu_rb_node { + struct rb_node node; + unsigned long addr; + unsigned long len; +}; + +struct mmu_rb_ops { + int (*compare)(struct mmu_rb_node *, unsigned long, + unsigned long); + int (*insert)(struct rb_root *, struct mmu_rb_node *); + void (*remove)(struct rb_root *, struct mmu_rb_node *); + int (*invalidate)(struct rb_root *, struct mmu_rb_node *); +}; + +int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops); +void hfi1_mmu_rb_unregister(struct rb_root *); +int hfi1_mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); +void hfi1_mmu_rb_remove(struct rb_root *, struct mmu_rb_node *); +struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *, unsigned long, + unsigned long); + +#endif /* _HFI1_MMU_RB_H */ diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index fccae508a5d0..c9e05ddd469f 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -48,6 +48,7 @@ #include "user_exp_rcv.h" #include "trace.h" +#include "mmu_rb.h" struct tid_group { struct list_head list; @@ -57,11 +58,9 @@ struct tid_group { u8 map; }; -struct mmu_rb_node { - struct rb_node rbnode; - unsigned long virt; +struct tid_rb_node { + struct mmu_rb_node mmu; unsigned long phys; - unsigned long len; struct tid_group *grp; u32 rcventry; dma_addr_t dma_addr; @@ -70,16 +69,6 @@ struct mmu_rb_node { struct page *pages[0]; }; -enum mmu_call_types { - MMU_INVALIDATE_PAGE = 0, - MMU_INVALIDATE_RANGE = 1 -}; - -static const char * const mmu_types[] = { - "PAGE", - "RANGE" -}; - struct tid_pageset { u16 idx; u16 count; @@ -99,28 +88,21 @@ static int set_rcvarray_entry(struct file *, unsigned long, u32, struct tid_group *, struct page **, unsigned); static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, unsigned long); -static struct mmu_rb_node *mmu_rb_search(struct rb_root *, unsigned long); -static int mmu_rb_insert_by_addr(struct hfi1_filedata *, struct rb_root *, - struct mmu_rb_node *); -static int mmu_rb_insert_by_entry(struct hfi1_filedata *, struct rb_root *, - struct mmu_rb_node *); -static void mmu_rb_remove_by_addr(struct hfi1_filedata *, struct rb_root *, - struct mmu_rb_node *); -static void mmu_rb_remove_by_entry(struct hfi1_filedata *, struct rb_root *, - struct mmu_rb_node *); -static void mmu_notifier_mem_invalidate(struct mmu_notifier *, - unsigned long, unsigned long, - enum mmu_call_types); -static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, - unsigned long); -static inline void mmu_notifier_range_start(struct mmu_notifier *, - struct mm_struct *, - unsigned long, unsigned long); +static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); +static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *); +static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); static int program_rcvarray(struct file *, unsigned long, struct tid_group *, struct tid_pageset *, unsigned, u16, struct page **, u32 *, unsigned *, unsigned *); static int unprogram_rcvarray(struct file *, u32, struct tid_group **); -static void clear_tid_node(struct hfi1_filedata *, u16, struct mmu_rb_node *); +static void clear_tid_node(struct hfi1_filedata *, u16, struct tid_rb_node *); + +static struct mmu_rb_ops tid_rb_ops = { + .compare = mmu_addr_cmp, + .insert = mmu_rb_insert, + .remove = mmu_rb_remove, + .invalidate = mmu_rb_invalidate +}; static inline u32 rcventry2tidinfo(u32 
rcventry) { @@ -167,11 +149,6 @@ static inline void tid_group_move(struct tid_group *group, tid_group_add_tail(group, s2); } -static struct mmu_notifier_ops mn_opts = { - .invalidate_page = mmu_notifier_page, - .invalidate_range_start = mmu_notifier_range_start, -}; - /* * Initialize context and file private data needed for Expected * receive caching. This needs to be done after the context has @@ -185,11 +162,8 @@ int hfi1_user_exp_rcv_init(struct file *fp) unsigned tidbase; int i, ret = 0; - INIT_HLIST_NODE(&fd->mn.hlist); - spin_lock_init(&fd->rb_lock); spin_lock_init(&fd->tid_lock); spin_lock_init(&fd->invalid_lock); - fd->mn.ops = &mn_opts; fd->tid_rb_root = RB_ROOT; if (!uctxt->subctxt_cnt || !fd->subctxt) { @@ -239,7 +213,7 @@ int hfi1_user_exp_rcv_init(struct file *fp) * fails, continue but turn off the TID caching for * all user contexts. */ - ret = mmu_notifier_register(&fd->mn, current->mm); + ret = hfi1_mmu_rb_register(&fd->tid_rb_root, &tid_rb_ops); if (ret) { dd_dev_info(dd, "Failed MMU notifier registration %d\n", @@ -250,11 +224,11 @@ int hfi1_user_exp_rcv_init(struct file *fp) } if (HFI1_CAP_IS_USET(TID_UNMAP)) { - fd->mmu_rb_insert = mmu_rb_insert_by_entry; - fd->mmu_rb_remove = mmu_rb_remove_by_entry; + fd->mmu_rb_insert = mmu_rb_insert; + fd->mmu_rb_remove = mmu_rb_remove; } else { - fd->mmu_rb_insert = mmu_rb_insert_by_addr; - fd->mmu_rb_remove = mmu_rb_remove_by_addr; + fd->mmu_rb_insert = hfi1_mmu_rb_insert; + fd->mmu_rb_remove = hfi1_mmu_rb_remove; } /* @@ -295,8 +269,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) * The notifier would have been removed when the process'es mm * was freed. */ - if (current->mm && !HFI1_CAP_IS_USET(TID_UNMAP)) - mmu_notifier_unregister(&fd->mn, current->mm); + if (!HFI1_CAP_IS_USET(TID_UNMAP)) + hfi1_mmu_rb_unregister(&fd->tid_rb_root); kfree(fd->invalid_tids); @@ -312,19 +286,6 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) list_del_init(&grp->list); kfree(grp); } - spin_lock(&fd->rb_lock); - if (!RB_EMPTY_ROOT(&fd->tid_rb_root)) { - struct rb_node *node; - struct mmu_rb_node *rbnode; - - while ((node = rb_first(&fd->tid_rb_root))) { - rbnode = rb_entry(node, struct mmu_rb_node, - rbnode); - rb_erase(&rbnode->rbnode, &fd->tid_rb_root); - kfree(rbnode); - } - } - spin_unlock(&fd->rb_lock); hfi1_clear_tids(uctxt); } @@ -866,7 +827,7 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr, int ret; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; - struct mmu_rb_node *node; + struct tid_rb_node *node; struct hfi1_devdata *dd = uctxt->dd; struct rb_root *root = &fd->tid_rb_root; dma_addr_t phys; @@ -890,9 +851,9 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr, return -EFAULT; } - node->virt = vaddr; + node->mmu.addr = vaddr; + node->mmu.len = npages * PAGE_SIZE; node->phys = page_to_phys(pages[0]); - node->len = npages * PAGE_SIZE; node->npages = npages; node->rcventry = rcventry; node->dma_addr = phys; @@ -900,21 +861,19 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr, node->freed = false; memcpy(node->pages, pages, sizeof(struct page *) * npages); - spin_lock(&fd->rb_lock); - ret = fd->mmu_rb_insert(fd, root, node); - spin_unlock(&fd->rb_lock); + ret = fd->mmu_rb_insert(root, &node->mmu); if (ret) { hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d", - node->rcventry, node->virt, node->phys, ret); + node->rcventry, node->mmu.addr, node->phys, ret); pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE, 
PCI_DMA_FROMDEVICE); kfree(node); return -EFAULT; } hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1); - trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, - npages, node->virt, node->phys, phys); + trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages, + node->mmu.addr, node->phys, phys); return 0; } @@ -924,7 +883,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; - struct mmu_rb_node *node; + struct tid_rb_node *node; u8 tidctrl = EXP_TID_GET(tidinfo, CTRL); u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry; @@ -939,14 +898,11 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, rcventry = tididx + (tidctrl - 1); - spin_lock(&fd->rb_lock); node = fd->entry_to_rb[rcventry]; - if (!node || node->rcventry != (uctxt->expected_base + rcventry)) { - spin_unlock(&fd->rb_lock); + if (!node || node->rcventry != (uctxt->expected_base + rcventry)) return -EBADF; - } - fd->mmu_rb_remove(fd, &fd->tid_rb_root, node); - spin_unlock(&fd->rb_lock); + fd->mmu_rb_remove(&fd->tid_rb_root, &node->mmu); + if (grp) *grp = node->grp; clear_tid_node(fd, fd->subctxt, node); @@ -954,13 +910,13 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, } static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt, - struct mmu_rb_node *node) + struct tid_rb_node *node) { struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_devdata *dd = uctxt->dd; trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry, - node->npages, node->virt, node->phys, + node->npages, node->mmu.addr, node->phys, node->dma_addr); hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0); @@ -970,7 +926,7 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt, */ flush_wc(); - pci_unmap_single(dd->pcidev, node->dma_addr, node->len, + pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len, PCI_DMA_FROMDEVICE); hfi1_release_user_pages(node->pages, node->npages, true); @@ -997,216 +953,96 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, list_for_each_entry_safe(grp, ptr, &set->list, list) { list_del_init(&grp->list); - spin_lock(&fd->rb_lock); for (i = 0; i < grp->size; i++) { if (grp->map & (1 << i)) { u16 rcventry = grp->base + i; - struct mmu_rb_node *node; + struct tid_rb_node *node; node = fd->entry_to_rb[rcventry - uctxt->expected_base]; if (!node || node->rcventry != rcventry) continue; - fd->mmu_rb_remove(fd, root, node); + fd->mmu_rb_remove(root, &node->mmu); clear_tid_node(fd, -1, node); } } - spin_unlock(&fd->rb_lock); } } -static inline void mmu_notifier_page(struct mmu_notifier *mn, - struct mm_struct *mm, unsigned long addr) -{ - mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE, - MMU_INVALIDATE_PAGE); -} - -static inline void mmu_notifier_range_start(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end) +static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode) { - mmu_notifier_mem_invalidate(mn, start, end, MMU_INVALIDATE_RANGE); -} + struct hfi1_filedata *fdata = + container_of(root, struct hfi1_filedata, tid_rb_root); + struct hfi1_ctxtdata *uctxt = fdata->uctxt; + struct tid_rb_node *node = + container_of(mnode, struct tid_rb_node, mmu); -static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, - unsigned long start, unsigned long end, - enum mmu_call_types type) -{ - struct hfi1_filedata *fd = container_of(mn, struct hfi1_filedata, 
mn); - struct hfi1_ctxtdata *uctxt = fd->uctxt; - struct rb_root *root = &fd->tid_rb_root; - struct mmu_rb_node *node; - unsigned long addr = start; + if (node->freed) + return 0; - trace_hfi1_mmu_invalidate(uctxt->ctxt, fd->subctxt, mmu_types[type], - start, end); + trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr, + node->rcventry, node->npages, node->dma_addr); + node->freed = true; - spin_lock(&fd->rb_lock); - while (addr < end) { - node = mmu_rb_search(root, addr); + spin_lock(&fdata->invalid_lock); + if (fdata->invalid_tid_idx < uctxt->expected_count) { + fdata->invalid_tids[fdata->invalid_tid_idx] = + rcventry2tidinfo(node->rcventry - uctxt->expected_base); + fdata->invalid_tids[fdata->invalid_tid_idx] |= + EXP_TID_SET(LEN, node->npages); + if (!fdata->invalid_tid_idx) { + unsigned long *ev; - if (!node) { /* - * Didn't find a node at this address. However, the - * range could be bigger than what we have registered - * so we have to keep looking. + * hfi1_set_uevent_bits() sets a user event flag + * for all processes. Because calling into the + * driver to process TID cache invalidations is + * expensive and TID cache invalidations are + * handled on a per-process basis, we can + * optimize this to set the flag only for the + * process in question. */ - addr += PAGE_SIZE; - continue; - } - - /* - * The next address to be looked up is computed based - * on the node's starting address. This is due to the - * fact that the range where we start might be in the - * middle of the node's buffer so simply incrementing - * the address by the node's size would result is a - * bad address. - */ - addr = node->virt + (node->npages * PAGE_SIZE); - if (node->freed) - continue; - - trace_hfi1_exp_tid_inval(uctxt->ctxt, fd->subctxt, node->virt, - node->rcventry, node->npages, - node->dma_addr); - node->freed = true; - - spin_lock(&fd->invalid_lock); - if (fd->invalid_tid_idx < uctxt->expected_count) { - fd->invalid_tids[fd->invalid_tid_idx] = - rcventry2tidinfo(node->rcventry - - uctxt->expected_base); - fd->invalid_tids[fd->invalid_tid_idx] |= - EXP_TID_SET(LEN, node->npages); - if (!fd->invalid_tid_idx) { - unsigned long *ev; - - /* - * hfi1_set_uevent_bits() sets a user event flag - * for all processes. Because calling into the - * driver to process TID cache invalidations is - * expensive and TID cache invalidations are - * handled on a per-process basis, we can - * optimize this to set the flag only for the - * process in question. 
- */ - ev = uctxt->dd->events + - (((uctxt->ctxt - - uctxt->dd->first_user_ctxt) * - HFI1_MAX_SHARED_CTXTS) + fd->subctxt); - set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev); - } - fd->invalid_tid_idx++; + ev = uctxt->dd->events + + (((uctxt->ctxt - uctxt->dd->first_user_ctxt) * + HFI1_MAX_SHARED_CTXTS) + fdata->subctxt); + set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev); } - spin_unlock(&fd->invalid_lock); + fdata->invalid_tid_idx++; } - spin_unlock(&fd->rb_lock); + spin_unlock(&fdata->invalid_lock); + return 0; } -static inline int mmu_addr_cmp(struct mmu_rb_node *node, unsigned long addr, - unsigned long len) +static int mmu_addr_cmp(struct mmu_rb_node *node, unsigned long addr, + unsigned long len) { - if ((addr + len) <= node->virt) + if ((addr + len) <= node->addr) return -1; - else if (addr >= node->virt && addr < (node->virt + node->len)) + else if (addr >= node->addr && addr < (node->addr + node->len)) return 0; else return 1; } -static inline int mmu_entry_cmp(struct mmu_rb_node *node, u32 entry) -{ - if (entry < node->rcventry) - return -1; - else if (entry > node->rcventry) - return 1; - else - return 0; -} - -static struct mmu_rb_node *mmu_rb_search(struct rb_root *root, - unsigned long addr) -{ - struct rb_node *node = root->rb_node; - - while (node) { - struct mmu_rb_node *mnode = - container_of(node, struct mmu_rb_node, rbnode); - /* - * When searching, use at least one page length for size. The - * MMU notifier will not give us anything less than that. We - * also don't need anything more than a page because we are - * guaranteed to have non-overlapping buffers in the tree. - */ - int result = mmu_addr_cmp(mnode, addr, PAGE_SIZE); - - if (result < 0) - node = node->rb_left; - else if (result > 0) - node = node->rb_right; - else - return mnode; - } - return NULL; -} - -static int mmu_rb_insert_by_entry(struct hfi1_filedata *fdata, - struct rb_root *root, - struct mmu_rb_node *node) +static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node) { + struct hfi1_filedata *fdata = + container_of(root, struct hfi1_filedata, tid_rb_root); + struct tid_rb_node *tnode = + container_of(node, struct tid_rb_node, mmu); u32 base = fdata->uctxt->expected_base; - fdata->entry_to_rb[node->rcventry - base] = node; + fdata->entry_to_rb[tnode->rcventry - base] = tnode; return 0; } -static int mmu_rb_insert_by_addr(struct hfi1_filedata *fdata, - struct rb_root *root, struct mmu_rb_node *node) -{ - struct rb_node **new = &root->rb_node, *parent = NULL; - u32 base = fdata->uctxt->expected_base; - - /* Figure out where to put new node */ - while (*new) { - struct mmu_rb_node *this = - container_of(*new, struct mmu_rb_node, rbnode); - int result = mmu_addr_cmp(this, node->virt, node->len); - - parent = *new; - if (result < 0) - new = &((*new)->rb_left); - else if (result > 0) - new = &((*new)->rb_right); - else - return 1; - } - - /* Add new node and rebalance tree. 
*/ - rb_link_node(&node->rbnode, parent, new); - rb_insert_color(&node->rbnode, root); - - fdata->entry_to_rb[node->rcventry - base] = node; - return 0; -} - -static void mmu_rb_remove_by_entry(struct hfi1_filedata *fdata, - struct rb_root *root, - struct mmu_rb_node *node) -{ - u32 base = fdata->uctxt->expected_base; - - fdata->entry_to_rb[node->rcventry - base] = NULL; -} - -static void mmu_rb_remove_by_addr(struct hfi1_filedata *fdata, - struct rb_root *root, - struct mmu_rb_node *node) +static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) { + struct hfi1_filedata *fdata = + container_of(root, struct hfi1_filedata, tid_rb_root); + struct tid_rb_node *tnode = + container_of(node, struct tid_rb_node, mmu); u32 base = fdata->uctxt->expected_base; - fdata->entry_to_rb[node->rcventry - base] = NULL; - rb_erase(&node->rbnode, root); + fdata->entry_to_rb[tnode->rcventry - base] = NULL; } -- cgit v1.2.3-59-g8ed1b From c81e1f6452406a633b7c4ea3e0a12e5deaf57f5c Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:14:25 -0800 Subject: IB/hfi1: Allow MMU function execution in IRQ context Future users of the MMU/RB functions might be searching or manipulating the MMU RB trees in interrupt context. Therefore, the MMU/RB functions need to be able to run in interrupt context. This requires that we use the IRQ-aware API for spin locks. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index 779ebafd4f4d..648f7e0a1d17 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -81,6 +81,7 @@ static struct mmu_notifier_ops mn_opts = { int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) { struct mmu_rb_handler *handlr; + unsigned long flags; if (!ops->compare || !ops->invalidate) return -EINVAL; @@ -94,9 +95,9 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) INIT_HLIST_NODE(&handlr->mn.hlist); spin_lock_init(&handlr->lock); handlr->mn.ops = &mn_opts; - spin_lock(&mmu_rb_lock); + spin_lock_irqsave(&mmu_rb_lock, flags); list_add_tail(&handlr->list, &mmu_rb_handlers); - spin_unlock(&mmu_rb_lock); + spin_unlock_irqrestore(&mmu_rb_lock, flags); return mmu_notifier_register(&handlr->mn, current->mm); } @@ -104,10 +105,11 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) void hfi1_mmu_rb_unregister(struct rb_root *root) { struct mmu_rb_handler *handler = find_mmu_handler(root); + unsigned long flags; - spin_lock(&mmu_rb_lock); + spin_lock_irqsave(&mmu_rb_lock, flags); list_del(&handler->list); - spin_unlock(&mmu_rb_lock); + spin_unlock_irqrestore(&mmu_rb_lock, flags); if (!RB_EMPTY_ROOT(root)) { struct rb_node *node; @@ -132,13 +134,14 @@ int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) struct rb_node **new, *parent = NULL; struct mmu_rb_handler *handler = find_mmu_handler(root); struct mmu_rb_node *this; + unsigned long flags; int res, ret = 0; if (!handler) return -EINVAL; new = &handler->root->rb_node; - spin_lock(&handler->lock); + spin_lock_irqsave(&handler->lock, flags); while (*new) { this = container_of(*new, struct mmu_rb_node, node); res = handler->ops->compare(this, mnode->addr, mnode->len); @@ -163,7 +166,7 @@ 
int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) rb_link_node(&mnode->node, parent, new); rb_insert_color(&mnode->node, root); unlock: - spin_unlock(&handler->lock); + spin_unlock_irqrestore(&handler->lock, flags); return ret; } @@ -204,13 +207,14 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, { struct mmu_rb_handler *handler = find_mmu_handler(root); struct mmu_rb_node *node; + unsigned long flags; if (!handler) return ERR_PTR(-EINVAL); - spin_lock(&handler->lock); + spin_lock_irqsave(&handler->lock, flags); node = __mmu_rb_search(handler, addr, len); - spin_unlock(&handler->lock); + spin_unlock_irqrestore(&handler->lock, flags); return node; } @@ -218,27 +222,29 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) { struct mmu_rb_handler *handler = find_mmu_handler(root); + unsigned long flags; if (!handler || !node) return; - spin_lock(&handler->lock); + spin_lock_irqsave(&handler->lock, flags); __mmu_rb_remove(handler, node); - spin_unlock(&handler->lock); + spin_unlock_irqrestore(&handler->lock, flags); } static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) { struct mmu_rb_handler *handler; + unsigned long flags; - spin_lock(&mmu_rb_lock); + spin_lock_irqsave(&mmu_rb_lock, flags); list_for_each_entry(handler, &mmu_rb_handlers, list) { if (handler->root == root) goto unlock; } handler = NULL; unlock: - spin_unlock(&mmu_rb_lock); + spin_unlock_irqrestore(&mmu_rb_lock, flags); return handler; } @@ -263,9 +269,9 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, container_of(mn, struct mmu_rb_handler, mn); struct rb_root *root = handler->root; struct mmu_rb_node *node; - unsigned long addr = start; + unsigned long addr = start, flags; - spin_lock(&handler->lock); + spin_lock_irqsave(&handler->lock, flags); while (addr < end) { /* * There is no good way to provide a reasonable length to the @@ -300,5 +306,5 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, */ addr = node->addr + node->len; } - spin_unlock(&handler->lock); + spin_unlock_irqrestore(&handler->lock, flags); } -- cgit v1.2.3-59-g8ed1b From 4b00d9490f2147d29f107f36391b0cc77bcd944f Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:14:31 -0800 Subject: IB/hfi1: Prevent NULL pointer dereference Prevent a potential NULL pointer dereference (found by code inspection) when unregistering an MMU handler. 
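For context: find_mmu_handler() returns NULL when no handler has been registered for the given root, so every exported entry point must check its result before dereferencing. A minimal sketch of the failure mode, using a hypothetical caller rather than actual driver code:

	static void example_double_unregister(struct rb_root *root)
	{
		hfi1_mmu_rb_unregister(root);	/* handler found, removed, freed */
		/*
		 * Second call: find_mmu_handler() now returns NULL, and
		 * without the guard added below, list_del(&handler->list)
		 * dereferences a NULL handler pointer.
		 */
		hfi1_mmu_rb_unregister(root);
	}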
Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index 648f7e0a1d17..f42a33b55dc4 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -107,6 +107,9 @@ void hfi1_mmu_rb_unregister(struct rb_root *root) struct mmu_rb_handler *handler = find_mmu_handler(root); unsigned long flags; + if (!handler) + return; + spin_lock_irqsave(&mmu_rb_lock, flags); list_del(&handler->list); spin_unlock_irqrestore(&mmu_rb_lock, flags); -- cgit v1.2.3-59-g8ed1b From eef9c896a94e715fcf8eb41e98b2469319641c73 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:14:36 -0800 Subject: IB/hfi1: Allow remove MMU callbacks to free nodes In order to allow the remove MMU callbacks to free the RB nodes, it is necessary to prevent any references to the nodes after the remove callback has been called. Therefore, remove the node from the tree prior to calling the callback. In other words, the MMU/RB API now guarantees that all RB node operations it performs will be done prior to calling the remove callback and that the RB node will not be touched afterwards. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index f42a33b55dc4..a3515d7f6354 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -120,10 +120,9 @@ void hfi1_mmu_rb_unregister(struct rb_root *root) while ((node = rb_first(root))) { rbnode = rb_entry(node, struct mmu_rb_node, node); + rb_erase(node, root); if (handler->ops->remove) handler->ops->remove(root, rbnode); - rb_erase(node, root); - kfree(rbnode); } } @@ -200,9 +199,9 @@ static void __mmu_rb_remove(struct mmu_rb_handler *handler, struct mmu_rb_node *node) { /* Validity of handler and node pointers has been checked by caller. */ + rb_erase(&node->node, handler->root); if (handler->ops->remove) handler->ops->remove(handler->root, node); - rb_erase(&node->node, handler->root); } struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, @@ -272,7 +271,7 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, container_of(mn, struct mmu_rb_handler, mn); struct rb_root *root = handler->root; struct mmu_rb_node *node; - unsigned long addr = start, flags; + unsigned long addr = start, naddr, nlen, flags; spin_lock_irqsave(&handler->lock, flags); while (addr < end) { @@ -296,6 +295,9 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, addr += PAGE_SIZE; continue; } + + naddr = node->addr; + nlen = node->len; if (handler->ops->invalidate(root, node)) __mmu_rb_remove(handler, node); @@ -307,7 +309,7 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, * the address by the node's size would result is a * bad address. 
*/ - addr = node->addr + node->len; + addr = naddr + nlen; } spin_unlock_irqrestore(&handler->lock, flags); } -- cgit v1.2.3-59-g8ed1b From 368f2b59d024fbb58015dfd0e09c54c424cda979 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:14:42 -0800 Subject: IB/hfi1: Remove the use of add/remove RB function pointers The usage of function pointers for RB node insertion and removal in the expected receive code path was meant to be a small performance optimization. However, maintaining it, especially with the new MMU API, would become more troublesome as the API is extended. Since the performance optimization is minor, remove the function pointers and replace with direct calls. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 2 -- drivers/staging/rdma/hfi1/user_exp_rcv.c | 25 ++++++++++++++----------- 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 78c8e24b1970..2107cdc8ce3f 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1199,8 +1199,6 @@ struct hfi1_filedata { u32 invalid_tid_idx; /* protect invalid_tids array and invalid_tid_idx */ spinlock_t invalid_lock; - int (*mmu_rb_insert)(struct rb_root *, struct mmu_rb_node *); - void (*mmu_rb_remove)(struct rb_root *, struct mmu_rb_node *); }; extern struct list_head hfi1_dev_list; diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index c9e05ddd469f..b0b193f30fac 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -223,14 +223,6 @@ int hfi1_user_exp_rcv_init(struct file *fp) } } - if (HFI1_CAP_IS_USET(TID_UNMAP)) { - fd->mmu_rb_insert = mmu_rb_insert; - fd->mmu_rb_remove = mmu_rb_remove; - } else { - fd->mmu_rb_insert = hfi1_mmu_rb_insert; - fd->mmu_rb_remove = hfi1_mmu_rb_remove; - } - /* * PSM does not have a good way to separate, count, and * effectively enforce a limit on RcvArray entries used by @@ -861,7 +853,10 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr, node->freed = false; memcpy(node->pages, pages, sizeof(struct page *) * npages); - ret = fd->mmu_rb_insert(root, &node->mmu); + if (HFI1_CAP_IS_USET(TID_UNMAP)) + ret = mmu_rb_insert(root, &node->mmu); + else + ret = hfi1_mmu_rb_insert(root, &node->mmu); if (ret) { hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d", @@ -901,7 +896,10 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, node = fd->entry_to_rb[rcventry]; if (!node || node->rcventry != (uctxt->expected_base + rcventry)) return -EBADF; - fd->mmu_rb_remove(&fd->tid_rb_root, &node->mmu); + if (HFI1_CAP_IS_USET(TID_UNMAP)) + mmu_rb_remove(&fd->tid_rb_root, &node->mmu); + else + hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); if (grp) *grp = node->grp; @@ -962,7 +960,12 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, uctxt->expected_base]; if (!node || node->rcventry != rcventry) continue; - fd->mmu_rb_remove(root, &node->mmu); + if (HFI1_CAP_IS_USET(TID_UNMAP)) + mmu_rb_remove(&fd->tid_rb_root, + &node->mmu); + else + hfi1_mmu_rb_remove(&fd->tid_rb_root, + &node->mmu); clear_tid_node(fd, -1, node); } } -- cgit v1.2.3-59-g8ed1b From 909e2cd004b639276678c195760efeea6c173626 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:14:48 -0800 Subject: IB/hfi1: 
Notify remove MMU/RB callback of calling context Tell the remove MMU/RB callback if it's being called as part of a memory invalidation or not. This can be important in preventing a deadlock if the remove callback attempts to take the map_sem semaphore because the kernel's MMU invalidation functions have already taken it. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 10 +++++----- drivers/staging/rdma/hfi1/mmu_rb.h | 2 +- drivers/staging/rdma/hfi1/user_exp_rcv.c | 9 +++++---- 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index a3515d7f6354..29d6d3e0694d 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -122,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root) rbnode = rb_entry(node, struct mmu_rb_node, node); rb_erase(node, root); if (handler->ops->remove) - handler->ops->remove(root, rbnode); + handler->ops->remove(root, rbnode, false); } } @@ -196,12 +196,12 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, } static void __mmu_rb_remove(struct mmu_rb_handler *handler, - struct mmu_rb_node *node) + struct mmu_rb_node *node, bool arg) { /* Validity of handler and node pointers has been checked by caller. */ rb_erase(&node->node, handler->root); if (handler->ops->remove) - handler->ops->remove(handler->root, node); + handler->ops->remove(handler->root, node, arg); } struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, @@ -230,7 +230,7 @@ void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) return; spin_lock_irqsave(&handler->lock, flags); - __mmu_rb_remove(handler, node); + __mmu_rb_remove(handler, node, false); spin_unlock_irqrestore(&handler->lock, flags); } @@ -299,7 +299,7 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, naddr = node->addr; nlen = node->len; if (handler->ops->invalidate(root, node)) - __mmu_rb_remove(handler, node); + __mmu_rb_remove(handler, node, true); /* * The next address to be looked up is computed based diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h index 9fe1076ab39d..fdd978757b90 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.h +++ b/drivers/staging/rdma/hfi1/mmu_rb.h @@ -59,7 +59,7 @@ struct mmu_rb_ops { int (*compare)(struct mmu_rb_node *, unsigned long, unsigned long); int (*insert)(struct rb_root *, struct mmu_rb_node *); - void (*remove)(struct rb_root *, struct mmu_rb_node *); + void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); int (*invalidate)(struct rb_root *, struct mmu_rb_node *); }; diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index b0b193f30fac..1d971c0fedd6 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -89,7 +89,7 @@ static int set_rcvarray_entry(struct file *, unsigned long, u32, static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, unsigned long); static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); -static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *); +static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); static int program_rcvarray(struct file *, unsigned long, 
struct tid_group *, struct tid_pageset *, unsigned, u16, struct page **, @@ -897,7 +897,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, if (!node || node->rcventry != (uctxt->expected_base + rcventry)) return -EBADF; if (HFI1_CAP_IS_USET(TID_UNMAP)) - mmu_rb_remove(&fd->tid_rb_root, &node->mmu); + mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false); else hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); @@ -962,7 +962,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, continue; if (HFI1_CAP_IS_USET(TID_UNMAP)) mmu_rb_remove(&fd->tid_rb_root, - &node->mmu); + &node->mmu, false); else hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); @@ -1039,7 +1039,8 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node) return 0; } -static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) +static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node, + bool notifier) { struct hfi1_filedata *fdata = container_of(root, struct hfi1_filedata, tid_rb_root); -- cgit v1.2.3-59-g8ed1b From df5a00f81dab36b3479a2b84c836e98e701c78bc Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:14:53 -0800 Subject: IB/hfi1: Use interval RB trees The interval RB trees can handle RB nodes which hold ranged information. This is exactly the usage for the buffer cache implemented in the expected receive code path. Convert the MMU/RB functions to use the interval RB tree API. This will help with future users of the caching API, as well. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 106 +++++++++++-------------------------- drivers/staging/rdma/hfi1/mmu_rb.h | 3 +- 2 files changed, 34 insertions(+), 75 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index 29d6d3e0694d..540e267eee3c 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -46,7 +46,7 @@ */ #include #include -#include +#include #include "mmu_rb.h" #include "trace.h" @@ -62,6 +62,8 @@ struct mmu_rb_handler { static LIST_HEAD(mmu_rb_handlers); static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */ +static unsigned long mmu_node_start(struct mmu_rb_node *); +static unsigned long mmu_node_last(struct mmu_rb_node *); static struct mmu_rb_handler *find_mmu_handler(struct rb_root *); static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, unsigned long); @@ -78,6 +80,19 @@ static struct mmu_notifier_ops mn_opts = { .invalidate_range_start = mmu_notifier_range_start, }; +INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last, + mmu_node_start, mmu_node_last, static, __mmu_int_rb); + +static unsigned long mmu_node_start(struct mmu_rb_node *node) +{ + return node->addr & PAGE_MASK; +} + +static unsigned long mmu_node_last(struct mmu_rb_node *node) +{ + return ((node->addr & PAGE_MASK) + node->len); +} + int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) { struct mmu_rb_handler *handlr; @@ -133,40 +148,27 @@ void hfi1_mmu_rb_unregister(struct rb_root *root) int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) { - struct rb_node **new, *parent = NULL; struct mmu_rb_handler *handler = find_mmu_handler(root); - struct mmu_rb_node *this; + struct mmu_rb_node *node; unsigned long flags; - int res, ret = 0; + int ret = 0; if (!handler) return -EINVAL; 
- new = &handler->root->rb_node; spin_lock_irqsave(&handler->lock, flags); - while (*new) { - this = container_of(*new, struct mmu_rb_node, node); - res = handler->ops->compare(this, mnode->addr, mnode->len); - parent = *new; - - if (res < 0) { - new = &((*new)->rb_left); - } else if (res > 0) { - new = &((*new)->rb_right); - } else { - ret = 1; - goto unlock; - } + node = __mmu_rb_search(handler, mnode->addr, mnode->len); + if (node) { + ret = -EINVAL; + goto unlock; } + __mmu_int_rb_insert(mnode, root); if (handler->ops->insert) { ret = handler->ops->insert(root, mnode); if (ret) - goto unlock; + __mmu_int_rb_remove(mnode, root); } - - rb_link_node(&mnode->node, parent, new); - rb_insert_color(&mnode->node, root); unlock: spin_unlock_irqrestore(&handler->lock, flags); return ret; @@ -177,29 +179,17 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, unsigned long addr, unsigned long len) { - struct rb_node *node = handler->root->rb_node; - struct mmu_rb_node *mnode; - int res; - - while (node) { - mnode = container_of(node, struct mmu_rb_node, node); - res = handler->ops->compare(mnode, addr, len); - - if (res < 0) - node = node->rb_left; - else if (res > 0) - node = node->rb_right; - else - return mnode; - } - return NULL; + struct mmu_rb_node *node; + + node = __mmu_int_rb_iter_first(handler->root, addr, len); + return node; } static void __mmu_rb_remove(struct mmu_rb_handler *handler, struct mmu_rb_node *node, bool arg) { /* Validity of handler and node pointers has been checked by caller. */ - rb_erase(&node->node, handler->root); + __mmu_int_rb_remove(node, handler->root); if (handler->ops->remove) handler->ops->remove(handler->root, node, arg); } @@ -271,45 +261,13 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, container_of(mn, struct mmu_rb_handler, mn); struct rb_root *root = handler->root; struct mmu_rb_node *node; - unsigned long addr = start, naddr, nlen, flags; + unsigned long flags; spin_lock_irqsave(&handler->lock, flags); - while (addr < end) { - /* - * There is no good way to provide a reasonable length to the - * search function at this point. Using the remaining length in - * the invalidation range is not the right thing to do. - * We have to rely on the fact that the insertion algorithm - * takes care of any overlap or length restrictions by using the - * actual size of each node. Therefore, we can use a page as an - * arbitrary, non-zero value. - */ - node = __mmu_rb_search(handler, addr, PAGE_SIZE); - - if (!node) { - /* - * Didn't find a node at this address. However, the - * range could be bigger than what we have registered - * so we have to keep looking. - */ - addr += PAGE_SIZE; - continue; - } - - naddr = node->addr; - nlen = node->len; + for (node = __mmu_int_rb_iter_first(root, start, end); node; + node = __mmu_int_rb_iter_next(node, start, end)) { if (handler->ops->invalidate(root, node)) __mmu_rb_remove(handler, node, true); - - /* - * The next address to be looked up is computed based - * on the node's starting address. This is due to the - * fact that the range where we start might be in the - * middle of the node's buffer so simply incrementing - * the address by the node's size would result is a - * bad address. 
- */ - addr = naddr + nlen; } spin_unlock_irqrestore(&handler->lock, flags); } diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h index fdd978757b90..abed3a69e467 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.h +++ b/drivers/staging/rdma/hfi1/mmu_rb.h @@ -50,9 +50,10 @@ #include "hfi.h" struct mmu_rb_node { - struct rb_node node; unsigned long addr; unsigned long len; + unsigned long __last; + struct rb_node node; }; struct mmu_rb_ops { -- cgit v1.2.3-59-g8ed1b From 353b71c7c08ed75fe83843a382e5ca53376d07ca Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:14:59 -0800 Subject: IB/hfi1: Add MMU tracing Add a new tracepoint type for the MMU functions and calls to that tracepoint to allow tracing of MMU functionality. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 10 ++++++++++ drivers/staging/rdma/hfi1/trace.c | 1 + drivers/staging/rdma/hfi1/trace.h | 1 + 3 files changed, 12 insertions(+) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index 540e267eee3c..c30373df3874 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -157,6 +157,8 @@ int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) return -EINVAL; spin_lock_irqsave(&handler->lock, flags); + hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr, + mnode->len); node = __mmu_rb_search(handler, mnode->addr, mnode->len); if (node) { ret = -EINVAL; @@ -181,7 +183,11 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, { struct mmu_rb_node *node; + hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len); node = __mmu_int_rb_iter_first(handler->root, addr, len); + if (node) + hfi1_cdbg(MMU, "Found node addr 0x%llx, len %u", node->addr, + node->len); return node; } @@ -189,6 +195,8 @@ static void __mmu_rb_remove(struct mmu_rb_handler *handler, struct mmu_rb_node *node, bool arg) { /* Validity of handler and node pointers has been checked by caller. */ + hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, + node->len); __mmu_int_rb_remove(node, handler->root); if (handler->ops->remove) handler->ops->remove(handler->root, node, arg); @@ -266,6 +274,8 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, spin_lock_irqsave(&handler->lock, flags); for (node = __mmu_int_rb_iter_first(root, start, end); node; node = __mmu_int_rb_iter_next(node, start, end)) { + hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", + node->addr, node->len); if (handler->ops->invalidate(root, node)) __mmu_rb_remove(handler, node, true); } diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c index 6821d7c377e5..8b62fefcf903 100644 --- a/drivers/staging/rdma/hfi1/trace.c +++ b/drivers/staging/rdma/hfi1/trace.c @@ -232,3 +232,4 @@ __hfi1_trace_fn(DC8051); __hfi1_trace_fn(FIRMWARE); __hfi1_trace_fn(RCVCTRL); __hfi1_trace_fn(TID); +__hfi1_trace_fn(MMU); diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h index 4d91c18fa694..963dc948c38a 100644 --- a/drivers/staging/rdma/hfi1/trace.h +++ b/drivers/staging/rdma/hfi1/trace.h @@ -1340,6 +1340,7 @@ __hfi1_trace_def(DC8051); __hfi1_trace_def(FIRMWARE); __hfi1_trace_def(RCVCTRL); __hfi1_trace_def(TID); +__hfi1_trace_def(MMU); #define hfi1_cdbg(which, fmt, ...) 
\ __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__) -- cgit v1.2.3-59-g8ed1b From b8718e2e2e4702dfbe0d9a15d527e0531807e871 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:15:04 -0800 Subject: IB/hfi1: Remove compare callback Interval RB trees provide their own searching function, which also takes care of determining the path through the tree that should be taken. This make the compare callback unnecessary. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 2 +- drivers/staging/rdma/hfi1/mmu_rb.h | 2 -- drivers/staging/rdma/hfi1/user_exp_rcv.c | 14 -------------- 3 files changed, 1 insertion(+), 17 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index c30373df3874..5d27fee577b9 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -98,7 +98,7 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) struct mmu_rb_handler *handlr; unsigned long flags; - if (!ops->compare || !ops->invalidate) + if (!ops->invalidate) return -EINVAL; handlr = kmalloc(sizeof(*handlr), GFP_KERNEL); diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h index abed3a69e467..9c2600981e88 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.h +++ b/drivers/staging/rdma/hfi1/mmu_rb.h @@ -57,8 +57,6 @@ struct mmu_rb_node { }; struct mmu_rb_ops { - int (*compare)(struct mmu_rb_node *, unsigned long, - unsigned long); int (*insert)(struct rb_root *, struct mmu_rb_node *); void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); int (*invalidate)(struct rb_root *, struct mmu_rb_node *); diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 1d971c0fedd6..bf670cbf82da 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -86,8 +86,6 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *, static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *); static int set_rcvarray_entry(struct file *, unsigned long, u32, struct tid_group *, struct page **, unsigned); -static inline int mmu_addr_cmp(struct mmu_rb_node *, unsigned long, - unsigned long); static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); @@ -98,7 +96,6 @@ static int unprogram_rcvarray(struct file *, u32, struct tid_group **); static void clear_tid_node(struct hfi1_filedata *, u16, struct tid_rb_node *); static struct mmu_rb_ops tid_rb_ops = { - .compare = mmu_addr_cmp, .insert = mmu_rb_insert, .remove = mmu_rb_remove, .invalidate = mmu_rb_invalidate @@ -1016,17 +1013,6 @@ static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode) return 0; } -static int mmu_addr_cmp(struct mmu_rb_node *node, unsigned long addr, - unsigned long len) -{ - if ((addr + len) <= node->addr) - return -1; - else if (addr >= node->addr && addr < (node->addr + node->len)) - return 0; - else - return 1; -} - static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node) { struct hfi1_filedata *fdata = -- cgit v1.2.3-59-g8ed1b From 0f310a00e02094ea7a2a7d2ae45bd51d97706caa Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:15:10 -0800 Subject: 
IB/hfi1: Add filter callback This commit adds a filter callback, which can be used to filter out interval RB nodes matching a certain interval down to a single one. This is needed for the upcoming SDMA-side caching where buffers will need to be filtered by their virtual address. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 19 ++++++++++++++----- drivers/staging/rdma/hfi1/mmu_rb.h | 1 + 2 files changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index 5d27fee577b9..6edd5f022057 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -181,13 +181,22 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, unsigned long addr, unsigned long len) { - struct mmu_rb_node *node; + struct mmu_rb_node *node = NULL; hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len); - node = __mmu_int_rb_iter_first(handler->root, addr, len); - if (node) - hfi1_cdbg(MMU, "Found node addr 0x%llx, len %u", node->addr, - node->len); + if (!handler->ops->filter) { + node = __mmu_int_rb_iter_first(handler->root, addr, + (addr + len) - 1); + } else { + for (node = __mmu_int_rb_iter_first(handler->root, addr, + (addr + len) - 1); + node; + node = __mmu_int_rb_iter_next(node, addr, + (addr + len) - 1)) { + if (handler->ops->filter(node, addr, len)) + return node; + } + } return node; } diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h index 9c2600981e88..f8523fdb8a18 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.h +++ b/drivers/staging/rdma/hfi1/mmu_rb.h @@ -57,6 +57,7 @@ struct mmu_rb_node { }; struct mmu_rb_ops { + bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long); int (*insert)(struct rb_root *, struct mmu_rb_node *); void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); int (*invalidate)(struct rb_root *, struct mmu_rb_node *); -- cgit v1.2.3-59-g8ed1b From a489876010377481823ae5dbbd83fa32792a2e16 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:15:16 -0800 Subject: IB/hfi1: Adjust last address values for intervals Last address values for intervals in the interval RB tree nodes should be non-inclusive in order to avoid confusing ranges. 
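A quick worked example (assuming 4 KiB pages; the addresses are illustrative, not taken from the patch): a node with addr = 0x1000 and len = 0x2800 pins the pages [0x1000, 0x4000), so the inclusive last address stored in the tree becomes

	PAGE_ALIGN((0x1000 & PAGE_MASK) + 0x2800) - 1 = 0x4000 - 1 = 0x3fff

Likewise, an invalidation of the half-open range [0x2000, 0x3000) now queries the tree with (start, end - 1) = (0x2000, 0x2fff), so a neighboring interval that ends exactly at 0x2000 no longer registers as overlapping.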
Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/mmu_rb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index 6edd5f022057..c7ad0164ea9a 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c @@ -90,7 +90,7 @@ static unsigned long mmu_node_start(struct mmu_rb_node *node) static unsigned long mmu_node_last(struct mmu_rb_node *node) { - return ((node->addr & PAGE_MASK) + node->len); + return PAGE_ALIGN((node->addr & PAGE_MASK) + node->len) - 1; } int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops) @@ -281,8 +281,8 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, unsigned long flags; spin_lock_irqsave(&handler->lock, flags); - for (node = __mmu_int_rb_iter_first(root, start, end); node; - node = __mmu_int_rb_iter_next(node, start, end)) { + for (node = __mmu_int_rb_iter_first(root, start, end - 1); node; + node = __mmu_int_rb_iter_next(node, start, end - 1)) { hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", node->addr, node->len); if (handler->ops->invalidate(root, node)) -- cgit v1.2.3-59-g8ed1b From 5cd3a88d7f2b050164dc1df59a398294515126d9 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:15:22 -0800 Subject: IB/hfi1: Implement SDMA-side buffer caching Add support for caching of user buffers used for SDMA transfers. This change improves performance by avoiding repeatedly pinning the pages of buffers that are being re-used by the application. While the cost of the pinning operation has been made heavier by the extra code needed to search the cache tree, re-allocate page arrays, and handle future cache evictions, that cost will be amortized against the savings when the same buffer is re-used. It is also worth noting that in most cases, the cost of pinning should be much lower due to the buffer already being in the cache. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_sdma.c | 255 ++++++++++++++++++++-------------- drivers/staging/rdma/hfi1/user_sdma.h | 1 + 2 files changed, 155 insertions(+), 101 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index 14fe0790a35b..a53edb96ca50 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -68,6 +68,7 @@ #include "verbs.h" /* for the headers */ #include "common.h" /* for struct hfi1_tid_info */ #include "trace.h" +#include "mmu_rb.h" static uint hfi1_sdma_comp_ring_size = 128; module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO); @@ -145,9 +146,6 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. 
Default: 12 /* Last packet in the request */ #define TXREQ_FLAGS_REQ_LAST_PKT BIT(0) -/* Last packet that uses a particular io vector */ -#define TXREQ_FLAGS_IOVEC_LAST_PKT BIT(0) - #define SDMA_REQ_IN_USE 0 #define SDMA_REQ_FOR_THREAD 1 #define SDMA_REQ_SEND_DONE 2 @@ -183,6 +181,13 @@ struct user_sdma_iovec { u64 offset; }; +struct sdma_mmu_node { + struct mmu_rb_node rb; + atomic_t refcount; + struct page **pages; + unsigned npages; +}; + struct user_sdma_request { struct sdma_req_info info; struct hfi1_user_sdma_pkt_q *pq; @@ -252,11 +257,6 @@ struct user_sdma_txreq { struct sdma_txreq txreq; struct list_head list; struct user_sdma_request *req; - struct { - struct user_sdma_iovec *vec; - u8 flags; - } iovecs[3]; - int idx; u16 flags; unsigned busycount; u64 seqnum; @@ -277,7 +277,7 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *); static void user_sdma_free_request(struct user_sdma_request *, bool); static int pin_vector_pages(struct user_sdma_request *, struct user_sdma_iovec *); -static void unpin_vector_pages(struct user_sdma_iovec *); +static void unpin_vector_pages(struct page **, unsigned); static int check_header_template(struct user_sdma_request *, struct hfi1_pkt_header *, u32, u32); static int set_txreq_header(struct user_sdma_request *, @@ -296,6 +296,17 @@ static int defer_packet_queue( struct sdma_txreq *, unsigned seq); static void activate_packet_queue(struct iowait *, int); +static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); +static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *); +static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); +static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *); + +static struct mmu_rb_ops sdma_rb_ops = { + .filter = sdma_rb_filter, + .insert = sdma_rb_insert, + .remove = sdma_rb_remove, + .invalidate = sdma_rb_invalidate +}; static int defer_packet_queue( struct sdma_engine *sde, @@ -385,6 +396,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp) pq->state = SDMA_PKT_Q_INACTIVE; atomic_set(&pq->n_reqs, 0); init_waitqueue_head(&pq->wait); + pq->sdma_rb_root = RB_ROOT; iowait_init(&pq->busy, 0, NULL, defer_packet_queue, activate_packet_queue, NULL); @@ -415,6 +427,12 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp) cq->nentries = hfi1_sdma_comp_ring_size; fd->cq = cq; + ret = hfi1_mmu_rb_register(&pq->sdma_rb_root, &sdma_rb_ops); + if (ret) { + dd_dev_err(dd, "Failed to register with MMU %d", ret); + goto done; + } + spin_lock_irqsave(&uctxt->sdma_qlock, flags); list_add(&pq->list, &uctxt->sdma_queues); spin_unlock_irqrestore(&uctxt->sdma_qlock, flags); @@ -444,6 +462,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd) hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit, uctxt->ctxt, fd->subctxt); pq = fd->pq; + hfi1_mmu_rb_unregister(&pq->sdma_rb_root); if (pq) { spin_lock_irqsave(&uctxt->sdma_qlock, flags); if (!list_empty(&pq->list)) @@ -477,7 +496,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, struct hfi1_user_sdma_pkt_q *pq = fd->pq; struct hfi1_user_sdma_comp_q *cq = fd->cq; struct hfi1_devdata *dd = pq->dd; - unsigned long idx = 0, unpinned; + unsigned long idx = 0; u8 pcount = initial_pkt_count; struct sdma_req_info info; struct user_sdma_request *req; @@ -498,14 +517,6 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, return -EFAULT; } - /* Process any completed vectors */ - unpinned = 
xchg(&pq->unpinned, 0); - if (unpinned) { - down_write(&current->mm->mmap_sem); - current->mm->pinned_vm -= unpinned; - up_write(&current->mm->mmap_sem); - } - trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt, (u16 *)&info); if (cq->comps[info.comp_idx].status == QUEUED || @@ -609,7 +620,11 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, while (i < req->data_iovs) { INIT_LIST_HEAD(&req->iovs[i].list); memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec)); - req->iovs[i].offset = 0; + ret = pin_vector_pages(req, &req->iovs[i]); + if (ret) { + req->status = ret; + goto free_req; + } req->data_len += req->iovs[i++].iov.iov_len; } SDMA_DBG(req, "total data length %u", req->data_len); @@ -827,9 +842,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) tx->flags = 0; tx->req = req; tx->busycount = 0; - tx->idx = -1; INIT_LIST_HEAD(&tx->list); - memset(tx->iovecs, 0, sizeof(tx->iovecs)); if (req->seqnum == req->info.npkts - 1) tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT; @@ -850,18 +863,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) WARN_ON(iovec->offset); } - /* - * This request might include only a header and no user - * data, so pin pages only if there is data and it the - * pages have not been pinned already. - */ - if (unlikely(!iovec->pages && iovec->iov.iov_len)) { - ret = pin_vector_pages(req, iovec); - if (ret) - goto free_tx; - } - - tx->iovecs[++tx->idx].vec = iovec; datalen = compute_data_length(req, tx); if (!datalen) { SDMA_DBG(req, @@ -960,19 +961,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) data_sent += len; if (unlikely(queued < datalen && pageidx == iovec->npages && - req->iov_idx < req->data_iovs - 1 && - tx->idx < ARRAY_SIZE(tx->iovecs))) { + req->iov_idx < req->data_iovs - 1)) { iovec->offset += iov_offset; - tx->iovecs[tx->idx].flags |= - TXREQ_FLAGS_IOVEC_LAST_PKT; iovec = &req->iovs[++req->iov_idx]; - if (!iovec->pages) { - ret = pin_vector_pages(req, iovec); - if (ret) - goto free_txreq; - } iov_offset = 0; - tx->iovecs[++tx->idx].vec = iovec; } } /* @@ -983,18 +975,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) if (req_opcode(req->info.ctrl) == EXPECTED) req->tidoffset += datalen; req->sent += data_sent; - if (req->data_len) { - tx->iovecs[tx->idx].vec->offset += iov_offset; - /* - * If we've reached the end of the io vector, mark it - * so the callback can unpin the pages and free it. 
- */ - if (tx->iovecs[tx->idx].vec->offset == - tx->iovecs[tx->idx].vec->iov.iov_len) - tx->iovecs[tx->idx].flags |= - TXREQ_FLAGS_IOVEC_LAST_PKT; - } - + if (req->data_len) + iovec->offset += iov_offset; list_add_tail(&tx->txreq.list, &req->txps); /* * It is important to increment this here as it is used to @@ -1047,38 +1029,78 @@ static inline int num_user_pages(const struct iovec *iov) static int pin_vector_pages(struct user_sdma_request *req, struct user_sdma_iovec *iovec) { - int pinned, npages; + int ret = 0, pinned, npages; + struct page **pages; + struct hfi1_user_sdma_pkt_q *pq = req->pq; + struct sdma_mmu_node *node = NULL; + struct mmu_rb_node *rb_node; + + rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root, + (unsigned long)iovec->iov.iov_base, + iovec->iov.iov_len); + if (rb_node) + node = container_of(rb_node, struct sdma_mmu_node, rb); + + if (!node) { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; - npages = num_user_pages(&iovec->iov); - iovec->pages = kcalloc(npages, sizeof(*iovec->pages), GFP_KERNEL); - if (!iovec->pages) { - SDMA_DBG(req, "Failed page array alloc"); - return -ENOMEM; + node->rb.addr = (unsigned long)iovec->iov.iov_base; + node->rb.len = iovec->iov.iov_len; + atomic_set(&node->refcount, 0); } - pinned = hfi1_acquire_user_pages((unsigned long)iovec->iov.iov_base, - npages, 0, iovec->pages); - - if (pinned < 0) - return pinned; + npages = num_user_pages(&iovec->iov); + if (node->npages < npages) { + pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); + if (!pages) { + SDMA_DBG(req, "Failed page array alloc"); + ret = -ENOMEM; + goto bail; + } + memcpy(pages, node->pages, node->npages * sizeof(*pages)); + + npages -= node->npages; + pinned = hfi1_acquire_user_pages( + ((unsigned long)iovec->iov.iov_base + + (node->npages * PAGE_SIZE)), npages, 0, + pages + node->npages); + if (pinned < 0) { + kfree(pages); + ret = pinned; + goto bail; + } + if (pinned != npages) { + unpin_vector_pages(pages, pinned); + ret = -EFAULT; + goto bail; + } + kfree(node->pages); + node->pages = pages; + node->npages += pinned; + npages = node->npages; + } + iovec->pages = node->pages; + iovec->npages = npages; - iovec->npages = pinned; - if (pinned != npages) { - SDMA_DBG(req, "Failed to pin pages (%d/%u)", pinned, npages); - unpin_vector_pages(iovec); - return -EFAULT; + if (!rb_node) { + if (hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb)) + goto bail; + } else { + atomic_inc(&node->refcount); } return 0; +bail: + if (!rb_node) + kfree(node); + return ret; } -static void unpin_vector_pages(struct user_sdma_iovec *iovec) +static void unpin_vector_pages(struct page **pages, unsigned npages) { - hfi1_release_user_pages(iovec->pages, iovec->npages, 0); - - kfree(iovec->pages); - iovec->pages = NULL; - iovec->npages = 0; - iovec->offset = 0; + hfi1_release_user_pages(pages, npages, 0); + kfree(pages); } static int check_header_template(struct user_sdma_request *req, @@ -1360,7 +1382,6 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) struct hfi1_user_sdma_pkt_q *pq; struct hfi1_user_sdma_comp_q *cq; u16 idx; - int i, j; if (!tx->req) return; @@ -1369,24 +1390,6 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) pq = req->pq; cq = req->cq; - /* - * If we have any io vectors associated with this txreq, - * check whether they need to be 'freed'. 
- */ - for (i = tx->idx; i >= 0; i--) { - if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT) { - struct user_sdma_iovec *vec = - tx->iovecs[i].vec; - - for (j = 0; j < vec->npages; j++) - put_page(vec->pages[j]); - xadd(&pq->unpinned, vec->npages); - kfree(vec->pages); - vec->pages = NULL; - vec->npages = 0; - } - } - if (status != SDMA_TXREQ_S_OK) { SDMA_DBG(req, "SDMA completion with error %d", status); @@ -1439,12 +1442,26 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) kmem_cache_free(req->pq->txreq_cache, tx); } } - if (req->data_iovs && unpin) { + if (req->data_iovs) { + struct sdma_mmu_node *node; + struct mmu_rb_node *mnode; int i; - for (i = 0; i < req->data_iovs; i++) - if (req->iovs[i].npages && req->iovs[i].pages) - unpin_vector_pages(&req->iovs[i]); + for (i = 0; i < req->data_iovs; i++) { + mnode = hfi1_mmu_rb_search( + &req->pq->sdma_rb_root, + (unsigned long)req->iovs[i].iov.iov_base, + req->iovs[i].iov.iov_len); + if (!mnode) + continue; + + node = container_of(mnode, struct sdma_mmu_node, rb); + if (unpin) + hfi1_mmu_rb_remove(&req->pq->sdma_rb_root, + &node->rb); + else + atomic_dec(&node->refcount); + } } kfree(req->tids); clear_bit(SDMA_REQ_IN_USE, &req->flags); @@ -1463,3 +1480,39 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq, trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt, idx, state, ret); } + +static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr, + unsigned long len) +{ + return (bool)(node->addr == addr); +} + +static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) +{ + struct sdma_mmu_node *node = + container_of(mnode, struct sdma_mmu_node, rb); + + atomic_inc(&node->refcount); + return 0; +} + +static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, + bool notifier) +{ + struct sdma_mmu_node *node = + container_of(mnode, struct sdma_mmu_node, rb); + + if (!notifier) + unpin_vector_pages(node->pages, node->npages); + kfree(node); +} + +static int sdma_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode) +{ + struct sdma_mmu_node *node = + container_of(mnode, struct sdma_mmu_node, rb); + + if (!atomic_read(&node->refcount)) + return 1; + return 0; +} diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h index e0d0fe02d557..39866b546523 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.h +++ b/drivers/staging/rdma/hfi1/user_sdma.h @@ -67,6 +67,7 @@ struct hfi1_user_sdma_pkt_q { unsigned state; wait_queue_head_t wait; unsigned long unpinned; + struct rb_root sdma_rb_root; }; struct hfi1_user_sdma_comp_q { -- cgit v1.2.3-59-g8ed1b From 2c97ce4f3c292e9ab75c7b6b4d9f69f0a9ee241d Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:15:28 -0800 Subject: IB/hfi1: Add pin query function System administrators can use the locked memory ulimit setting to set the maximum amount of memory a user can lock/pin. However, this setting alone is not enough to guarantee good operation of the hfi1 driver due to the fact that the setting does not have fine enough granularity to account for the limit being used by multiple user processes and caches. Therefore, a better limiting algorithm is needed. This is where the new hfi1_can_pin_pages() function and the cache_size module parameter come in. The function works by looking at the ulimit and cache_size value to compute a cache size. 
The algorithm examines the ulimit value and, if it is not "unlimited", computes a per-cache limit based on the number of configured user contexts. After that, the lower of the two - cache_size and computed per-cache limit - is used. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 1 + drivers/staging/rdma/hfi1/user_pages.c | 52 ++++++++++++++++++++++++++++++---- 2 files changed, 47 insertions(+), 6 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 2107cdc8ce3f..ff3b37ad89a3 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1664,6 +1664,7 @@ void shutdown_led_override(struct hfi1_pportdata *ppd); */ #define DEFAULT_RCVHDR_ENTSIZE 32 +bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32); int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **); void hfi1_release_user_pages(struct page **, size_t, bool); diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c index 3bf81086c24d..bd7a8ab0d635 100644 --- a/drivers/staging/rdma/hfi1/user_pages.c +++ b/drivers/staging/rdma/hfi1/user_pages.c @@ -48,22 +48,62 @@ #include #include #include +#include #include "hfi.h" -int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable, - struct page **pages) +static unsigned long cache_size = 256; +module_param(cache_size, ulong, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)"); + +/* + * Determine whether the caller can pin pages. + * + * This function should be used in the implementation of buffer caches. + * The cache implementation should call this function prior to attempting + * to pin buffer pages in order to determine whether they should do so. + * The function computes cache limits based on the configured ulimit and + * cache size. Use of this function is especially important for caches + * which are not limited in any other way (e.g. by HW resources) and, thus, + * could keep caching buffers. + * + */ +bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages) { - unsigned long pinned, lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit, + size = (cache_size * (1UL << 20)); /* convert to bytes */ + unsigned usr_ctxts = dd->num_rcv_contexts - dd->first_user_ctxt; bool can_lock = capable(CAP_IPC_LOCK); - int ret; + + /* + * Calculate per-cache size. The calculation below uses only a quarter + * of the available per-context limit. This leaves space for other + * pinning. Should we worry about shared ctxts? + */ + cache_limit = (ulimit / usr_ctxts) / 4; + + /* If ulimit isn't set to "unlimited" and is smaller than cache_size. */ + if (ulimit != (-1UL) && size > cache_limit) + size = cache_limit; + + /* Convert to number of pages */ + size = DIV_ROUND_UP(size, PAGE_SIZE); down_read(&current->mm->mmap_sem); pinned = current->mm->pinned_vm; up_read(&current->mm->mmap_sem); - if (pinned + npages > lock_limit && !can_lock) - return -ENOMEM; + /* First, check the absolute limit against all pinned pages. 
*/ if (pinned + npages >= ulimit && !can_lock) return false; + + return ((nlocked + npages) <= size) || can_lock; +} + +int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable, + struct page **pages) +{ + int ret; ret = get_user_pages_fast(vaddr, npages, writable, pages); if (ret < 0) -- cgit v1.2.3-59-g8ed1b From bd3a8947de916534722b0861d865d3a809c0743c Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:15:33 -0800 Subject: IB/hfi1: Specify mm when releasing pages This change adds a pointer to the process mm_struct when calling hfi1_release_user_pages(). Previously, the function used the mm_struct of the current process to adjust the number of pinned pages. However, in some cases, namely when unpinning pages due to an MMU notifier call, we want to avoid dropping into that code block, as it would cause a deadlock (the MMU notifiers take the process' mmap_sem prior to calling the callbacks). By allowing the caller to specify the pointer to the mm_struct, the caller has finer control over that part of hfi1_release_user_pages(). Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 2 +- drivers/staging/rdma/hfi1/user_exp_rcv.c | 4 ++-- drivers/staging/rdma/hfi1/user_pages.c | 11 ++++++----- drivers/staging/rdma/hfi1/user_sdma.c | 19 +++++++++++++------ 4 files changed, 22 insertions(+), 14 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index ff3b37ad89a3..3dc644d92e3a 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1666,7 +1666,7 @@ void shutdown_led_override(struct hfi1_pportdata *ppd); bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32); int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **); -void hfi1_release_user_pages(struct page **, size_t, bool); +void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool); static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd) { diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index bf670cbf82da..591605a13243 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -550,7 +550,7 @@ nomem: * for example), unpin all unmapped pages so we can pin them next time. 
*/ if (mapped_pages != pinned) - hfi1_release_user_pages(&pages[mapped_pages], + hfi1_release_user_pages(current->mm, &pages[mapped_pages], pinned - mapped_pages, false); bail: @@ -923,7 +923,7 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt, pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len, PCI_DMA_FROMDEVICE); - hfi1_release_user_pages(node->pages, node->npages, true); + hfi1_release_user_pages(current->mm, node->pages, node->npages, true); node->grp->used--; node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c index bd7a8ab0d635..88e10b5f55f1 100644 --- a/drivers/staging/rdma/hfi1/user_pages.c +++ b/drivers/staging/rdma/hfi1/user_pages.c @@ -116,7 +116,8 @@ int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable, return ret; } -void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty) +void hfi1_release_user_pages(struct mm_struct *mm, struct page **p, + size_t npages, bool dirty) { size_t i; @@ -126,9 +127,9 @@ void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty) put_page(p[i]); } - if (current->mm) { /* during close after signal, mm can be NULL */ - down_write(&current->mm->mmap_sem); - current->mm->pinned_vm -= npages; - up_write(&current->mm->mmap_sem); + if (mm) { /* during close after signal, mm can be NULL */ + down_write(&mm->mmap_sem); + mm->pinned_vm -= npages; + up_write(&mm->mmap_sem); } } diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index a53edb96ca50..bf55a41d151a 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -277,7 +277,7 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *); static void user_sdma_free_request(struct user_sdma_request *, bool); static int pin_vector_pages(struct user_sdma_request *, struct user_sdma_iovec *); -static void unpin_vector_pages(struct page **, unsigned); +static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned); static int check_header_template(struct user_sdma_request *, struct hfi1_pkt_header *, u32, u32); static int set_txreq_header(struct user_sdma_request *, @@ -1072,7 +1072,7 @@ static int pin_vector_pages(struct user_sdma_request *req, goto bail; } if (pinned != npages) { - unpin_vector_pages(pages, pinned); + unpin_vector_pages(current->mm, pages, pinned); ret = -EFAULT; goto bail; } @@ -1097,9 +1097,10 @@ bail: return ret; } -static void unpin_vector_pages(struct page **pages, unsigned npages) +static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, + unsigned npages) { - hfi1_release_user_pages(pages, npages, 0); + hfi1_release_user_pages(mm, pages, npages, 0); kfree(pages); } @@ -1502,8 +1503,14 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, struct sdma_mmu_node *node = container_of(mnode, struct sdma_mmu_node, rb); - if (!notifier) - unpin_vector_pages(node->pages, node->npages); + unpin_vector_pages(notifier ? NULL : current->mm, node->pages, + node->npages); + /* + * If called by the MMU notifier, we have to adjust the pinned + * page count ourselves. 
+ */ + if (notifier) + current->mm->pinned_vm -= node->npages; kfree(node); } -- cgit v1.2.3-59-g8ed1b From a7922f7ddf023c93b0c409d7a3557fdf0b5ce343 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:15:39 -0800 Subject: IB/hfi1: Switch to using the pin query function Use the new function to query whether the expected receive user buffer can be pinned successfully. This requires that a new variable be added to the hfi1_filedata structure to hold the number of pages pinned by the expected receive code. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/hfi.h | 1 + drivers/staging/rdma/hfi1/user_exp_rcv.c | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 3dc644d92e3a..16cbdc4073e0 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1190,6 +1190,7 @@ struct hfi1_filedata { struct hfi1_user_sdma_pkt_q *pq; /* for cpu affinity; -1 if none */ int rec_cpu_num; + u32 tid_n_pinned; struct rb_root tid_rb_root; struct tid_rb_node **entry_to_rb; spinlock_t tid_lock; /* protect tid_[limit,used] counters */ diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 591605a13243..0861e095df8d 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c @@ -396,11 +396,14 @@ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo) * pages, accept the amount pinned so far and program only that. * User space knows how to deal with partially programmed buffers. */ + if (!hfi1_can_pin_pages(dd, fd->tid_n_pinned, npages)) + return -ENOMEM; pinned = hfi1_acquire_user_pages(vaddr, npages, true, pages); if (pinned <= 0) { ret = pinned; goto bail; } + fd->tid_n_pinned += npages; /* Find sets of physically contiguous pages */ npagesets = find_phys_blocks(pages, pinned, pagesets); @@ -549,10 +552,12 @@ nomem: * If not everything was mapped (due to insufficient RcvArray entries, * for example), unpin all unmapped pages so we can pin them next time. */ - if (mapped_pages != pinned) + if (mapped_pages != pinned) { hfi1_release_user_pages(current->mm, &pages[mapped_pages], pinned - mapped_pages, false); + fd->tid_n_pinned -= pinned - mapped_pages; + } bail: kfree(pagesets); kfree(pages); @@ -924,6 +929,7 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt, pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len, PCI_DMA_FROMDEVICE); hfi1_release_user_pages(current->mm, node->pages, node->npages, true); + fd->tid_n_pinned -= node->npages; node->grp->used--; node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); -- cgit v1.2.3-59-g8ed1b From 5511d7810752f426f0a9f999100fd249d352c2ef Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Tue, 8 Mar 2016 11:15:44 -0800 Subject: IB/hfi1: Add SDMA cache eviction algorithm This commit adds a cache eviction algorithm for the SDMA user buffer cache. Besides the interval RB tree used for node lookup, the cache nodes are also arranged in a doubly-linked list. When a node is used, it is put at the beginning of the list. Less frequently used nodes naturally move to the tail of the list. When the cache limit is reached, the eviction code starts traversing the linked list in reverse, freeing buffers until enough space has been freed to fit the new user buffer. 
This guarantees that only the least used cache nodes will be removed from the cache. Reviewed-by: Dennis Dalessandro Reviewed-by: Dean Luick Signed-off-by: Mitko Haralanov Signed-off-by: Jubin John Signed-off-by: Doug Ledford --- drivers/staging/rdma/hfi1/user_sdma.c | 61 +++++++++++++++++++++++++++++++++-- drivers/staging/rdma/hfi1/user_sdma.h | 3 ++ 2 files changed, 62 insertions(+), 2 deletions(-) (limited to 'drivers/staging') diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index bf55a41d151a..46e254d52dad 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -183,6 +183,8 @@ struct user_sdma_iovec { struct sdma_mmu_node { struct mmu_rb_node rb; + struct list_head list; + struct hfi1_user_sdma_pkt_q *pq; atomic_t refcount; struct page **pages; unsigned npages; @@ -397,6 +399,8 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp) atomic_set(&pq->n_reqs, 0); init_waitqueue_head(&pq->wait); pq->sdma_rb_root = RB_ROOT; + INIT_LIST_HEAD(&pq->evict); + spin_lock_init(&pq->evict_lock); iowait_init(&pq->busy, 0, NULL, defer_packet_queue, activate_packet_queue, NULL); @@ -1027,9 +1031,33 @@ static inline int num_user_pages(const struct iovec *iov) return 1 + ((epage - spage) >> PAGE_SHIFT); } +/* Caller must hold pq->evict_lock */ +static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) +{ + u32 cleared = 0; + struct sdma_mmu_node *node, *ptr; + + list_for_each_entry_safe_reverse(node, ptr, &pq->evict, list) { + /* Make sure that no one is still using the node. */ + if (!atomic_read(&node->refcount)) { + /* + * Need to use the page count now as the remove callback + * will free the node. + */ + cleared += node->npages; + spin_unlock(&pq->evict_lock); + hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb); + spin_lock(&pq->evict_lock); + if (cleared >= npages) + break; + } + } + return cleared; +} + static int pin_vector_pages(struct user_sdma_request *req, struct user_sdma_iovec *iovec) { - int ret = 0, pinned, npages; + int ret = 0, pinned, npages, cleared; struct page **pages; struct hfi1_user_sdma_pkt_q *pq = req->pq; struct sdma_mmu_node *node = NULL; @@ -1048,7 +1076,9 @@ static int pin_vector_pages(struct user_sdma_request *req, node->rb.addr = (unsigned long)iovec->iov.iov_base; node->rb.len = iovec->iov.iov_len; + node->pq = pq; atomic_set(&node->refcount, 0); + INIT_LIST_HEAD(&node->list); } npages = num_user_pages(&iovec->iov); @@ -1062,6 +1092,14 @@ static int pin_vector_pages(struct user_sdma_request *req, memcpy(pages, node->pages, node->npages * sizeof(*pages)); npages -= node->npages; +retry: + if (!hfi1_can_pin_pages(pq->dd, pq->n_locked, npages)) { + spin_lock(&pq->evict_lock); + cleared = sdma_cache_evict(pq, npages); + spin_unlock(&pq->evict_lock); + if (cleared >= npages) + goto retry; + } pinned = hfi1_acquire_user_pages( ((unsigned long)iovec->iov.iov_base + (node->npages * PAGE_SIZE)), npages, 0, @@ -1080,13 +1118,27 @@ static int pin_vector_pages(struct user_sdma_request *req, node->pages = pages; node->npages += pinned; npages = node->npages; + spin_lock(&pq->evict_lock); + if (!rb_node) + list_add(&node->list, &pq->evict); + else + list_move(&node->list, &pq->evict); + pq->n_locked += pinned; + spin_unlock(&pq->evict_lock); } iovec->pages = node->pages; iovec->npages = npages; if (!rb_node) { - if (hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb)) + ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb); + if (ret) { + 
spin_lock(&pq->evict_lock); + list_del(&node->list); + pq->n_locked -= node->npages; + spin_unlock(&pq->evict_lock); + ret = 0; goto bail; + } } else { atomic_inc(&node->refcount); } @@ -1503,6 +1555,11 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, struct sdma_mmu_node *node = container_of(mnode, struct sdma_mmu_node, rb); + spin_lock(&node->pq->evict_lock); + list_del(&node->list); + node->pq->n_locked -= node->npages; + spin_unlock(&node->pq->evict_lock); + unpin_vector_pages(notifier ? NULL : current->mm, node->pages, node->npages); /* diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h index 39866b546523..b9240e351161 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.h +++ b/drivers/staging/rdma/hfi1/user_sdma.h @@ -68,6 +68,9 @@ struct hfi1_user_sdma_pkt_q { wait_queue_head_t wait; unsigned long unpinned; struct rb_root sdma_rb_root; + u32 n_locked; + struct list_head evict; + spinlock_t evict_lock; /* protect evict and n_locked */ }; struct hfi1_user_sdma_comp_q { -- cgit v1.2.3-59-g8ed1b
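Distilled, the eviction scheme in the patch above is a reference-counted LRU: a doubly-linked list ordered by recency, guarded by a spinlock, with the refcount vetoing eviction of in-flight buffers. The sketch below is an illustrative, self-contained rendering of that discipline; the names (lru_cache, cache_node, cache_node_used, cache_evict) are stand-ins rather than symbols from the driver, and it frees nodes inline where the real code must drop evict_lock around hfi1_mmu_rb_remove():

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/atomic.h>
	#include <linux/types.h>

	struct lru_cache {
		struct list_head lru;	/* head == most recently used */
		spinlock_t lock;	/* protects lru and n_locked */
		u32 n_locked;		/* total pages currently pinned */
	};

	struct cache_node {
		struct list_head list;
		atomic_t refcount;	/* in-flight users of this buffer */
		unsigned int npages;
	};

	/* A hit moves the node to the head, so idle nodes drift to the tail. */
	static void cache_node_used(struct lru_cache *cache, struct cache_node *node)
	{
		spin_lock(&cache->lock);
		list_move(&node->list, &cache->lru);
		spin_unlock(&cache->lock);
	}

	/* Walk from the tail (least recently used) until 'goal' pages are freed. */
	static u32 cache_evict(struct lru_cache *cache, u32 goal)
	{
		struct cache_node *node, *tmp;
		u32 cleared = 0;

		spin_lock(&cache->lock);
		list_for_each_entry_safe_reverse(node, tmp, &cache->lru, list) {
			if (atomic_read(&node->refcount))
				continue;	/* buffer still in use; skip it */
			list_del(&node->list);
			cache->n_locked -= node->npages;
			cleared += node->npages;
			/* the real code unpins node->pages and frees node here */
			if (cleared >= goal)
				break;
		}
		spin_unlock(&cache->lock);
		return cleared;
	}

Walking in reverse with the _safe_ iterator is what makes it legal to unlink nodes mid-traversal, and skipping nodes with a non-zero refcount is what keeps buffers belonging to in-flight SDMA requests safe from eviction.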