Diffstat (limited to 'drivers/staging/rdma')
-rw-r--r--  drivers/staging/rdma/amso1100/c2.c | 17
-rw-r--r--  drivers/staging/rdma/amso1100/c2.h | 80
-rw-r--r--  drivers/staging/rdma/amso1100/c2_cq.c | 3
-rw-r--r--  drivers/staging/rdma/amso1100/c2_mq.h | 14
-rw-r--r--  drivers/staging/rdma/amso1100/c2_provider.c | 82
-rw-r--r--  drivers/staging/rdma/amso1100/c2_rnic.c | 5
-rw-r--r--  drivers/staging/rdma/amso1100/c2_vq.h | 20
-rw-r--r--  drivers/staging/rdma/ehca/ehca_av.c | 6
-rw-r--r--  drivers/staging/rdma/ehca/ehca_classes.h | 5
-rw-r--r--  drivers/staging/rdma/ehca/ehca_cq.c | 3
-rw-r--r--  drivers/staging/rdma/ehca/ehca_iverbs.h | 16
-rw-r--r--  drivers/staging/rdma/ehca/ehca_main.c | 7
-rw-r--r--  drivers/staging/rdma/ehca/ehca_mrmw.c | 483
-rw-r--r--  drivers/staging/rdma/ehca/ehca_mrmw.h | 5
-rw-r--r--  drivers/staging/rdma/ehca/ehca_pd.c | 3
-rw-r--r--  drivers/staging/rdma/ehca/ehca_qp.c | 3
-rw-r--r--  drivers/staging/rdma/ehca/ehca_reqs.c | 1
-rw-r--r--  drivers/staging/rdma/hfi1/Makefile | 2
-rw-r--r--  drivers/staging/rdma/hfi1/chip.c | 3550
-rw-r--r--  drivers/staging/rdma/hfi1/chip.h | 273
-rw-r--r--  drivers/staging/rdma/hfi1/chip_registers.h | 4
-rw-r--r--  drivers/staging/rdma/hfi1/common.h | 15
-rw-r--r--  drivers/staging/rdma/hfi1/diag.c | 499
-rw-r--r--  drivers/staging/rdma/hfi1/driver.c | 215
-rw-r--r--  drivers/staging/rdma/hfi1/efivar.c | 169
-rw-r--r--  drivers/staging/rdma/hfi1/efivar.h | 60
-rw-r--r--  drivers/staging/rdma/hfi1/eprom.c | 119
-rw-r--r--  drivers/staging/rdma/hfi1/file_ops.c | 207
-rw-r--r--  drivers/staging/rdma/hfi1/firmware.c | 193
-rw-r--r--  drivers/staging/rdma/hfi1/hfi.h | 104
-rw-r--r--  drivers/staging/rdma/hfi1/init.c | 101
-rw-r--r--  drivers/staging/rdma/hfi1/iowait.h | 6
-rw-r--r--  drivers/staging/rdma/hfi1/mad.c | 227
-rw-r--r--  drivers/staging/rdma/hfi1/mad.h | 114
-rw-r--r--  drivers/staging/rdma/hfi1/mr.c | 51
-rw-r--r--  drivers/staging/rdma/hfi1/pcie.c | 36
-rw-r--r--  drivers/staging/rdma/hfi1/pio.c | 68
-rw-r--r--  drivers/staging/rdma/hfi1/pio.h | 2
-rw-r--r--  drivers/staging/rdma/hfi1/pio_copy.c | 6
-rw-r--r--  drivers/staging/rdma/hfi1/qp.c | 48
-rw-r--r--  drivers/staging/rdma/hfi1/qp.h | 46
-rw-r--r--  drivers/staging/rdma/hfi1/qsfp.c | 5
-rw-r--r--  drivers/staging/rdma/hfi1/rc.c | 81
-rw-r--r--  drivers/staging/rdma/hfi1/ruc.c | 55
-rw-r--r--  drivers/staging/rdma/hfi1/sdma.c | 67
-rw-r--r--  drivers/staging/rdma/hfi1/sdma.h | 15
-rw-r--r--  drivers/staging/rdma/hfi1/trace.c | 29
-rw-r--r--  drivers/staging/rdma/hfi1/trace.h | 3
-rw-r--r--  drivers/staging/rdma/hfi1/uc.c | 15
-rw-r--r--  drivers/staging/rdma/hfi1/ud.c | 22
-rw-r--r--  drivers/staging/rdma/hfi1/user_exp_rcv.h | 74
-rw-r--r--  drivers/staging/rdma/hfi1/user_pages.c | 97
-rw-r--r--  drivers/staging/rdma/hfi1/user_sdma.c | 363
-rw-r--r--  drivers/staging/rdma/hfi1/user_sdma.h | 12
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.c | 131
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.h | 54
-rw-r--r--  drivers/staging/rdma/ipath/ipath_file_ops.c | 8
-rw-r--r--  drivers/staging/rdma/ipath/ipath_fs.c | 8
-rw-r--r--  drivers/staging/rdma/ipath/ipath_mr.c | 55
-rw-r--r--  drivers/staging/rdma/ipath/ipath_verbs.c | 1
-rw-r--r--  drivers/staging/rdma/ipath/ipath_verbs.h | 4
61 files changed, 5849 insertions(+), 2118 deletions(-)
diff --git a/drivers/staging/rdma/amso1100/c2.c b/drivers/staging/rdma/amso1100/c2.c
index 35ac536b2d45..b46ebd1ae15a 100644
--- a/drivers/staging/rdma/amso1100/c2.c
+++ b/drivers/staging/rdma/amso1100/c2.c
@@ -87,11 +87,6 @@ static struct pci_device_id c2_pci_table[] = {
MODULE_DEVICE_TABLE(pci, c2_pci_table);
-static void c2_print_macaddr(struct net_device *netdev)
-{
- pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
-}
-
static void c2_set_rxbufsize(struct c2_port *c2_port)
{
struct net_device *netdev = c2_port->netdev;
@@ -116,7 +111,8 @@ static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
struct c2_element *elem;
int i;
- tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
+ tx_ring->start = kmalloc_array(tx_ring->count, sizeof(*elem),
+ GFP_KERNEL);
if (!tx_ring->start)
return -ENOMEM;
@@ -165,7 +161,8 @@ static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
struct c2_element *elem;
int i;
- rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
+ rx_ring->start = kmalloc_array(rx_ring->count, sizeof(*elem),
+ GFP_KERNEL);
if (!rx_ring->start)
return -ENOMEM;
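Both hunks above make the same change: kmalloc_array(n, size, flags) is
preferred over kmalloc(n * size, flags) because it returns NULL when the
n * size multiplication would overflow, instead of silently allocating a
truncated buffer that the ring-fill loop would then overrun. A minimal
sketch of the pattern:

        #include <linux/slab.h>

        /* open-coded multiply: wraps silently if count is huge */
        elem = kmalloc(count * sizeof(*elem), GFP_KERNEL);

        /* checked multiply: fails cleanly on overflow */
        elem = kmalloc_array(count, sizeof(*elem), GFP_KERNEL);
        if (!elem)
                return -ENOMEM;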
@@ -908,7 +905,8 @@ static struct net_device *c2_devinit(struct c2_dev *c2dev,
/* Validate the MAC address */
if (!is_valid_ether_addr(netdev->dev_addr)) {
pr_debug("Invalid MAC Address\n");
- c2_print_macaddr(netdev);
+ pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name,
+ netdev->dev_addr, netdev->irq);
free_netdev(netdev);
return NULL;
}
@@ -1142,7 +1140,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
}
/* Print out the MAC address */
- c2_print_macaddr(netdev);
+ pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr,
+ netdev->irq);
ret = c2_rnic_init(c2dev);
if (ret) {
diff --git a/drivers/staging/rdma/amso1100/c2.h b/drivers/staging/rdma/amso1100/c2.h
index d619d735838b..21b565a91fd6 100644
--- a/drivers/staging/rdma/amso1100/c2.h
+++ b/drivers/staging/rdma/amso1100/c2.h
@@ -476,72 +476,72 @@ static inline int c2_errno(void *reply)
}
/* Device */
-extern int c2_register_device(struct c2_dev *c2dev);
-extern void c2_unregister_device(struct c2_dev *c2dev);
-extern int c2_rnic_init(struct c2_dev *c2dev);
-extern void c2_rnic_term(struct c2_dev *c2dev);
-extern void c2_rnic_interrupt(struct c2_dev *c2dev);
-extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
+int c2_register_device(struct c2_dev *c2dev);
+void c2_unregister_device(struct c2_dev *c2dev);
+int c2_rnic_init(struct c2_dev *c2dev);
+void c2_rnic_term(struct c2_dev *c2dev);
+void c2_rnic_interrupt(struct c2_dev *c2dev);
+int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
+int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
/* QPs */
-extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
+int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
-extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
-extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
-extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
+void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
+struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
+int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
struct ib_qp_attr *attr, int attr_mask);
-extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
+int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
int ord, int ird);
-extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
struct ib_send_wr **bad_wr);
-extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
+int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
struct ib_recv_wr **bad_wr);
-extern void c2_init_qp_table(struct c2_dev *c2dev);
-extern void c2_cleanup_qp_table(struct c2_dev *c2dev);
-extern void c2_set_qp_state(struct c2_qp *, int);
-extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
+void c2_init_qp_table(struct c2_dev *c2dev);
+void c2_cleanup_qp_table(struct c2_dev *c2dev);
+void c2_set_qp_state(struct c2_qp *, int);
+struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
/* PDs */
-extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
-extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-extern int c2_init_pd_table(struct c2_dev *c2dev);
-extern void c2_cleanup_pd_table(struct c2_dev *c2dev);
+int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
+void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
+int c2_init_pd_table(struct c2_dev *c2dev);
+void c2_cleanup_pd_table(struct c2_dev *c2dev);
/* CQs */
-extern int c2_init_cq(struct c2_dev *c2dev, int entries,
+int c2_init_cq(struct c2_dev *c2dev, int entries,
struct c2_ucontext *ctx, struct c2_cq *cq);
-extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
-extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
-extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
-extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
+void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
+void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
+int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
/* CM */
-extern int c2_llp_connect(struct iw_cm_id *cm_id,
+int c2_llp_connect(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *iw_param);
-extern int c2_llp_accept(struct iw_cm_id *cm_id,
+int c2_llp_accept(struct iw_cm_id *cm_id,
struct iw_cm_conn_param *iw_param);
-extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
+int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
u8 pdata_len);
-extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
-extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);
+int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
+int c2_llp_service_destroy(struct iw_cm_id *cm_id);
/* MM */
-extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
+int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
int page_size, int pbl_depth, u32 length,
u32 off, u64 *va, enum c2_acf acf,
struct c2_mr *mr);
-extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
+int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
/* AE */
-extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
+void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
/* MQSP Allocator */
-extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
+int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
struct sp_chunk **root);
-extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
+void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
+__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
dma_addr_t *dma_addr, gfp_t gfp_mask);
-extern void c2_free_mqsp(__be16* mqsp);
+void c2_free_mqsp(__be16* mqsp);
#endif
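The churn above (and the matching c2_mq.h and c2_vq.h hunks below) is
purely cosmetic: function declarations have external linkage by default in
C, so the extern keyword on a prototype adds nothing, and kernel style
prefers dropping it from new code. The two forms declare the same symbol:

        /* equivalent declarations */
        extern int c2_rnic_init(struct c2_dev *c2dev);
        int c2_rnic_init(struct c2_dev *c2dev);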
diff --git a/drivers/staging/rdma/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
index 3ef881f2da0f..7ad0c082485a 100644
--- a/drivers/staging/rdma/amso1100/c2_cq.c
+++ b/drivers/staging/rdma/amso1100/c2_cq.c
@@ -173,9 +173,6 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
case C2_WR_TYPE_RDMA_READ:
entry->opcode = IB_WC_RDMA_READ;
break;
- case C2_WR_TYPE_BIND_MW:
- entry->opcode = IB_WC_BIND_MW;
- break;
case C2_WR_TYPE_RECV:
entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
entry->opcode = IB_WC_RECV;
diff --git a/drivers/staging/rdma/amso1100/c2_mq.h b/drivers/staging/rdma/amso1100/c2_mq.h
index fc1b9a7cec4b..8e1b4d13409e 100644
--- a/drivers/staging/rdma/amso1100/c2_mq.h
+++ b/drivers/staging/rdma/amso1100/c2_mq.h
@@ -93,14 +93,14 @@ static __inline__ int c2_mq_full(struct c2_mq *q)
return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
}
-extern void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
-extern void *c2_mq_alloc(struct c2_mq *q);
-extern void c2_mq_produce(struct c2_mq *q);
-extern void *c2_mq_consume(struct c2_mq *q);
-extern void c2_mq_free(struct c2_mq *q);
-extern void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
+void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
+void *c2_mq_alloc(struct c2_mq *q);
+void c2_mq_produce(struct c2_mq *q);
+void *c2_mq_consume(struct c2_mq *q);
+void c2_mq_free(struct c2_mq *q);
+void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
-extern void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
+void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
u8 *pool_start, u16 __iomem *peer, u32 type);
#endif /* _C2_MQ_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_provider.c b/drivers/staging/rdma/amso1100/c2_provider.c
index c707e45887c2..de8d10e1bde3 100644
--- a/drivers/staging/rdma/amso1100/c2_provider.c
+++ b/drivers/staging/rdma/amso1100/c2_provider.c
@@ -337,43 +337,21 @@ static inline u32 c2_convert_access(int acc)
C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
}
-static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 * iova_start)
+static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
{
struct c2_mr *mr;
u64 *page_list;
- u32 total_len;
- int err, i, j, k, page_shift, pbl_depth;
+ const u32 total_len = 0xffffffff; /* AMSO1100 limit */
+ int err, page_shift, pbl_depth, i;
+ u64 kva = 0;
- pbl_depth = 0;
- total_len = 0;
+ pr_debug("%s:%u\n", __func__, __LINE__);
- page_shift = PAGE_SHIFT;
/*
- * If there is only 1 buffer we assume this could
- * be a map of all phy mem...use a 32k page_shift.
+ * This is a map of all phy mem...use a 32k page_shift.
*/
- if (num_phys_buf == 1)
- page_shift += 3;
-
- for (i = 0; i < num_phys_buf; i++) {
-
- if (offset_in_page(buffer_list[i].addr)) {
- pr_debug("Unaligned Memory Buffer: 0x%x\n",
- (unsigned int) buffer_list[i].addr);
- return ERR_PTR(-EINVAL);
- }
-
- if (!buffer_list[i].size) {
- pr_debug("Invalid Buffer Size\n");
- return ERR_PTR(-EINVAL);
- }
-
- total_len += buffer_list[i].size;
- pbl_depth += ALIGN(buffer_list[i].size,
- BIT(page_shift)) >> page_shift;
- }
+ page_shift = PAGE_SHIFT + 3;
+ pbl_depth = ALIGN(total_len, BIT(page_shift)) >> page_shift;
page_list = vmalloc(sizeof(u64) * pbl_depth);
if (!page_list) {
@@ -382,16 +360,8 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
return ERR_PTR(-ENOMEM);
}
- for (i = 0, j = 0; i < num_phys_buf; i++) {
-
- int naddrs;
-
- naddrs = ALIGN(buffer_list[i].size,
- BIT(page_shift)) >> page_shift;
- for (k = 0; k < naddrs; k++)
- page_list[j++] = (buffer_list[i].addr +
- (k << page_shift));
- }
+ for (i = 0; i < pbl_depth; i++)
+ page_list[i] = (i << page_shift);
mr = kmalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
@@ -399,17 +369,17 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
return ERR_PTR(-ENOMEM);
}
- mr->pd = to_c2pd(ib_pd);
+ mr->pd = to_c2pd(pd);
mr->umem = NULL;
pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
"*iova_start %llx, first pa %llx, last pa %llx\n",
__func__, page_shift, pbl_depth, total_len,
- (unsigned long long) *iova_start,
+ (unsigned long long) kva,
(unsigned long long) page_list[0],
(unsigned long long) page_list[pbl_depth-1]);
- err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
+ err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), page_list,
BIT(page_shift), pbl_depth,
- total_len, 0, iova_start,
+ total_len, 0, &kva,
c2_convert_access(acc), mr);
vfree(page_list);
if (err) {
@@ -420,19 +390,6 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
return &mr->ibmr;
}
-static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct ib_phys_buf bl;
- u64 kva = 0;
-
- pr_debug("%s:%u\n", __func__, __LINE__);
-
- /* AMSO1100 limit */
- bl.size = 0xffffffff;
- bl.addr = 0;
- return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
-}
-
static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{
@@ -620,12 +577,9 @@ static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
- int err;
-
pr_debug("%s:%u\n", __func__, __LINE__);
- err = c2_llp_reject(cm_id, pdata, pdata_len);
- return err;
+ return c2_llp_reject(cm_id, pdata, pdata_len);
}
static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
@@ -642,12 +596,9 @@ static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
static int c2_service_destroy(struct iw_cm_id *cm_id)
{
- int err;
pr_debug("%s:%u\n", __func__, __LINE__);
- err = c2_llp_service_destroy(cm_id);
-
- return err;
+ return c2_llp_service_destroy(cm_id);
}
static int c2_pseudo_up(struct net_device *netdev)
@@ -846,7 +797,6 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.destroy_cq = c2_destroy_cq;
dev->ibdev.poll_cq = c2_poll_cq;
dev->ibdev.get_dma_mr = c2_get_dma_mr;
- dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
dev->ibdev.reg_user_mr = c2_reg_user_mr;
dev->ibdev.dereg_mr = c2_dereg_mr;
dev->ibdev.get_port_immutable = c2_port_immutable;
diff --git a/drivers/staging/rdma/amso1100/c2_rnic.c b/drivers/staging/rdma/amso1100/c2_rnic.c
index d3c0f77767d9..5e65c6d07ca4 100644
--- a/drivers/staging/rdma/amso1100/c2_rnic.c
+++ b/drivers/staging/rdma/amso1100/c2_rnic.c
@@ -81,7 +81,6 @@
static int c2_adapter_init(struct c2_dev *c2dev)
{
struct c2wr_init_req wr;
- int err;
memset(&wr, 0, sizeof(wr));
c2_wr_set_id(&wr, CCWR_INIT);
@@ -94,9 +93,7 @@ static int c2_adapter_init(struct c2_dev *c2dev)
wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
/* Post the init message */
- err = vq_send_wr(c2dev, (union c2wr *) & wr);
-
- return err;
+ return vq_send_wr(c2dev, (union c2wr *) & wr);
}
/*
diff --git a/drivers/staging/rdma/amso1100/c2_vq.h b/drivers/staging/rdma/amso1100/c2_vq.h
index 33805627a607..c1f6cef60213 100644
--- a/drivers/staging/rdma/amso1100/c2_vq.h
+++ b/drivers/staging/rdma/amso1100/c2_vq.h
@@ -47,17 +47,17 @@ struct c2_vq_req {
struct c2_qp *qp;
};
-extern int vq_init(struct c2_dev *c2dev);
-extern void vq_term(struct c2_dev *c2dev);
+int vq_init(struct c2_dev *c2dev);
+void vq_term(struct c2_dev *c2dev);
-extern struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
-extern void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
-extern void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
-extern void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
-extern int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
+struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
+void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
+void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
+void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
+int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
-extern void *vq_repbuf_alloc(struct c2_dev *c2dev);
-extern void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
+void *vq_repbuf_alloc(struct c2_dev *c2dev);
+void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
-extern int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
+int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
#endif /* _C2_VQ_H_ */
diff --git a/drivers/staging/rdma/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
index 465926319f3d..94e088c2d989 100644
--- a/drivers/staging/rdma/ehca/ehca_av.c
+++ b/drivers/staging/rdma/ehca/ehca_av.c
@@ -105,6 +105,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
if (ehca_static_rate < 0) {
u32 ipd;
+
if (ehca_calc_ipd(shca, ah_attr->port_num,
ah_attr->static_rate, &ipd)) {
ret = -EINVAL;
@@ -128,6 +129,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
+
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(pd->device, ah_attr->port_num,
&port_attr);
@@ -192,6 +194,7 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
+
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(ah->device, ah_attr->port_num,
&port_attr);
@@ -272,6 +275,5 @@ int ehca_init_av_cache(void)
void ehca_cleanup_av_cache(void)
{
- if (av_cache)
- kmem_cache_destroy(av_cache);
+ kmem_cache_destroy(av_cache);
}
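The dropped "if (av_cache)" guard here (and the identical ehca_cq,
ehca_pd, ehca_qp and ehca_mrmw cleanups later in this diff) relies on
kmem_cache_destroy() being NULL-safe: like kfree(), it returns immediately
when passed a NULL pointer, so the cleanup path reduces to:

        void ehca_cleanup_av_cache(void)
        {
                kmem_cache_destroy(av_cache);  /* NULL-safe, no guard needed */
        }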
diff --git a/drivers/staging/rdma/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
index bd45e0f3923f..e8c3387d7aaa 100644
--- a/drivers/staging/rdma/ehca/ehca_classes.h
+++ b/drivers/staging/rdma/ehca/ehca_classes.h
@@ -316,9 +316,8 @@ struct ehca_mr_pginfo {
union {
struct { /* type EHCA_MR_PGI_PHYS section */
- int num_phys_buf;
- struct ib_phys_buf *phys_buf_array;
- u64 next_buf;
+ u64 addr;
+ u16 size;
} phy;
struct { /* type EHCA_MR_PGI_USER section */
struct ib_umem *region;
diff --git a/drivers/staging/rdma/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
index ea1b5c1203b4..1aa7931fe860 100644
--- a/drivers/staging/rdma/ehca/ehca_cq.c
+++ b/drivers/staging/rdma/ehca/ehca_cq.c
@@ -393,6 +393,5 @@ int ehca_init_cq_cache(void)
void ehca_cleanup_cq_cache(void)
{
- if (cq_cache)
- kmem_cache_destroy(cq_cache);
+ kmem_cache_destroy(cq_cache);
}
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
index 80e6a3d5df3e..cca5933fcda6 100644
--- a/drivers/staging/rdma/ehca/ehca_iverbs.h
+++ b/drivers/staging/rdma/ehca/ehca_iverbs.h
@@ -80,30 +80,14 @@ int ehca_destroy_ah(struct ib_ah *ah);
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags, u64 *iova_start);
-
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int mr_access_flags,
struct ib_udata *udata);
-int ehca_rereg_phys_mr(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf, int mr_access_flags, u64 *iova_start);
-
-int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
-
int ehca_dereg_mr(struct ib_mr *mr);
struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
- struct ib_mw_bind *mw_bind);
-
int ehca_dealloc_mw(struct ib_mw *mw);
struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
index 8246418cd4e0..832f22f40862 100644
--- a/drivers/staging/rdma/ehca/ehca_main.c
+++ b/drivers/staging/rdma/ehca/ehca_main.c
@@ -245,8 +245,7 @@ static void ehca_destroy_slab_caches(void)
ehca_cleanup_cq_cache();
ehca_cleanup_pd_cache();
#ifdef CONFIG_PPC_64K_PAGES
- if (ctblk_cache)
- kmem_cache_destroy(ctblk_cache);
+ kmem_cache_destroy(ctblk_cache);
#endif
}
@@ -512,13 +511,9 @@ static int ehca_init_device(struct ehca_shca *shca)
shca->ib_device.req_notify_cq = ehca_req_notify_cq;
/* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
shca->ib_device.get_dma_mr = ehca_get_dma_mr;
- shca->ib_device.reg_phys_mr = ehca_reg_phys_mr;
shca->ib_device.reg_user_mr = ehca_reg_user_mr;
- shca->ib_device.query_mr = ehca_query_mr;
shca->ib_device.dereg_mr = ehca_dereg_mr;
- shca->ib_device.rereg_phys_mr = ehca_rereg_phys_mr;
shca->ib_device.alloc_mw = ehca_alloc_mw;
- shca->ib_device.bind_mw = ehca_bind_mw;
shca->ib_device.dealloc_mw = ehca_dealloc_mw;
shca->ib_device.alloc_fmr = ehca_alloc_fmr;
shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
index f914b30999f8..3367205e3160 100644
--- a/drivers/staging/rdma/ehca/ehca_mrmw.c
+++ b/drivers/staging/rdma/ehca/ehca_mrmw.c
@@ -196,120 +196,6 @@ get_dma_mr_exit0:
/*----------------------------------------------------------------------*/
-struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start)
-{
- struct ib_mr *ib_mr;
- int ret;
- struct ehca_mr *e_mr;
- struct ehca_shca *shca =
- container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-
- u64 size;
-
- if ((num_phys_buf <= 0) || !phys_buf_array) {
- ehca_err(pd->device, "bad input values: num_phys_buf=%x "
- "phys_buf_array=%p", num_phys_buf, phys_buf_array);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_phys_mr_exit0;
- }
- if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
- ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
- /*
- * Remote Write Access requires Local Write Access
- * Remote Atomic Access requires Local Write Access
- */
- ehca_err(pd->device, "bad input values: mr_access_flags=%x",
- mr_access_flags);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_phys_mr_exit0;
- }
-
- /* check physical buffer list and calculate size */
- ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
- iova_start, &size);
- if (ret) {
- ib_mr = ERR_PTR(ret);
- goto reg_phys_mr_exit0;
- }
- if ((size == 0) ||
- (((u64)iova_start + size) < (u64)iova_start)) {
- ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
- size, iova_start);
- ib_mr = ERR_PTR(-EINVAL);
- goto reg_phys_mr_exit0;
- }
-
- e_mr = ehca_mr_new();
- if (!e_mr) {
- ehca_err(pd->device, "out of memory");
- ib_mr = ERR_PTR(-ENOMEM);
- goto reg_phys_mr_exit0;
- }
-
- /* register MR on HCA */
- if (ehca_mr_is_maxmr(size, iova_start)) {
- e_mr->flags |= EHCA_MR_FLAG_MAXMR;
- ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
- e_pd, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey);
- if (ret) {
- ib_mr = ERR_PTR(ret);
- goto reg_phys_mr_exit1;
- }
- } else {
- struct ehca_mr_pginfo pginfo;
- u32 num_kpages;
- u32 num_hwpages;
- u64 hw_pgsize;
-
- num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
- PAGE_SIZE);
- /* for kernel space we try most possible pgsize */
- hw_pgsize = ehca_get_max_hwpage_size(shca);
- num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
- hw_pgsize);
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_kpages = num_kpages;
- pginfo.hwpage_size = hw_pgsize;
- pginfo.num_hwpages = num_hwpages;
- pginfo.u.phy.num_phys_buf = num_phys_buf;
- pginfo.u.phy.phys_buf_array = phys_buf_array;
- pginfo.next_hwpage =
- ((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
-
- ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
- e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
- if (ret) {
- ib_mr = ERR_PTR(ret);
- goto reg_phys_mr_exit1;
- }
- }
-
- /* successful registration of all pages */
- return &e_mr->ib.ib_mr;
-
-reg_phys_mr_exit1:
- ehca_mr_delete(e_mr);
-reg_phys_mr_exit0:
- if (IS_ERR(ib_mr))
- ehca_err(pd->device, "h_ret=%li pd=%p phys_buf_array=%p "
- "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
- PTR_ERR(ib_mr), pd, phys_buf_array,
- num_phys_buf, mr_access_flags, iova_start);
- return ib_mr;
-} /* end ehca_reg_phys_mr() */
-
-/*----------------------------------------------------------------------*/
-
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int mr_access_flags,
struct ib_udata *udata)
@@ -437,207 +323,6 @@ reg_user_mr_exit0:
/*----------------------------------------------------------------------*/
-int ehca_rereg_phys_mr(struct ib_mr *mr,
- int mr_rereg_mask,
- struct ib_pd *pd,
- struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- int mr_access_flags,
- u64 *iova_start)
-{
- int ret;
-
- struct ehca_shca *shca =
- container_of(mr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
- u64 new_size;
- u64 *new_start;
- u32 new_acl;
- struct ehca_pd *new_pd;
- u32 tmp_lkey, tmp_rkey;
- unsigned long sl_flags;
- u32 num_kpages = 0;
- u32 num_hwpages = 0;
- struct ehca_mr_pginfo pginfo;
-
- if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
- /* TODO not supported, because PHYP rereg hCall needs pages */
- ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
- "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
-
- if (mr_rereg_mask & IB_MR_REREG_PD) {
- if (!pd) {
- ehca_err(mr->device, "rereg with bad pd, pd=%p "
- "mr_rereg_mask=%x", pd, mr_rereg_mask);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
- }
-
- if ((mr_rereg_mask &
- ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
- (mr_rereg_mask == 0)) {
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
-
- /* check other parameters */
- if (e_mr == shca->maxmr) {
- /* should be impossible, however reject to be sure */
- ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
- "shca->maxmr=%p mr->lkey=%x",
- mr, shca->maxmr, mr->lkey);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
- if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
- if (e_mr->flags & EHCA_MR_FLAG_FMR) {
- ehca_err(mr->device, "not supported for FMR, mr=%p "
- "flags=%x", mr, e_mr->flags);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
- if (!phys_buf_array || num_phys_buf <= 0) {
- ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
- " phys_buf_array=%p num_phys_buf=%x",
- mr_rereg_mask, phys_buf_array, num_phys_buf);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
- }
- if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
- (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
- ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
- /*
- * Remote Write Access requires Local Write Access
- * Remote Atomic Access requires Local Write Access
- */
- ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
- "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
- ret = -EINVAL;
- goto rereg_phys_mr_exit0;
- }
-
- /* set requested values dependent on rereg request */
- spin_lock_irqsave(&e_mr->mrlock, sl_flags);
- new_start = e_mr->start;
- new_size = e_mr->size;
- new_acl = e_mr->acl;
- new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
-
- if (mr_rereg_mask & IB_MR_REREG_TRANS) {
- u64 hw_pgsize = ehca_get_max_hwpage_size(shca);
-
- new_start = iova_start; /* change address */
- /* check physical buffer list and calculate size */
- ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
- num_phys_buf, iova_start,
- &new_size);
- if (ret)
- goto rereg_phys_mr_exit1;
- if ((new_size == 0) ||
- (((u64)iova_start + new_size) < (u64)iova_start)) {
- ehca_err(mr->device, "bad input values: new_size=%llx "
- "iova_start=%p", new_size, iova_start);
- ret = -EINVAL;
- goto rereg_phys_mr_exit1;
- }
- num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
- new_size, PAGE_SIZE);
- num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
- new_size, hw_pgsize);
- memset(&pginfo, 0, sizeof(pginfo));
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_kpages = num_kpages;
- pginfo.hwpage_size = hw_pgsize;
- pginfo.num_hwpages = num_hwpages;
- pginfo.u.phy.num_phys_buf = num_phys_buf;
- pginfo.u.phy.phys_buf_array = phys_buf_array;
- pginfo.next_hwpage =
- ((u64)iova_start & ~PAGE_MASK) / hw_pgsize;
- }
- if (mr_rereg_mask & IB_MR_REREG_ACCESS)
- new_acl = mr_access_flags;
- if (mr_rereg_mask & IB_MR_REREG_PD)
- new_pd = container_of(pd, struct ehca_pd, ib_pd);
-
- ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
- new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
- if (ret)
- goto rereg_phys_mr_exit1;
-
- /* successful reregistration */
- if (mr_rereg_mask & IB_MR_REREG_PD)
- mr->pd = pd;
- mr->lkey = tmp_lkey;
- mr->rkey = tmp_rkey;
-
-rereg_phys_mr_exit1:
- spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
-rereg_phys_mr_exit0:
- if (ret)
- ehca_err(mr->device, "ret=%i mr=%p mr_rereg_mask=%x pd=%p "
- "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
- "iova_start=%p",
- ret, mr, mr_rereg_mask, pd, phys_buf_array,
- num_phys_buf, mr_access_flags, iova_start);
- return ret;
-} /* end ehca_rereg_phys_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
-{
- int ret = 0;
- u64 h_ret;
- struct ehca_shca *shca =
- container_of(mr->device, struct ehca_shca, ib_device);
- struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
- unsigned long sl_flags;
- struct ehca_mr_hipzout_parms hipzout;
-
- if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
- ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
- "e_mr->flags=%x", mr, e_mr, e_mr->flags);
- ret = -EINVAL;
- goto query_mr_exit0;
- }
-
- memset(mr_attr, 0, sizeof(struct ib_mr_attr));
- spin_lock_irqsave(&e_mr->mrlock, sl_flags);
-
- h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
- if (h_ret != H_SUCCESS) {
- ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
- "hca_hndl=%llx mr_hndl=%llx lkey=%x",
- h_ret, mr, shca->ipz_hca_handle.handle,
- e_mr->ipz_mr_handle.handle, mr->lkey);
- ret = ehca2ib_return_code(h_ret);
- goto query_mr_exit1;
- }
- mr_attr->pd = mr->pd;
- mr_attr->device_virt_addr = hipzout.vaddr;
- mr_attr->size = hipzout.len;
- mr_attr->lkey = hipzout.lkey;
- mr_attr->rkey = hipzout.rkey;
- ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
-
-query_mr_exit1:
- spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
-query_mr_exit0:
- if (ret)
- ehca_err(mr->device, "ret=%i mr=%p mr_attr=%p",
- ret, mr, mr_attr);
- return ret;
-} /* end ehca_query_mr() */
-
-/*----------------------------------------------------------------------*/
-
int ehca_dereg_mr(struct ib_mr *mr)
{
int ret = 0;
@@ -728,18 +413,6 @@ alloc_mw_exit0:
/*----------------------------------------------------------------------*/
-int ehca_bind_mw(struct ib_qp *qp,
- struct ib_mw *mw,
- struct ib_mw_bind *mw_bind)
-{
- /* TODO: not supported up to now */
- ehca_gen_err("bind MW currently not supported by HCAD");
-
- return -EPERM;
-} /* end ehca_bind_mw() */
-
-/*----------------------------------------------------------------------*/
-
int ehca_dealloc_mw(struct ib_mw *mw)
{
u64 h_ret;
@@ -1616,7 +1289,6 @@ int ehca_reg_internal_maxmr(
u64 *iova_start;
u64 size_maxmr;
struct ehca_mr_pginfo pginfo;
- struct ib_phys_buf ib_pbuf;
u32 num_kpages;
u32 num_hwpages;
u64 hw_pgsize;
@@ -1637,8 +1309,6 @@ int ehca_reg_internal_maxmr(
/* register internal max-MR on HCA */
size_maxmr = ehca_mr_len;
iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
- ib_pbuf.addr = 0;
- ib_pbuf.size = size_maxmr;
num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
PAGE_SIZE);
hw_pgsize = ehca_get_max_hwpage_size(shca);
@@ -1650,8 +1320,8 @@ int ehca_reg_internal_maxmr(
pginfo.num_kpages = num_kpages;
pginfo.num_hwpages = num_hwpages;
pginfo.hwpage_size = hw_pgsize;
- pginfo.u.phy.num_phys_buf = 1;
- pginfo.u.phy.phys_buf_array = &ib_pbuf;
+ pginfo.u.phy.addr = 0;
+ pginfo.u.phy.size = size_maxmr;
ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
&pginfo, &e_mr->ib.ib_mr.lkey,
@@ -1669,7 +1339,6 @@ int ehca_reg_internal_maxmr(
e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
e_mr->ib.ib_mr.uobject = NULL;
atomic_inc(&(e_pd->ib_pd.usecnt));
- atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
*e_maxmr = e_mr;
return 0;
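With ehca_reg_phys_mr() and ehca_rereg_phys_mr() gone, the only remaining
EHCA_MR_PGI_PHYS user is this internal max-MR, which always described
exactly one physical buffer starting at address 0. That is why the
ehca_classes.h hunk could replace the num_phys_buf/phys_buf_array/next_buf
triple with a flat addr/size pair; filling it in is now just:

        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_PHYS;
        pginfo.u.phy.addr = 0;          /* max-MR starts at physical 0 */
        pginfo.u.phy.size = size_maxmr; /* ehca_mr_len, the covered length */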
@@ -1762,61 +1431,6 @@ ehca_dereg_internal_maxmr_exit0:
/*----------------------------------------------------------------------*/
-/*
- * check physical buffer array of MR verbs for validness and
- * calculates MR size
- */
-int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- u64 *iova_start,
- u64 *size)
-{
- struct ib_phys_buf *pbuf = phys_buf_array;
- u64 size_count = 0;
- u32 i;
-
- if (num_phys_buf == 0) {
- ehca_gen_err("bad phys buf array len, num_phys_buf=0");
- return -EINVAL;
- }
- /* check first buffer */
- if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
- ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
- "pbuf->addr=%llx pbuf->size=%llx",
- iova_start, pbuf->addr, pbuf->size);
- return -EINVAL;
- }
- if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
- (num_phys_buf > 1)) {
- ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
- "pbuf->size=%llx", pbuf->addr, pbuf->size);
- return -EINVAL;
- }
-
- for (i = 0; i < num_phys_buf; i++) {
- if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
- ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
- "pbuf->size=%llx",
- i, pbuf->addr, pbuf->size);
- return -EINVAL;
- }
- if (((i > 0) && /* not 1st */
- (i < (num_phys_buf - 1)) && /* not last */
- (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
- ehca_gen_err("bad size, i=%x pbuf->size=%llx",
- i, pbuf->size);
- return -EINVAL;
- }
- size_count += pbuf->size;
- pbuf++;
- }
-
- *size = size_count;
- return 0;
-} /* end ehca_mr_chk_buf_and_calc_size() */
-
-/*----------------------------------------------------------------------*/
-
/* check page list of map FMR verb for validness */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
u64 *page_list,
@@ -2002,57 +1616,54 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
u32 number, u64 *kpage)
{
int ret = 0;
- struct ib_phys_buf *pbuf;
+ u64 addr = pginfo->u.phy.addr;
+ u64 size = pginfo->u.phy.size;
u64 num_hw, offs_hw;
u32 i = 0;
- /* loop over desired phys_buf_array entries */
- while (i < number) {
- pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
- num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
- pbuf->size, pginfo->hwpage_size);
- offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
- pginfo->hwpage_size;
- while (pginfo->next_hwpage < offs_hw + num_hw) {
- /* sanity check */
- if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
- (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
- ehca_gen_err("kpage_cnt >= num_kpages, "
- "kpage_cnt=%llx num_kpages=%llx "
- "hwpage_cnt=%llx "
- "num_hwpages=%llx i=%x",
- pginfo->kpage_cnt,
- pginfo->num_kpages,
- pginfo->hwpage_cnt,
- pginfo->num_hwpages, i);
- return -EFAULT;
- }
- *kpage = (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
- (pginfo->next_hwpage * pginfo->hwpage_size);
- if ( !(*kpage) && pbuf->addr ) {
- ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
- "next_hwpage=%llx", pbuf->addr,
- pbuf->size, pginfo->next_hwpage);
- return -EFAULT;
- }
- (pginfo->hwpage_cnt)++;
- (pginfo->next_hwpage)++;
- if (PAGE_SIZE >= pginfo->hwpage_size) {
- if (pginfo->next_hwpage %
- (PAGE_SIZE / pginfo->hwpage_size) == 0)
- (pginfo->kpage_cnt)++;
- } else
- pginfo->kpage_cnt += pginfo->hwpage_size /
- PAGE_SIZE;
- kpage++;
- i++;
- if (i >= number) break;
+ num_hw = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
+ pginfo->hwpage_size);
+ offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
+
+ while (pginfo->next_hwpage < offs_hw + num_hw) {
+ /* sanity check */
+ if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
+ (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
+ ehca_gen_err("kpage_cnt >= num_kpages, "
+ "kpage_cnt=%llx num_kpages=%llx "
+ "hwpage_cnt=%llx "
+ "num_hwpages=%llx i=%x",
+ pginfo->kpage_cnt,
+ pginfo->num_kpages,
+ pginfo->hwpage_cnt,
+ pginfo->num_hwpages, i);
+ return -EFAULT;
}
- if (pginfo->next_hwpage >= offs_hw + num_hw) {
- (pginfo->u.phy.next_buf)++;
- pginfo->next_hwpage = 0;
+ *kpage = (addr & ~(pginfo->hwpage_size - 1)) +
+ (pginfo->next_hwpage * pginfo->hwpage_size);
+ if ( !(*kpage) && addr ) {
+ ehca_gen_err("addr=%llx size=%llx "
+ "next_hwpage=%llx", addr,
+ size, pginfo->next_hwpage);
+ return -EFAULT;
}
+ (pginfo->hwpage_cnt)++;
+ (pginfo->next_hwpage)++;
+ if (PAGE_SIZE >= pginfo->hwpage_size) {
+ if (pginfo->next_hwpage %
+ (PAGE_SIZE / pginfo->hwpage_size) == 0)
+ (pginfo->kpage_cnt)++;
+ } else
+ pginfo->kpage_cnt += pginfo->hwpage_size /
+ PAGE_SIZE;
+ kpage++;
+ i++;
+ if (i >= number) break;
+ }
+ if (pginfo->next_hwpage >= offs_hw + num_hw) {
+ pginfo->next_hwpage = 0;
}
+
return ret;
}
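For the same reason, ehca_set_pagebuf_phys() above loses its outer
per-buffer loop and the u.phy.next_buf bookkeeping; only the inner walk
over hardware pages survives. NUM_CHUNKS() is ehca's round-up division, so
for example a 1 MiB buffer at a 4 KiB offset inside 64 KiB hardware pages
spans 17 pages (a sketch with hwpage_size == 0x10000):

        /* NUM_CHUNKS(len, sz) rounds up: (len + sz - 1) / sz */
        num_hw = NUM_CHUNKS((addr % hwpage_size) + size, hwpage_size);
        /* (0x1000 + 0x100000 + 0xffff) / 0x10000 == 17 hardware pages */
        offs_hw = (addr & ~(hwpage_size - 1)) / hwpage_size;
        /* offs_hw: index of the first hardware page covering addr */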
@@ -2251,10 +1862,8 @@ int ehca_init_mrmw_cache(void)
void ehca_cleanup_mrmw_cache(void)
{
- if (mr_cache)
- kmem_cache_destroy(mr_cache);
- if (mw_cache)
- kmem_cache_destroy(mw_cache);
+ kmem_cache_destroy(mr_cache);
+ kmem_cache_destroy(mw_cache);
}
static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
index 50d8b51306dd..52bfa95697f7 100644
--- a/drivers/staging/rdma/ehca/ehca_mrmw.h
+++ b/drivers/staging/rdma/ehca/ehca_mrmw.h
@@ -98,11 +98,6 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
-int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
- int num_phys_buf,
- u64 *iova_start,
- u64 *size);
-
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
u64 *page_list,
int list_len);
diff --git a/drivers/staging/rdma/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
index 351577a6670a..2a8aae411941 100644
--- a/drivers/staging/rdma/ehca/ehca_pd.c
+++ b/drivers/staging/rdma/ehca/ehca_pd.c
@@ -119,6 +119,5 @@ int ehca_init_pd_cache(void)
void ehca_cleanup_pd_cache(void)
{
- if (pd_cache)
- kmem_cache_destroy(pd_cache);
+ kmem_cache_destroy(pd_cache);
}
diff --git a/drivers/staging/rdma/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
index 2e89356c46fa..896c01f810f6 100644
--- a/drivers/staging/rdma/ehca/ehca_qp.c
+++ b/drivers/staging/rdma/ehca/ehca_qp.c
@@ -2252,6 +2252,5 @@ int ehca_init_qp_cache(void)
void ehca_cleanup_qp_cache(void)
{
- if (qp_cache)
- kmem_cache_destroy(qp_cache);
+ kmem_cache_destroy(qp_cache);
}
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
index 10e2074384f5..11813b880e16 100644
--- a/drivers/staging/rdma/ehca/ehca_reqs.c
+++ b/drivers/staging/rdma/ehca/ehca_reqs.c
@@ -614,7 +614,6 @@ int ehca_post_srq_recv(struct ib_srq *srq,
static const u8 ib_wc_opcode[255] = {
[0x01] = IB_WC_RECV+1,
[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
- [0x04] = IB_WC_BIND_MW+1,
[0x08] = IB_WC_FETCH_ADD+1,
[0x10] = IB_WC_COMP_SWAP+1,
[0x20] = IB_WC_RDMA_WRITE+1,
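The one-liner above is the table-lookup twin of the c2_poll_one() hunk
earlier in this diff: both drop the memory-window bind completion
(IB_WC_BIND_MW) together with the bind_mw verb. Note the +1 encoding in
ib_wc_opcode[]: entries store the IB opcode plus one so that a zero entry
marks an optype the driver does not handle, and the consumer subtracts it
back out. A hypothetical sketch of the lookup (not part of this patch,
report_bad_optype() is an invented helper):

        /* stored value is opcode + 1; 0 flags an unknown optype */
        if (!ib_wc_opcode[cqe->optype])
                return report_bad_optype(cqe);  /* hypothetical helper */
        wc->opcode = ib_wc_opcode[cqe->optype] - 1;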
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile
index 2e5daa6cdcc2..68c5a315e557 100644
--- a/drivers/staging/rdma/hfi1/Makefile
+++ b/drivers/staging/rdma/hfi1/Makefile
@@ -7,7 +7,7 @@
#
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
-hfi1-y := chip.o cq.o device.o diag.o dma.o driver.o eprom.o file_ops.o firmware.o \
+hfi1-y := chip.o cq.o device.o diag.o dma.o driver.o efivar.o eprom.o file_ops.o firmware.o \
init.o intr.o keys.o mad.o mmap.o mr.o pcie.o pio.o pio_copy.o \
qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \
uc.o ud.o user_pages.o user_sdma.o verbs_mcast.o verbs.o
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
index e48981994b10..bbe5ad85cec0 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/staging/rdma/hfi1/chip.c
@@ -63,6 +63,7 @@
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
+#include "efivar.h"
#define NUM_IB_PORTS 1
@@ -121,8 +122,8 @@ struct flag_table {
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
-#define VL15CTXT 1
#define MIN_KERNEL_KCTXTS 2
+#define FIRST_KERNEL_KCTXT 1
#define NUM_MAP_REGS 32
/* Bit offset into the GUID which carries HFI id information */
@@ -663,7 +664,7 @@ static struct flag_table egress_err_info_flags[] = {
*/
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr", SES(CSR_PARITY)),
+/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
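The rename above fixes what looks like a copy-and-paste error: bit 0 of
SendErrStatus is the send CSR parity error, but the flag table carried the
"SDmaRpyTagErr" label from the SDMA error names, so logs reported the
wrong bit. FLAG_ENTRY0() pairs the human-readable string with the status
mask that the SES() helper builds by token pasting:

        #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
        /* SES(CSR_PARITY) -> SEND_ERR_STATUS_SEND_CSR_PARITY_ERR_SMASK */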
@@ -1417,6 +1418,17 @@ static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}
+static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
+}
+
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data)
{
@@ -1538,6 +1550,2336 @@ static u64 access_sw_send_schedule(const struct cntr_entry *entry,
return dd->verbs_dev.n_send_schedule;
}
+/* Software counters for the error status bits within MISC_ERR_STATUS */
+static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[12];
+}
+
+static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[11];
+}
+
+static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[10];
+}
+
+static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[9];
+}
+
+static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[8];
+}
+
+static u64 access_misc_efuse_read_bad_addr_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[7];
+}
+
+static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[6];
+}
+
+static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[5];
+}
+
+static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[4];
+}
+
+static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[3];
+}
+
+static u64 access_misc_csr_write_bad_addr_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[2];
+}
+
+static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[1];
+}
+
+static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->misc_err_status_cnt[0];
+}
+
+/*
+ * Software counter for the aggregate of
+ * individual CceErrStatus counters
+ */
+static u64 access_sw_cce_err_status_aggregated_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_cce_err_status_aggregate;
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within CceErrStatus
+ */
+static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[40];
+}
+
+static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[39];
+}
+
+static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[38];
+}
+
+static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[37];
+}
+
+static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[36];
+}
+
+static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[35];
+}
+
+static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[34];
+}
+
+static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[33];
+}
+
+static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[32];
+}
+
+static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[31];
+}
+
+static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[30];
+}
+
+static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[29];
+}
+
+static u64 access_pcic_transmit_back_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[28];
+}
+
+static u64 access_pcic_transmit_front_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[27];
+}
+
+static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[26];
+}
+
+static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[25];
+}
+
+static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[24];
+}
+
+static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[23];
+}
+
+static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[22];
+}
+
+static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[21];
+}
+
+static u64 access_pcic_n_post_dat_q_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[20];
+}
+
+static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[19];
+}
+
+static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[18];
+}
+
+static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[17];
+}
+
+static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[16];
+}
+
+static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[15];
+}
+
+static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[14];
+}
+
+static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[13];
+}
+
+static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[12];
+}
+
+static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[11];
+}
+
+static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[10];
+}
+
+static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[9];
+}
+
+static u64 access_cce_cli2_async_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[8];
+}
+
+static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[7];
+}
+
+static u64 access_cce_cli0_async_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[6];
+}
+
+static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[5];
+}
+
+static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[4];
+}
+
+static u64 access_cce_trgt_async_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[3];
+}
+
+static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[2];
+}
+
+static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[1];
+}
+
+static u64 access_cce_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->cce_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within RcvErrStatus
+ */
+static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[63];
+}
+
+static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[62];
+}
+
+static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[61];
+}
+
+static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[60];
+}
+
+static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[59];
+}
+
+static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[58];
+}
+
+static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[57];
+}
+
+static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[56];
+}
+
+static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[55];
+}
+
+static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[54];
+}
+
+static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[53];
+}
+
+static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[52];
+}
+
+static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[51];
+}
+
+static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[50];
+}
+
+static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[49];
+}
+
+static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[48];
+}
+
+static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[47];
+}
+
+static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[46];
+}
+
+static u64 access_rx_hq_intr_csr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[45];
+}
+
+static u64 access_rx_lookup_csr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[44];
+}
+
+static u64 access_rx_lookup_rcv_array_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[43];
+}
+
+static u64 access_rx_lookup_rcv_array_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[42];
+}
+
+static u64 access_rx_lookup_des_part2_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[41];
+}
+
+static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[40];
+}
+
+static u64 access_rx_lookup_des_part1_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[39];
+}
+
+static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[38];
+}
+
+static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[37];
+}
+
+static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[36];
+}
+
+static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[35];
+}
+
+static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[34];
+}
+
+static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[33];
+}
+
+static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[32];
+}
+
+static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[31];
+}
+
+static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[30];
+}
+
+static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[29];
+}
+
+static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[28];
+}
+
+static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[27];
+}
+
+static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[26];
+}
+
+static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[25];
+}
+
+static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[24];
+}
+
+static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[23];
+}
+
+static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[22];
+}
+
+static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[21];
+}
+
+static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[20];
+}
+
+static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[19];
+}
+
+static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[18];
+}
+
+static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[17];
+}
+
+static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[16];
+}
+
+static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[15];
+}
+
+static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[14];
+}
+
+static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[13];
+}
+
+static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[12];
+}
+
+static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[11];
+}
+
+static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[10];
+}
+
+static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[9];
+}
+
+static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[8];
+}
+
+static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[7];
+}
+
+static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[6];
+}
+
+static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[5];
+}
+
+static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[4];
+}
+
+static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[3];
+}
+
+static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[2];
+}
+
+static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[1];
+}
+
+static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->rcv_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendPioErrStatus
+ */
+static u64 access_pio_pec_sop_head_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[35];
+}
+
+static u64 access_pio_pcc_sop_head_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[34];
+}
+
+static u64 access_pio_last_returned_cnt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[33];
+}
+
+static u64 access_pio_current_free_cnt_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[32];
+}
+
+static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[31];
+}
+
+static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[30];
+}
+
+static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[29];
+}
+
+static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[28];
+}
+
+static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[27];
+}
+
+static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[26];
+}
+
+static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[25];
+}
+
+static u64 access_pio_block_qw_count_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[24];
+}
+
+static u64 access_pio_write_qw_valid_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[23];
+}
+
+static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[22];
+}
+
+static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[21];
+}
+
+static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[20];
+}
+
+static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[19];
+}
+
+static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[18];
+}
+
+static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[17];
+}
+
+static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[16];
+}
+
+static u64 access_pio_credit_ret_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[15];
+}
+
+static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[14];
+}
+
+static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[13];
+}
+
+static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[12];
+}
+
+static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[11];
+}
+
+static u64 access_pio_sm_pkt_reset_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[10];
+}
+
+static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[9];
+}
+
+static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[8];
+}
+
+static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[7];
+}
+
+static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[6];
+}
+
+static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[5];
+}
+
+static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[4];
+}
+
+static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[3];
+}
+
+static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[2];
+}
+
+static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[1];
+}
+
+static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_pio_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendDmaErrStatus
+ */
+static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_dma_err_status_cnt[3];
+}
+
+static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_dma_err_status_cnt[2];
+}
+
+static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_dma_err_status_cnt[1];
+}
+
+static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_dma_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendEgressErrStatus
+ */
+static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[63];
+}
+
+static u64 access_tx_read_sdma_memory_csr_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[62];
+}
+
+static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[61];
+}
+
+static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[60];
+}
+
+static u64 access_tx_read_sdma_memory_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[59];
+}
+
+static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[58];
+}
+
+static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[57];
+}
+
+static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[56];
+}
+
+static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[55];
+}
+
+static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[54];
+}
+
+static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[53];
+}
+
+static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[52];
+}
+
+static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[51];
+}
+
+static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[50];
+}
+
+static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[49];
+}
+
+static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[48];
+}
+
+static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[47];
+}
+
+static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[46];
+}
+
+static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[45];
+}
+
+static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[44];
+}
+
+static u64 access_tx_read_sdma_memory_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[43];
+}
+
+static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[42];
+}
+
+static u64 access_tx_credit_return_partiy_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[41];
+}
+
+static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[40];
+}
+
+static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[39];
+}
+
+static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[38];
+}
+
+static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[37];
+}
+
+static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[36];
+}
+
+static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[35];
+}
+
+static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[34];
+}
+
+static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[33];
+}
+
+static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[32];
+}
+
+static u64 access_tx_sdma15_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[31];
+}
+
+static u64 access_tx_sdma14_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[30];
+}
+
+static u64 access_tx_sdma13_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[29];
+}
+
+static u64 access_tx_sdma12_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[28];
+}
+
+static u64 access_tx_sdma11_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[27];
+}
+
+static u64 access_tx_sdma10_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[26];
+}
+
+static u64 access_tx_sdma9_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[25];
+}
+
+static u64 access_tx_sdma8_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[24];
+}
+
+static u64 access_tx_sdma7_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[23];
+}
+
+static u64 access_tx_sdma6_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[22];
+}
+
+static u64 access_tx_sdma5_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[21];
+}
+
+static u64 access_tx_sdma4_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[20];
+}
+
+static u64 access_tx_sdma3_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[19];
+}
+
+static u64 access_tx_sdma2_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[18];
+}
+
+static u64 access_tx_sdma1_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[17];
+}
+
+static u64 access_tx_sdma0_disallowed_packet_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[16];
+}
+
+static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[15];
+}
+
+static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[14];
+}
+
+static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[13];
+}
+
+static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[12];
+}
+
+static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[11];
+}
+
+static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[10];
+}
+
+static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[9];
+}
+
+static u64 access_tx_sdma_launch_intf_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[8];
+}
+
+static u64 access_tx_pio_launch_intf_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[7];
+}
+
+static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[6];
+}
+
+static u64 access_tx_incorrect_link_state_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[5];
+}
+
+static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[4];
+}
+
+static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[3];
+}
+
+static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[2];
+}
+
+static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[1];
+}
+
+static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_egress_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendErrStatus
+ */
+static u64 access_send_csr_write_bad_addr_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_err_status_cnt[2];
+}
+
+static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_err_status_cnt[1];
+}
+
+static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->send_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendCtxtErrStatus
+ */
+static u64 access_pio_write_out_of_bounds_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[4];
+}
+
+static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[3];
+}
+
+static u64 access_pio_write_crosses_boundary_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[2];
+}
+
+static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[1];
+}
+
+static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_ctxt_err_status_cnt[0];
+}
+
+/*
+ * Software counters corresponding to each of the
+ * error status bits within SendDmaEngErrStatus
+ */
+static u64 access_sdma_header_request_fifo_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[23];
+}
+
+static u64 access_sdma_header_storage_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[22];
+}
+
+static u64 access_sdma_packet_tracking_cor_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[21];
+}
+
+static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[20];
+}
+
+static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[19];
+}
+
+static u64 access_sdma_header_request_fifo_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[18];
+}
+
+static u64 access_sdma_header_storage_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[17];
+}
+
+static u64 access_sdma_packet_tracking_unc_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[16];
+}
+
+static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[15];
+}
+
+static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[14];
+}
+
+static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[13];
+}
+
+static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[12];
+}
+
+static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[11];
+}
+
+static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[10];
+}
+
+static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[9];
+}
+
+static u64 access_sdma_packet_desc_overflow_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[8];
+}
+
+static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl,
+ int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[7];
+}
+
+static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[6];
+}
+
+static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[5];
+}
+
+static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[4];
+}
+
+static u64 access_sdma_tail_out_of_bounds_err_cnt(
+ const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[3];
+}
+
+static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[2];
+}
+
+static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[1];
+}
+
+static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->sw_send_dma_eng_err_status_cnt[0];
+}
+
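All of the per-bit accessors above share one shape: cast the opaque context pointer back to the hfi1_devdata and return a single slot of the relevant software counter array. A minimal sketch of how the same definitions could be stamped out by a generator macro, in the style of the def_access_sw_cpu() helper that follows; the CNTR_ARRAY_ACCESS name is hypothetical and not part of this patch:

#define CNTR_ARRAY_ACCESS(fname, array, idx)				\
static u64 fname(const struct cntr_entry *entry, void *context,	\
		 int vl, int mode, u64 data)				\
{									\
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;	\
									\
	return dd->array[idx];						\
}

/* Hypothetical expansion, equivalent to the hand-written accessor above: */
CNTR_ARRAY_ACCESS(access_sdma_wrong_dw_err_cnt, sw_send_dma_eng_err_status_cnt, 0)

Each expansion keeps the rw_cntr callback signature that struct cntr_entry expects, so the CNTR_ELEM() wiring in the dev_cntrs[] table below would not change.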
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
void *context, int vl, int mode, u64 data) \
@@ -1587,8 +3929,6 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
RCV_TID_FLOW_GEN_MISMATCH_CNT,
CNTR_NORMAL),
-[C_RX_CTX_RHQS] = RXE32_DEV_CNTR_ELEM(RxCtxRHQS, RCV_CONTEXT_RHQ_STALL,
- CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
@@ -1730,6 +4070,794 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
access_sw_send_schedule),
+/* MISC_ERR_STATUS */
+[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_pll_lock_fail_err_cnt),
+[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_mbist_fail_err_cnt),
+[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_invalid_eep_cmd_err_cnt),
+[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_efuse_done_parity_err_cnt),
+[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_efuse_write_err_cnt),
+[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
+ 0, CNTR_NORMAL,
+ access_misc_efuse_read_bad_addr_err_cnt),
+[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_efuse_csr_parity_err_cnt),
+[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_fw_auth_failed_err_cnt),
+[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_key_mismatch_err_cnt),
+[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_sbus_write_failed_err_cnt),
+[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_csr_write_bad_addr_err_cnt),
+[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_csr_read_bad_addr_err_cnt),
+[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
+ CNTR_NORMAL,
+ access_misc_csr_parity_err_cnt),
+/* CceErrStatus */
+[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
+ CNTR_NORMAL,
+ access_sw_cce_err_status_aggregated_cnt),
+[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_msix_csr_parity_err_cnt),
+[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_int_map_unc_err_cnt),
+[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_int_map_cor_err_cnt),
+[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_msix_table_unc_err_cnt),
+[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_msix_table_cor_err_cnt),
+[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_rxdma_conv_fifo_parity_err_cnt),
+[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_rcpl_async_fifo_parity_err_cnt),
+[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_seg_write_bad_addr_err_cnt),
+[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_seg_read_bad_addr_err_cnt),
+[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
+ CNTR_NORMAL,
+ access_la_triggered_cnt),
+[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_trgt_cpl_timeout_err_cnt),
+[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_receive_parity_err_cnt),
+[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_transmit_back_parity_err_cnt),
+[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_pcic_transmit_front_parity_err_cnt),
+[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_cpl_dat_q_unc_err_cnt),
+[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_cpl_hd_q_unc_err_cnt),
+[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_post_dat_q_unc_err_cnt),
+[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_post_hd_q_unc_err_cnt),
+[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_retry_sot_mem_unc_err_cnt),
+[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_retry_mem_unc_err),
+[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_n_post_dat_q_parity_err_cnt),
+[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_n_post_h_q_parity_err_cnt),
+[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_cpl_dat_q_cor_err_cnt),
+[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_cpl_hd_q_cor_err_cnt),
+[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_post_dat_q_cor_err_cnt),
+[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_post_hd_q_cor_err_cnt),
+[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_retry_sot_mem_cor_err_cnt),
+[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pcic_retry_mem_cor_err_cnt),
+[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
+			"CceCli1AsyncFifoDbgParityErr", 0, 0,
+			CNTR_NORMAL,
+			access_cce_cli1_async_fifo_dbg_parity_err_cnt),
+[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
+			"CceCli1AsyncFifoRxdmaParityErr", 0, 0,
+			CNTR_NORMAL,
+			access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
+[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
+ "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
+[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
+			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
+			CNTR_NORMAL,
+			access_cce_cli1_async_fifo_pio_crdt_parity_err_cnt),
+[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_cli2_async_fifo_parity_err_cnt),
+[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_csr_cfg_bus_parity_err_cnt),
+[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_cli0_async_fifo_parity_err_cnt),
+[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_rspd_data_parity_err_cnt),
+[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_trgt_access_err_cnt),
+[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_cce_trgt_async_fifo_parity_err_cnt),
+[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_csr_write_bad_addr_err_cnt),
+[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_cce_csr_read_bad_addr_err_cnt),
+[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_ccs_csr_parity_err_cnt),
+
+/* RcvErrStatus */
+[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_csr_parity_err_cnt),
+[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_csr_write_bad_addr_err_cnt),
+[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_csr_read_bad_addr_err_cnt),
+[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_csr_unc_err_cnt),
+[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_dq_fsm_encoding_err_cnt),
+[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_eq_fsm_encoding_err_cnt),
+[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_csr_parity_err_cnt),
+[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_data_cor_err_cnt),
+[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_data_unc_err_cnt),
+[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_data_fifo_rd_cor_err_cnt),
+[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_data_fifo_rd_unc_err_cnt),
+[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_hdr_fifo_rd_cor_err_cnt),
+[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_hdr_fifo_rd_unc_err_cnt),
+[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_desc_part2_cor_err_cnt),
+[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_desc_part2_unc_err_cnt),
+[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_desc_part1_cor_err_cnt),
+[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_desc_part1_unc_err_cnt),
+[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_hq_intr_fsm_err_cnt),
+[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_hq_intr_csr_parity_err_cnt),
+[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_lookup_csr_parity_err_cnt),
+[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_lookup_rcv_array_cor_err_cnt),
+[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_lookup_rcv_array_unc_err_cnt),
+[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_lookup_des_part2_parity_err_cnt),
+[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_lookup_des_part1_unc_cor_err_cnt),
+[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_lookup_des_part1_unc_err_cnt),
+[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_next_free_buf_cor_err_cnt),
+[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_next_free_buf_unc_err_cnt),
+[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
+ "RxRbufFlInitWrAddrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rbuf_fl_init_wr_addr_parity_err_cnt),
+[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_fl_initdone_parity_err_cnt),
+[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_fl_write_addr_parity_err_cnt),
+[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_fl_rd_addr_parity_err_cnt),
+[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_empty_err_cnt),
+[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_full_err_cnt),
+[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
+ CNTR_NORMAL,
+ access_rbuf_bad_lookup_err_cnt),
+[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rbuf_ctx_id_parity_err_cnt),
+[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rbuf_csr_qeopdw_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
+ "RxRbufCsrQNumOfPktParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
+ "RxRbufCsrQTlPtrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
+[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
+ "RxRbufCsrQHeadBufNumParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
+[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_block_list_read_cor_err_cnt),
+[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
+ 0, CNTR_NORMAL,
+ access_rx_rbuf_block_list_read_unc_err_cnt),
+[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_lookup_des_cor_err_cnt),
+[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_lookup_des_unc_err_cnt),
+[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
+ "RxRbufLookupDesRegUncCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
+[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_lookup_des_reg_unc_err_cnt),
+[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_free_list_cor_err_cnt),
+[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rbuf_free_list_unc_err_cnt),
+[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_fsm_encoding_err_cnt),
+[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_flag_cor_err_cnt),
+[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_flag_unc_err_cnt),
+[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dc_sop_eop_parity_err_cnt),
+[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_csr_parity_err_cnt),
+[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_qp_map_table_cor_err_cnt),
+[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_qp_map_table_unc_err_cnt),
+[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_data_cor_err_cnt),
+[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_data_unc_err_cnt),
+[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_hdr_cor_err_cnt),
+[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_rcv_hdr_unc_err_cnt),
+[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dc_intf_parity_err_cnt),
+[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_rx_dma_csr_cor_err_cnt),
+/* SendPioErrStatus */
+[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pec_sop_head_parity_err_cnt),
+[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pcc_sop_head_parity_err_cnt),
+[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_pio_last_returned_cnt_parity_err_cnt),
+[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_pio_current_free_cnt_parity_err_cnt),
+[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
+ CNTR_NORMAL,
+ access_pio_reserved_31_err_cnt),
+[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
+ CNTR_NORMAL,
+ access_pio_reserved_30_err_cnt),
+[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_ppmc_sop_len_err_cnt),
+[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_ppmc_bqc_mem_parity_err_cnt),
+[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_vl_fifo_parity_err_cnt),
+[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_vlf_sop_parity_err_cnt),
+[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_vlf_v1_len_parity_err_cnt),
+[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_block_qw_count_parity_err_cnt),
+[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_qw_valid_parity_err_cnt),
+[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_state_machine_err_cnt),
+[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_data_parity_err_cnt),
+[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_host_addr_mem_cor_err_cnt),
+[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_host_addr_mem_unc_err_cnt),
+[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
+[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_init_sm_in_err_cnt),
+[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_ppmc_pbl_fifo_err_cnt),
+[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
+ 0, CNTR_NORMAL,
+ access_pio_credit_ret_fifo_parity_err_cnt),
+[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_v1_len_mem_bank1_cor_err_cnt),
+[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_v1_len_mem_bank0_cor_err_cnt),
+[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_v1_len_mem_bank1_unc_err_cnt),
+[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_v1_len_mem_bank0_unc_err_cnt),
+[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sm_pkt_reset_parity_err_cnt),
+[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pkt_evict_fifo_parity_err_cnt),
+[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
+ "PioSbrdctrlCrrelFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
+[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sbrdctl_crrel_parity_err_cnt),
+[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pec_fifo_parity_err_cnt),
+[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_pcc_fifo_parity_err_cnt),
+[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sb_mem_fifo1_err_cnt),
+[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
+ CNTR_NORMAL,
+ access_pio_sb_mem_fifo0_err_cnt),
+[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_csr_parity_err_cnt),
+[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_addr_parity_err_cnt),
+[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_bad_ctxt_err_cnt),
+/* SendDmaErrStatus */
+[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
+ 0, CNTR_NORMAL,
+ access_sdma_pcie_req_tracking_cor_err_cnt),
+[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
+ 0, CNTR_NORMAL,
+ access_sdma_pcie_req_tracking_unc_err_cnt),
+[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_csr_parity_err_cnt),
+[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_rpy_tag_err_cnt),
+/* SendEgressErrStatus */
+[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_pio_memory_csr_unc_err_cnt),
+[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
+ 0, CNTR_NORMAL,
+ access_tx_read_sdma_memory_csr_err_cnt),
+[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_egress_fifo_cor_err_cnt),
+[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_pio_memory_cor_err_cnt),
+[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_sdma_memory_cor_err_cnt),
+[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_sb_hdr_cor_err_cnt),
+[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_credit_overrun_err_cnt),
+[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo8_cor_err_cnt),
+[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo7_cor_err_cnt),
+[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo6_cor_err_cnt),
+[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo5_cor_err_cnt),
+[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo4_cor_err_cnt),
+[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo3_cor_err_cnt),
+[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo2_cor_err_cnt),
+[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo1_cor_err_cnt),
+[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_fifo0_cor_err_cnt),
+[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_credit_return_vl_err_cnt),
+[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_hcrc_insertion_err_cnt),
+[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_egress_fifo_unc_err_cnt),
+[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_pio_memory_unc_err_cnt),
+[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_read_sdma_memory_unc_err_cnt),
+[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_sb_hdr_unc_err_cnt),
+[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_credit_return_partiy_err_cnt),
+[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo8_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo7_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo6_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo5_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo4_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo3_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo2_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo1_unc_or_parity_err_cnt),
+[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_launch_fifo0_unc_or_parity_err_cnt),
+[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma15_disallowed_packet_err_cnt),
+[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma14_disallowed_packet_err_cnt),
+[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma13_disallowed_packet_err_cnt),
+[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma12_disallowed_packet_err_cnt),
+[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma11_disallowed_packet_err_cnt),
+[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma10_disallowed_packet_err_cnt),
+[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma9_disallowed_packet_err_cnt),
+[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma8_disallowed_packet_err_cnt),
+[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma7_disallowed_packet_err_cnt),
+[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma6_disallowed_packet_err_cnt),
+[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma5_disallowed_packet_err_cnt),
+[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma4_disallowed_packet_err_cnt),
+[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma3_disallowed_packet_err_cnt),
+[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma2_disallowed_packet_err_cnt),
+[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma1_disallowed_packet_err_cnt),
+[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma0_disallowed_packet_err_cnt),
+[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_config_parity_err_cnt),
+[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_sbrd_ctl_csr_parity_err_cnt),
+[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_launch_csr_parity_err_cnt),
+[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_illegal_vl_err_cnt),
+[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
+ "TxSbrdCtlStateMachineParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_sbrd_ctl_state_machine_parity_err_cnt),
+[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
+ CNTR_NORMAL,
+ access_egress_reserved_10_err_cnt),
+[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
+ CNTR_NORMAL,
+ access_egress_reserved_9_err_cnt),
+[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
+ 0, 0, CNTR_NORMAL,
+ access_tx_sdma_launch_intf_parity_err_cnt),
+[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_pio_launch_intf_parity_err_cnt),
+[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
+ CNTR_NORMAL,
+ access_egress_reserved_6_err_cnt),
+[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_incorrect_link_state_err_cnt),
+[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_linkdown_err_cnt),
+[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
+ "EgressFifoUnderrunOrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_egress_fifi_underrun_or_parity_err_cnt),
+[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
+ CNTR_NORMAL,
+ access_egress_reserved_2_err_cnt),
+[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_pkt_integrity_mem_unc_err_cnt),
+[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_tx_pkt_integrity_mem_cor_err_cnt),
+/* SendErrStatus */
+[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_send_csr_write_bad_addr_err_cnt),
+[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
+ CNTR_NORMAL,
+ access_send_csr_read_bad_addr_err_cnt),
+[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
+ CNTR_NORMAL,
+ access_send_csr_parity_cnt),
+/* SendCtxtErrStatus */
+[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_out_of_bounds_err_cnt),
+[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_write_overflow_err_cnt),
+[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
+ 0, 0, CNTR_NORMAL,
+ access_pio_write_crosses_boundary_err_cnt),
+[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_disallowed_packet_err_cnt),
+[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
+ CNTR_NORMAL,
+ access_pio_inconsistent_sop_err_cnt),
+/* SendDmaEngErrStatus */
+[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
+ 0, 0, CNTR_NORMAL,
+ access_sdma_header_request_fifo_cor_err_cnt),
+[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_storage_cor_err_cnt),
+[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_packet_tracking_cor_err_cnt),
+[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_assembly_cor_err_cnt),
+[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_desc_table_cor_err_cnt),
+[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
+ 0, 0, CNTR_NORMAL,
+ access_sdma_header_request_fifo_unc_err_cnt),
+[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_storage_unc_err_cnt),
+[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_packet_tracking_unc_err_cnt),
+[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_assembly_unc_err_cnt),
+[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_desc_table_unc_err_cnt),
+[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_timeout_err_cnt),
+[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_length_err_cnt),
+[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_address_err_cnt),
+[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_header_select_err_cnt),
+[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_reserved_9_err_cnt),
+[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_packet_desc_overflow_err_cnt),
+[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_length_mismatch_err_cnt),
+[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_halt_err_cnt),
+[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_mem_read_err_cnt),
+[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_first_desc_err_cnt),
+[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_tail_out_of_bounds_err_cnt),
+[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_too_long_err_cnt),
+[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_gen_mismatch_err_cnt),
+[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
+ CNTR_NORMAL,
+ access_sdma_wrong_dw_err_cnt),
};
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
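[Editor's note] Both of these counter tables rely on C99 designated array initializers: each CNTR_ELEM lands in the slot named by its enum constant, so entries can appear in any order and unnamed slots default to zero. A minimal stand-alone sketch of the pattern — the macro, enum, and access callbacks below are simplified stand-ins, not the driver's definitions:

    #include <stdio.h>

    /* simplified stand-ins for the driver's cntr_entry machinery */
    enum { C_FIRST_ERR, C_SECOND_ERR, NUM_COUNTERS };

    struct cntr_entry {
            const char *name;
            unsigned long long (*access)(void);
    };

    static unsigned long long first_cnt(void)  { return 1; }
    static unsigned long long second_cnt(void) { return 2; }

    #define CNTR_ELEM(nm, fn) { .name = (nm), .access = (fn) }

    /* designated initializers: slot position comes from the enum, not order */
    static struct cntr_entry cntrs[NUM_COUNTERS] = {
    [C_SECOND_ERR] = CNTR_ELEM("SecondErr", second_cnt),
    [C_FIRST_ERR]  = CNTR_ELEM("FirstErr",  first_cnt),
    };

    int main(void)
    {
            int i;

            for (i = 0; i < NUM_COUNTERS; i++)
                    printf("%s = %llu\n", cntrs[i].name, cntrs[i].access());
            return 0;
    }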
@@ -1762,6 +4890,8 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
access_sw_link_up_cnt),
+[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
+ access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
@@ -1873,13 +5003,6 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
/* ======================================================================== */
-/* return true if this is chip revision revision a0 */
-int is_a0(struct hfi1_devdata *dd)
-{
- return ((dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
- & CCE_REVISION_CHIP_REV_MINOR_MASK) == 0;
-}
-
 /* return true if this is chip revision a */
int is_ax(struct hfi1_devdata *dd)
{
@@ -1895,7 +5018,7 @@ int is_bx(struct hfi1_devdata *dd)
u8 chip_rev_minor =
dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
& CCE_REVISION_CHIP_REV_MINOR_MASK;
- return !!(chip_rev_minor & 0x10);
+ return (chip_rev_minor & 0xF0) == 0x10;
}
/*
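[Editor's note] The is_bx() fix above matters because the old test fired for any minor revision with bit 4 set, not just B-step parts; the replacement matches only revisions whose high nibble is exactly 1. A small stand-alone comparison of the two predicates over some assumed revision values:

    #include <stdio.h>

    int main(void)
    {
            unsigned char revs[] = { 0x00, 0x10, 0x11, 0x30, 0x50 };
            unsigned int i;

            for (i = 0; i < sizeof(revs); i++) {
                    unsigned char r = revs[i];

                    /* old: true whenever bit 4 is set; new: high nibble == 1 */
                    printf("rev 0x%02x: old=%d new=%d\n", r,
                           !!(r & 0x10), (r & 0xF0) == 0x10);
            }
            return 0;
    }

For rev 0x30 the old predicate wrongly reports a B-step part; the new one does not.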
@@ -2182,6 +5305,7 @@ static char *send_err_status_string(char *buf, int buf_len, u64 flags)
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
char buf[96];
+ int i = 0;
/*
* For most these errors, there is nothing that can be done except
@@ -2190,13 +5314,20 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd_dev_info(dd, "CCE Error: %s\n",
cce_err_status_string(buf, sizeof(buf), reg));
- if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK)
- && is_a0(dd)
- && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
+ if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
+ is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
/* this error requires a manual drop into SPC freeze mode */
/* then a fix up */
start_freeze_handling(dd->pport, FREEZE_SELF);
}
+
+ for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i)) {
+ incr_cntr64(&dd->cce_err_status_cnt[i]);
+ /* maintain a counter over all cce_err_status errors */
+ incr_cntr64(&dd->sw_cce_err_status_aggregate);
+ }
+ }
}
/*
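[Editor's note] handle_cce_err() and the error handlers that follow all repeat the same per-bit bookkeeping: scan the 64-bit status image and bump the counter whose index matches each set bit. A minimal sketch of that loop, with incr_cntr64() and the driver's counter array replaced by plain stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_STATUS_COUNTERS 43  /* illustrative width, not the real count */

    static uint64_t err_status_cnt[NUM_STATUS_COUNTERS];

    /* count every error currently flagged in a status register image */
    static void count_status_bits(uint64_t reg)
    {
            int i;

            for (i = 0; i < NUM_STATUS_COUNTERS; i++)
                    if (reg & (1ull << i))
                            err_status_cnt[i]++;
    }

    int main(void)
    {
            count_status_bits((1ull << 3) | (1ull << 40));
            printf("bit3=%llu bit40=%llu\n",
                   (unsigned long long)err_status_cnt[3],
                   (unsigned long long)err_status_cnt[40]);
            return 0;
    }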
@@ -2241,6 +5372,7 @@ static void free_rcverr(struct hfi1_devdata *dd)
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
char buf[96];
+ int i = 0;
dd_dev_info(dd, "Receive Error: %s\n",
rxe_err_status_string(buf, sizeof(buf), reg));
@@ -2252,41 +5384,63 @@ static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
* Freeze mode recovery is disabled for the errors
* in RXE_FREEZE_ABORT_MASK
*/
- if (is_a0(dd) && (reg & RXE_FREEZE_ABORT_MASK))
+ if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
flags = FREEZE_ABORT;
start_freeze_handling(dd->pport, flags);
}
+
+ for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->rcv_err_status_cnt[i]);
+ }
}
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
char buf[96];
+ int i = 0;
dd_dev_info(dd, "Misc Error: %s",
misc_err_status_string(buf, sizeof(buf), reg));
+ for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->misc_err_status_cnt[i]);
+ }
}
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
char buf[96];
+ int i = 0;
dd_dev_info(dd, "PIO Error: %s\n",
pio_err_status_string(buf, sizeof(buf), reg));
if (reg & ALL_PIO_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);
+
+ for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->send_pio_err_status_cnt[i]);
+ }
}
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
char buf[96];
+ int i = 0;
dd_dev_info(dd, "SDMA Error: %s\n",
sdma_err_status_string(buf, sizeof(buf), reg));
if (reg & ALL_SDMA_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);
+
+ for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->send_dma_err_status_cnt[i]);
+ }
}
static void count_port_inactive(struct hfi1_devdata *dd)
@@ -2352,10 +5506,11 @@ static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
u64 reg_copy = reg, handled = 0;
char buf[96];
+ int i = 0;
if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);
- if (is_a0(dd) && (reg &
+ if (is_ax(dd) && (reg &
SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
&& (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
start_freeze_handling(dd->pport, 0);
@@ -2383,15 +5538,25 @@ static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
if (reg)
dd_dev_info(dd, "Egress Error: %s\n",
egress_err_status_string(buf, sizeof(buf), reg));
+
+ for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->send_egress_err_status_cnt[i]);
+ }
}
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
char buf[96];
+ int i = 0;
dd_dev_info(dd, "Send Error: %s\n",
send_err_status_string(buf, sizeof(buf), reg));
+ for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
+ if (reg & (1ull << i))
+ incr_cntr64(&dd->send_err_status_cnt[i]);
+ }
}
/*
@@ -2483,6 +5648,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
char flags[96];
u64 status;
u32 sw_index;
+ int i = 0;
sw_index = dd->hw_to_sw[hw_context];
if (sw_index >= dd->num_send_contexts) {
@@ -2516,12 +5682,23 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
*/
if (sc->type != SC_USER)
queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+
+ /*
+ * Update the counters for the corresponding status bits.
+ * Note that these particular counters are aggregated over all
+ * 160 contexts.
+ */
+ for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
+ if (status & (1ull << i))
+ incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
+ }
}
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
unsigned int source, u64 status)
{
struct sdma_engine *sde;
+ int i = 0;
sde = &dd->per_sdma[source];
#ifdef CONFIG_SDMA_VERBOSITY
@@ -2531,6 +5708,16 @@ static void handle_sdma_eng_err(struct hfi1_devdata *dd,
sde->this_idx, source, (unsigned long long)status);
#endif
sdma_engine_error(sde, status);
+
+ /*
+ * Update the counters for the corresponding status bits.
+ * Note that these particular counters are aggregated over
+ * all 16 DMA engines.
+ */
+ for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
+ if (status & (1ull << i))
+ incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
+ }
}
/*
@@ -3050,7 +6237,7 @@ static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
/* else this is _p */
version = emulator_rev(dd);
- if (!is_a0(dd))
+ if (!is_ax(dd))
version = 0x2d; /* all B0 use 0x2d or higher settings */
if (version <= 0x12) {
@@ -3313,7 +6500,6 @@ void handle_freeze(struct work_struct *work)
struct hfi1_devdata *dd = ppd->dd;
/* wait for freeze indicators on all affected blocks */
- dd_dev_info(dd, "Entering SPC freeze\n");
wait_for_freeze_status(dd, 1);
/* SPC is now frozen */
@@ -3336,7 +6522,7 @@ void handle_freeze(struct work_struct *work)
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
wait_for_freeze_status(dd, 0);
- if (is_a0(dd)) {
+ if (is_ax(dd)) {
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
wait_for_freeze_status(dd, 1);
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
@@ -3371,7 +6557,6 @@ void handle_freeze(struct work_struct *work)
wake_up(&dd->event_queue);
/* no longer frozen */
- dd_dev_err(dd, "Exiting SPC freeze\n");
}
/*
@@ -3542,10 +6727,10 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
- /* Sanity check - ppd->pkeys[2] should be 0 */
- if (ppd->pkeys[2] != 0)
- dd_dev_err(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
- __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
+	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
+ if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
+ dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
+ __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
ppd->pkeys[2] = FULL_MGMT_P_KEY;
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
}
@@ -3864,7 +7049,7 @@ void handle_verify_cap(struct work_struct *work)
* REPLAY_BUF_MBE_SMASK
* FLIT_INPUT_BUF_MBE_SMASK
*/
- if (is_a0(dd)) { /* fixed in B0 */
+ if (is_ax(dd)) { /* fixed in B0 */
reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
@@ -3907,18 +7092,32 @@ void handle_verify_cap(struct work_struct *work)
*/
void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
{
- int skip = 1;
int do_bounce = 0;
- u16 lwde = ppd->link_width_downgrade_enabled;
+ int tries;
+ u16 lwde;
u16 tx, rx;
+ /* use the hls lock to avoid a race with actual link up */
+ tries = 0;
+retry:
mutex_lock(&ppd->hls_lock);
/* only apply if the link is up */
- if (ppd->host_link_state & HLS_UP)
- skip = 0;
- mutex_unlock(&ppd->hls_lock);
- if (skip)
- return;
+ if (!(ppd->host_link_state & HLS_UP)) {
+		/* still going up; wait and retry */
+ if (ppd->host_link_state & HLS_GOING_UP) {
+ if (++tries < 1000) {
+ mutex_unlock(&ppd->hls_lock);
+ usleep_range(100, 120); /* arbitrary */
+ goto retry;
+ }
+ dd_dev_err(ppd->dd,
+ "%s: giving up waiting for link state change\n",
+ __func__);
+ }
+ goto done;
+ }
+
+ lwde = ppd->link_width_downgrade_enabled;
if (refresh_widths) {
get_link_widths(ppd->dd, &tx, &rx);
@@ -3956,6 +7155,9 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
do_bounce = 1;
}
+done:
+ mutex_unlock(&ppd->hls_lock);
+
if (do_bounce) {
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
OPA_LINKDOWN_REASON_WIDTH_POLICY);
@@ -4046,6 +7248,11 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
}
err &= ~(u64)FAILED_LNI;
}
+	/* unknown frames can happen during LNI, just count */
+ if (err & UNKNOWN_FRAME) {
+ ppd->unknown_frame_count++;
+ err &= ~(u64)UNKNOWN_FRAME;
+ }
if (err) {
/* report remaining errors, but do not do anything */
dd_dev_err(dd, "8051 info error: %s\n",
@@ -4774,13 +7981,25 @@ int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
*/
static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
{
+ u32 regno;
+ int ret;
- if (acquire_lcb_access(dd, 0) == 0) {
- write_csr(dd, addr, data);
- release_lcb_access(dd, 0);
- return 0;
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
+ (dd->dc8051_ver < dc8051_ver(0, 20))) {
+ if (acquire_lcb_access(dd, 0) == 0) {
+ write_csr(dd, addr, data);
+ release_lcb_access(dd, 0);
+ return 0;
+ }
+ return -EBUSY;
}
- return -EBUSY;
+
+ /* register is an index of LCB registers: (offset - base) / 8 */
+ regno = (addr - DC_LCB_CFG_RUN) >> 3;
+ ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
+ if (ret != HCMD_SUCCESS)
+ return -EBUSY;
+ return 0;
}
/*
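[Editor's note] The regno computation above works because LCB CSRs sit 8 bytes apart, so subtracting the block base and shifting right by 3 turns a byte offset into a register index. A quick stand-alone illustration (the base address here is a made-up value, not the chip's):

    #include <stdint.h>
    #include <stdio.h>

    #define LCB_BASE 0x300000u  /* hypothetical base, for illustration only */

    int main(void)
    {
            uint32_t addr = LCB_BASE + 5 * 8;  /* sixth LCB register */

            /* registers are 64 bits wide, hence the divide-by-8 */
            printf("regno = %u\n", (addr - LCB_BASE) >> 3);
            return 0;
    }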
@@ -4862,6 +8081,26 @@ static int do_8051_command(
*/
/*
+	 * When writing an LCB CSR, out_data contains the full value
+	 * to be written, while in_data contains the relative LCB
+	 * address in 7:0. Do the work of distributing the write data
+	 * to where it needs to go here, rather than in the caller:
+ *
+ * Write data
+ * 39:00 -> in_data[47:8]
+ * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
+ * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
+ */
+ if (type == HCMD_WRITE_LCB_CSR) {
+ in_data |= ((*out_data) & 0xffffffffffull) << 8;
+ reg = ((((*out_data) >> 40) & 0xff) <<
+ DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
+ | ((((*out_data) >> 48) & 0xffff) <<
+ DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
+ write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
+ }
+
+ /*
* Do two writes: the first to stabilize the type and req_data, the
* second to activate.
*/
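[Editor's note] The field packing for HCMD_WRITE_LCB_CSR can be sanity-checked in isolation. A minimal sketch, with the two shift constants as assumed stand-ins for the driver's DC_DC8051_CFG_EXT_DEV_0_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed shift positions, standing in for the real CSR field layout */
    #define RETURN_CODE_SHIFT 0
    #define RSP_DATA_SHIFT    16

    int main(void)
    {
            uint64_t out_data = 0x123456789abcdef0ull;
            uint64_t in_data = 0x40;  /* relative LCB address in bits 7:0 */
            uint64_t reg;

            /* bits 39:0 of the write data ride in in_data[47:8] */
            in_data |= (out_data & 0xffffffffffull) << 8;
            /* bits 47:40 and 63:48 go out through the EXT_DEV_0 fields */
            reg = (((out_data >> 40) & 0xff) << RETURN_CODE_SHIFT) |
                  (((out_data >> 48) & 0xffff) << RSP_DATA_SHIFT);

            printf("in_data=0x%016llx reg=0x%016llx\n",
                   (unsigned long long)in_data, (unsigned long long)reg);
            return 0;
    }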
@@ -5856,6 +9095,23 @@ void init_qsfp(struct hfi1_pportdata *ppd)
}
}
+/*
+ * Do a one-time initialize of the LCB block.
+ */
+static void init_lcb(struct hfi1_devdata *dd)
+{
+ /* the DC has been reset earlier in the driver load */
+
+ /* set LCB for cclk loopback on the port */
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
+ write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
+ write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
+ write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
+ write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
+ write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
+ write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
+}
+
int bringup_serdes(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
@@ -5877,6 +9133,9 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
/* Set linkinit_reason on power up per OPA spec */
ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
+ /* one-time init of the LCB */
+ init_lcb(dd);
+
if (loopback) {
ret = init_loopback(dd);
if (ret < 0)
@@ -6148,7 +9407,8 @@ u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
static void set_send_length(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
- u32 max_hb = lrh_max_header_bytes(dd), maxvlmtu = 0, dcmtu;
+ u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
+ u32 maxvlmtu = dd->vld[15].mtu;
u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
& SEND_LEN_CHECK1_LEN_VL15_MASK) <<
SEND_LEN_CHECK1_LEN_VL15_SHIFT;
@@ -6319,9 +9579,10 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
* depending on how the link went down. The 8051 firmware
* will observe the needed wait time and only move to ready
* when that is completed. The largest of the quiet timeouts
- * is 2.5s, so wait that long and then a bit more.
+ * is 6s, so wait that long and then at least 0.5s more for
+ * other transitions, and another 0.5s for a buffer.
*/
- ret = wait_fm_ready(dd, 3000);
+ ret = wait_fm_ready(dd, 7000);
if (ret) {
dd_dev_err(dd,
"After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
@@ -7235,8 +10496,7 @@ static int set_buffer_control(struct hfi1_devdata *dd,
new_bc->vl[i].shared = 0;
}
new_total += be16_to_cpu(new_bc->overall_shared_limit);
- if (new_total > (u32)dd->link_credits)
- return -EINVAL;
+
/* fetch the current values */
get_buffer_control(dd, &cur_bc, &cur_total);
@@ -7282,8 +10542,8 @@ static int set_buffer_control(struct hfi1_devdata *dd,
*/
use_all_mask = 0;
if ((be16_to_cpu(new_bc->overall_shared_limit) <
- be16_to_cpu(cur_bc.overall_shared_limit))
- || (is_a0(dd) && any_shared_limit_changing)) {
+ be16_to_cpu(cur_bc.overall_shared_limit)) ||
+ (is_ax(dd) && any_shared_limit_changing)) {
set_global_shared(dd, 0);
cur_bc.overall_shared_limit = 0;
use_all_mask = 1;
@@ -7457,7 +10717,7 @@ int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
*/
static int disable_data_vls(struct hfi1_devdata *dd)
{
- if (is_a0(dd))
+ if (is_ax(dd))
return 1;
pio_send_control(dd, PSC_DATA_VL_DISABLE);
@@ -7475,7 +10735,7 @@ static int disable_data_vls(struct hfi1_devdata *dd)
*/
int open_fill_data_vls(struct hfi1_devdata *dd)
{
- if (is_a0(dd))
+ if (is_ax(dd))
return 1;
pio_send_control(dd, PSC_DATA_VL_ENABLE);
@@ -7748,11 +11008,22 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
& RCV_TID_CTRL_TID_BASE_INDEX_MASK)
<< RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
- if (ctxt == VL15CTXT)
- write_csr(dd, RCV_VL15, VL15CTXT);
+ if (ctxt == HFI1_CTRL_CTXT)
+ write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
}
if (op & HFI1_RCVCTRL_CTXT_DIS) {
write_csr(dd, RCV_VL15, 0);
+ /*
+	 * When a receive context is being disabled, turn on tail
+	 * update with a dummy tail address and then disable the
+	 * receive context.
+ */
+ if (dd->rcvhdrtail_dummy_physaddr) {
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ dd->rcvhdrtail_dummy_physaddr);
+ rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ }
+
rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
}
if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
@@ -7822,10 +11093,11 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
/*
* If the context has been disabled and the Tail Update has
- * been cleared, clear the RCV_HDR_TAIL_ADDR CSR so
- * it doesn't contain an address that is invalid.
+	 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to a dummy address
+ * so it doesn't contain an address that is invalid.
*/
- write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 0);
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ dd->rcvhdrtail_dummy_physaddr);
}
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
@@ -8785,7 +12057,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
/* turn off interrupts */
if (dd->num_msix_entries) {
/* MSI-X */
- hfi1_nomsix(dd);
+ pci_disable_msix(dd->pcidev);
} else {
/* INTx */
disable_intx(dd->pcidev);
@@ -8840,18 +12112,12 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
msix_intr);
}
-static void remap_receive_available_interrupt(struct hfi1_devdata *dd,
- int rx, int msix_intr)
-{
- remap_intr(dd, IS_RCVAVAIL_START + rx, msix_intr);
-}
-
static int request_intx_irq(struct hfi1_devdata *dd)
{
int ret;
- snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME"_%d",
- dd->unit);
+ snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
+ dd->unit);
ret = request_irq(dd->pcidev->irq, general_interrupt,
IRQF_SHARED, dd->intx_name, dd);
if (ret)
@@ -8870,7 +12136,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
int first_general, last_general;
int first_sdma, last_sdma;
int first_rx, last_rx;
- int first_cpu, restart_cpu, curr_cpu;
+ int first_cpu, curr_cpu;
int rcv_cpu, sdma_cpu;
int i, ret = 0, possible;
int ht;
@@ -8909,22 +12175,19 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
topology_sibling_cpumask(cpumask_first(local_mask)));
for (i = possible/ht; i < possible; i++)
cpumask_clear_cpu(i, def);
- /* reset possible */
- possible = cpumask_weight(def);
/* def now has full cores on chosen node*/
first_cpu = cpumask_first(def);
if (nr_cpu_ids >= first_cpu)
first_cpu++;
- restart_cpu = first_cpu;
- curr_cpu = restart_cpu;
+ curr_cpu = first_cpu;
- for (i = first_cpu; i < dd->n_krcv_queues + first_cpu; i++) {
+ /* One context is reserved as control context */
+ for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
cpumask_clear_cpu(curr_cpu, def);
cpumask_set_cpu(curr_cpu, rcv);
- if (curr_cpu >= possible)
- curr_cpu = restart_cpu;
- else
- curr_cpu++;
+ curr_cpu = cpumask_next(curr_cpu, def);
+ if (curr_cpu >= nr_cpu_ids)
+ break;
}
/* def mask has non-rcv, rcv has recv mask */
rcv_cpu = cpumask_first(rcv);
@@ -8953,7 +12216,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
handler = general_interrupt;
arg = dd;
snprintf(me->name, sizeof(me->name),
- DRIVER_NAME"_%d", dd->unit);
+ DRIVER_NAME "_%d", dd->unit);
err_info = "general";
} else if (first_sdma <= i && i < last_sdma) {
idx = i - first_sdma;
@@ -8961,7 +12224,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
handler = sdma_interrupt;
arg = sde;
snprintf(me->name, sizeof(me->name),
- DRIVER_NAME"_%d sdma%d", dd->unit, idx);
+ DRIVER_NAME "_%d sdma%d", dd->unit, idx);
err_info = "sdma";
remap_sdma_interrupts(dd, idx, i);
} else if (first_rx <= i && i < last_rx) {
@@ -8981,9 +12244,9 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
thread = receive_context_thread;
arg = rcd;
snprintf(me->name, sizeof(me->name),
- DRIVER_NAME"_%d kctxt%d", dd->unit, idx);
+ DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
err_info = "receive context";
- remap_receive_available_interrupt(dd, idx, i);
+ remap_intr(dd, IS_RCVAVAIL_START + idx, i);
} else {
/* not in our expected range - complain, then
ignore it */
@@ -9018,17 +12281,26 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
if (handler == sdma_interrupt) {
dd_dev_info(dd, "sdma engine %d cpu %d\n",
sde->this_idx, sdma_cpu);
+ sde->cpu = sdma_cpu;
cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
sdma_cpu = cpumask_next(sdma_cpu, def);
if (sdma_cpu >= nr_cpu_ids)
sdma_cpu = cpumask_first(def);
} else if (handler == receive_context_interrupt) {
- dd_dev_info(dd, "rcv ctxt %d cpu %d\n",
- rcd->ctxt, rcv_cpu);
- cpumask_set_cpu(rcv_cpu, dd->msix_entries[i].mask);
- rcv_cpu = cpumask_next(rcv_cpu, rcv);
- if (rcv_cpu >= nr_cpu_ids)
- rcv_cpu = cpumask_first(rcv);
+ dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
+ (rcd->ctxt == HFI1_CTRL_CTXT) ?
+ cpumask_first(def) : rcv_cpu);
+ if (rcd->ctxt == HFI1_CTRL_CTXT) {
+ /* map to first default */
+ cpumask_set_cpu(cpumask_first(def),
+ dd->msix_entries[i].mask);
+ } else {
+ cpumask_set_cpu(rcv_cpu,
+ dd->msix_entries[i].mask);
+ rcv_cpu = cpumask_next(rcv_cpu, rcv);
+ if (rcv_cpu >= nr_cpu_ids)
+ rcv_cpu = cpumask_first(rcv);
+ }
} else {
/* otherwise first def */
dd_dev_info(dd, "%s cpu %d\n",
@@ -9153,7 +12425,6 @@ fail:
static int set_up_context_variables(struct hfi1_devdata *dd)
{
int num_kernel_contexts;
- int num_user_contexts;
int total_contexts;
int ret;
unsigned ngroups;
@@ -9161,11 +12432,18 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/*
* Kernel contexts: (to be fixed later):
* - min or 2 or 1 context/numa
- * - Context 0 - default/errors
- * - Context 1 - VL15
+ * - Context 0 - control context (VL15/multicast/error)
+ * - Context 1 - default context
*/
if (n_krcvqs)
- num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS;
+ /*
+		 * Don't count context 0 in n_krcvqs since
+		 * it isn't used for normal verbs traffic.
+		 *
+		 * krcvqs will reflect the number of kernel
+		 * receive contexts above 0.
+ */
+ num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
else
num_kernel_contexts = num_online_nodes();
num_kernel_contexts =
@@ -9183,12 +12461,10 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
}
/*
* User contexts: (to be fixed later)
- * - set to num_rcv_contexts if non-zero
- * - default to 1 user context per CPU
+ * - default to 1 user context per CPU if num_user_contexts is
+ * negative
*/
- if (num_rcv_contexts)
- num_user_contexts = num_rcv_contexts;
- else
+ if (num_user_contexts < 0)
num_user_contexts = num_online_cpus();
total_contexts = num_kernel_contexts + num_user_contexts;
@@ -9455,7 +12731,7 @@ static void reset_asic_csrs(struct hfi1_devdata *dd)
/* We might want to retain this state across FLR if we ever use it */
write_csr(dd, ASIC_CFG_DRV_STR, 0);
- write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0);
+ /* ASIC_CFG_THERM_POLL_EN leave alone */
/* ASIC_STS_THERM read-only */
/* ASIC_CFG_RESET leave alone */
@@ -9906,7 +13182,7 @@ static void init_chip(struct hfi1_devdata *dd)
/* restore command and BARs */
restore_pci_variables(dd);
- if (is_a0(dd)) {
+ if (is_ax(dd)) {
dd_dev_info(dd, "Resetting CSRs with FLR\n");
hfi1_pcie_flr(dd);
restore_pci_variables(dd);
@@ -9925,23 +13201,20 @@ static void init_chip(struct hfi1_devdata *dd)
write_csr(dd, CCE_DC_CTRL, 0);
/* Set the LED off */
- if (is_a0(dd))
+ if (is_ax(dd))
setextled(dd, 0);
/*
* Clear the QSFP reset.
- * A0 leaves the out lines floating on power on, then on an FLR
- * enforces a 0 on all out pins. The driver does not touch
+ * An FLR enforces a 0 on all out pins. The driver does not touch
* ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
- * anything plugged constantly in reset, if it pays attention
+ * anything plugged constantly in reset, if it pays attention
* to RESET_N.
- * A prime example of this is SiPh. For now, set all pins high.
+ * Prime examples of this are optical cables. Set all pins high.
* I2CCLK and I2CDAT will change per direction, and INT_N and
* MODPRS_N are input only and their value is ignored.
*/
- if (is_a0(dd)) {
- write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
- write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
- }
+ write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
+ write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
}
static void init_early_variables(struct hfi1_devdata *dd)
@@ -9951,7 +13224,7 @@ static void init_early_variables(struct hfi1_devdata *dd)
/* assign link credit variables */
dd->vau = CM_VAU;
dd->link_credits = CM_GLOBAL_CREDITS;
- if (is_a0(dd))
+ if (is_ax(dd))
dd->link_credits--;
dd->vcu = cu_to_vcu(hfi1_cu);
/* enough room for 8 MAD packets plus header - 17K */
@@ -10017,12 +13290,6 @@ static void init_qpmap_table(struct hfi1_devdata *dd,
u64 ctxt = first_ctxt;
for (i = 0; i < 256;) {
- if (ctxt == VL15CTXT) {
- ctxt++;
- if (ctxt > last_ctxt)
- ctxt = first_ctxt;
- continue;
- }
reg |= ctxt << (8 * (i % 8));
i++;
ctxt++;
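[Editor's note] init_qpmap_table() packs eight byte-wide context numbers into each 64-bit map register, placing entry i at bit offset 8 * (i % 8). A stand-alone sketch of filling one register (the starting context number is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* pack eight 8-bit context numbers into one 64-bit map register */
            uint64_t reg = 0;
            uint64_t ctxt = 1;  /* assumed first kernel context */
            int i;

            for (i = 0; i < 8; i++) {
                    reg |= ctxt << (8 * (i % 8));
                    ctxt++;
            }
            printf("map reg = 0x%016llx\n", (unsigned long long)reg);
            return 0;
    }

The printed value, 0x0807060504030201, shows context 1 in the low byte through context 8 in the high byte.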
@@ -10065,7 +13332,7 @@ static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
u64 *rsmmap;
u64 reg;
- u8 rxcontext = is_a0(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
+ u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
/* validate */
if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
@@ -10087,6 +13354,8 @@ static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
goto bail;
rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
+ if (!rsmmap)
+ goto bail;
memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
/* init the local copy of the table */
for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
@@ -10135,19 +13404,13 @@ static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
/* Enable RSM */
add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
kfree(rsmmap);
- /* map everything else (non-VL15) to context 0 */
- init_qpmap_table(
- dd,
- 0,
- 0);
+ /* map everything else to first context */
+ init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
dd->qos_shift = n + 1;
return;
bail:
dd->qos_shift = 1;
- init_qpmap_table(
- dd,
- dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0,
- dd->n_krcv_queues - 1);
+ init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
static void init_rxe(struct hfi1_devdata *dd)
@@ -10276,7 +13539,7 @@ int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
* Enable send-side J_KEY integrity check, unless this is A0 h/w
* (due to A0 erratum).
*/
- if (!is_a0(dd)) {
+ if (!is_ax(dd)) {
reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
@@ -10309,7 +13572,7 @@ int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
* This check would not have been enabled for A0 h/w, see
* set_ctxt_jkey().
*/
- if (!is_a0(dd)) {
+ if (!is_ax(dd)) {
reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
@@ -10418,6 +13681,32 @@ static void asic_should_init(struct hfi1_devdata *dd)
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
}
+/*
+ * Set dd->boardname. Use a generic name if a name is not returned from
+ * EFI variable space.
+ *
+ * Return 0 on success, -ENOMEM if space could not be allocated.
+ */
+static int obtain_boardname(struct hfi1_devdata *dd)
+{
+ /* generic board description */
+ const char generic[] =
+ "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
+ unsigned long size;
+ int ret;
+
+ ret = read_hfi1_efi_var(dd, "description", &size,
+ (void **)&dd->boardname);
+ if (ret) {
+ dd_dev_err(dd, "Board description not found\n");
+ /* use generic description */
+ dd->boardname = kstrdup(generic, GFP_KERNEL);
+ if (!dd->boardname)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
/**
* Allocate and initialize the device structure for the hfi.
* @dev: the pci_dev for hfi1_ib device
@@ -10554,9 +13843,9 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* insure num_vls isn't larger than number of sdma engines */
if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
- num_vls, HFI1_MAX_VLS_SUPPORTED);
- ppd->vls_supported = num_vls = HFI1_MAX_VLS_SUPPORTED;
- ppd->vls_operational = ppd->vls_supported;
+ num_vls, dd->chip_sdma_engines);
+ num_vls = dd->chip_sdma_engines;
+ ppd->vls_supported = dd->chip_sdma_engines;
}
/*
@@ -10615,18 +13904,13 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
parse_platform_config(dd);
- /* add board names as they are defined */
- dd->boardname = kmalloc(64, GFP_KERNEL);
- if (!dd->boardname)
+ ret = obtain_boardname(dd);
+ if (ret)
goto bail_cleanup;
- snprintf(dd->boardname, 64, "Board ID 0x%llx",
- dd->revision >> CCE_REVISION_BOARD_ID_LOWER_NIBBLE_SHIFT
- & CCE_REVISION_BOARD_ID_LOWER_NIBBLE_MASK);
snprintf(dd->boardversion, BOARD_VERS_MAX,
- "ChipABI %u.%u, %s, ChipRev %u.%u, SW Compat %llu\n",
+ "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
- dd->boardname,
(u32)dd->majrev,
(u32)dd->minrev,
(dd->revision >> CCE_REVISION_SW_SHIFT)
@@ -10803,7 +14087,9 @@ static int thermal_init(struct hfi1_devdata *dd)
acquire_hw_mutex(dd);
dd_dev_info(dd, "Initializing thermal sensor\n");
-
+ /* Disable polling of thermal readings */
+ write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
+ msleep(100);
/* Thermal Sensor Initialization */
/* Step 1: Reset the Thermal SBus Receiver */
ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h
index ebf9041a1c5e..5b375ddc345d 100644
--- a/drivers/staging/rdma/hfi1/chip.h
+++ b/drivers/staging/rdma/hfi1/chip.h
@@ -235,6 +235,7 @@
#define HCMD_MISC 0x05
#define HCMD_READ_LCB_IDLE_MSG 0x06
#define HCMD_READ_LCB_CSR 0x07
+#define HCMD_WRITE_LCB_CSR 0x08
#define HCMD_INTERFACE_TEST 0xff
/* DC_DC8051_CFG_HOST_CMD_1.RETURN_CODE - 8051 host command return */
@@ -663,7 +664,6 @@ void get_linkup_link_widths(struct hfi1_pportdata *ppd);
void read_ltp_rtt(struct hfi1_devdata *dd);
void clear_linkup_counters(struct hfi1_devdata *dd);
u32 hdrqempty(struct hfi1_ctxtdata *rcd);
-int is_a0(struct hfi1_devdata *dd);
int is_ax(struct hfi1_devdata *dd);
int is_bx(struct hfi1_devdata *dd);
u32 read_physical_state(struct hfi1_devdata *dd);
@@ -721,7 +721,6 @@ enum {
C_RX_TID_FULL,
C_RX_TID_INVALID,
C_RX_TID_FLGMS,
- C_RX_CTX_RHQS,
C_RX_CTX_EGRS,
C_RCV_TID_FLSMS,
C_CCE_PCI_CR_ST,
@@ -788,6 +787,275 @@ enum {
C_SW_PIO_WAIT,
C_SW_KMEM_WAIT,
C_SW_SEND_SCHED,
+/* MISC_ERR_STATUS */
+ C_MISC_PLL_LOCK_FAIL_ERR,
+ C_MISC_MBIST_FAIL_ERR,
+ C_MISC_INVALID_EEP_CMD_ERR,
+ C_MISC_EFUSE_DONE_PARITY_ERR,
+ C_MISC_EFUSE_WRITE_ERR,
+ C_MISC_EFUSE_READ_BAD_ADDR_ERR,
+ C_MISC_EFUSE_CSR_PARITY_ERR,
+ C_MISC_FW_AUTH_FAILED_ERR,
+ C_MISC_KEY_MISMATCH_ERR,
+ C_MISC_SBUS_WRITE_FAILED_ERR,
+ C_MISC_CSR_WRITE_BAD_ADDR_ERR,
+ C_MISC_CSR_READ_BAD_ADDR_ERR,
+ C_MISC_CSR_PARITY_ERR,
+/* CceErrStatus */
+ /*
+ * A special counter that is the aggregate count
+ * of all the cce_err_status errors. The remainder
+ * are actual bits in the CceErrStatus register.
+ */
+ C_CCE_ERR_STATUS_AGGREGATED_CNT,
+ C_CCE_MSIX_CSR_PARITY_ERR,
+ C_CCE_INT_MAP_UNC_ERR,
+ C_CCE_INT_MAP_COR_ERR,
+ C_CCE_MSIX_TABLE_UNC_ERR,
+ C_CCE_MSIX_TABLE_COR_ERR,
+ C_CCE_RXDMA_CONV_FIFO_PARITY_ERR,
+ C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR,
+ C_CCE_SEG_WRITE_BAD_ADDR_ERR,
+ C_CCE_SEG_READ_BAD_ADDR_ERR,
+ C_LA_TRIGGERED,
+ C_CCE_TRGT_CPL_TIMEOUT_ERR,
+ C_PCIC_RECEIVE_PARITY_ERR,
+ C_PCIC_TRANSMIT_BACK_PARITY_ERR,
+ C_PCIC_TRANSMIT_FRONT_PARITY_ERR,
+ C_PCIC_CPL_DAT_Q_UNC_ERR,
+ C_PCIC_CPL_HD_Q_UNC_ERR,
+ C_PCIC_POST_DAT_Q_UNC_ERR,
+ C_PCIC_POST_HD_Q_UNC_ERR,
+ C_PCIC_RETRY_SOT_MEM_UNC_ERR,
+ C_PCIC_RETRY_MEM_UNC_ERR,
+ C_PCIC_N_POST_DAT_Q_PARITY_ERR,
+ C_PCIC_N_POST_H_Q_PARITY_ERR,
+ C_PCIC_CPL_DAT_Q_COR_ERR,
+ C_PCIC_CPL_HD_Q_COR_ERR,
+ C_PCIC_POST_DAT_Q_COR_ERR,
+ C_PCIC_POST_HD_Q_COR_ERR,
+ C_PCIC_RETRY_SOT_MEM_COR_ERR,
+ C_PCIC_RETRY_MEM_COR_ERR,
+ C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR,
+ C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR,
+ C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR,
+ C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR,
+ C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR,
+ C_CCE_CSR_CFG_BUS_PARITY_ERR,
+ C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR,
+ C_CCE_RSPD_DATA_PARITY_ERR,
+ C_CCE_TRGT_ACCESS_ERR,
+ C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR,
+ C_CCE_CSR_WRITE_BAD_ADDR_ERR,
+ C_CCE_CSR_READ_BAD_ADDR_ERR,
+ C_CCE_CSR_PARITY_ERR,
+/* RcvErrStatus */
+ C_RX_CSR_PARITY_ERR,
+ C_RX_CSR_WRITE_BAD_ADDR_ERR,
+ C_RX_CSR_READ_BAD_ADDR_ERR,
+ C_RX_DMA_CSR_UNC_ERR,
+ C_RX_DMA_DQ_FSM_ENCODING_ERR,
+ C_RX_DMA_EQ_FSM_ENCODING_ERR,
+ C_RX_DMA_CSR_PARITY_ERR,
+ C_RX_RBUF_DATA_COR_ERR,
+ C_RX_RBUF_DATA_UNC_ERR,
+ C_RX_DMA_DATA_FIFO_RD_COR_ERR,
+ C_RX_DMA_DATA_FIFO_RD_UNC_ERR,
+ C_RX_DMA_HDR_FIFO_RD_COR_ERR,
+ C_RX_DMA_HDR_FIFO_RD_UNC_ERR,
+ C_RX_RBUF_DESC_PART2_COR_ERR,
+ C_RX_RBUF_DESC_PART2_UNC_ERR,
+ C_RX_RBUF_DESC_PART1_COR_ERR,
+ C_RX_RBUF_DESC_PART1_UNC_ERR,
+ C_RX_HQ_INTR_FSM_ERR,
+ C_RX_HQ_INTR_CSR_PARITY_ERR,
+ C_RX_LOOKUP_CSR_PARITY_ERR,
+ C_RX_LOOKUP_RCV_ARRAY_COR_ERR,
+ C_RX_LOOKUP_RCV_ARRAY_UNC_ERR,
+ C_RX_LOOKUP_DES_PART2_PARITY_ERR,
+ C_RX_LOOKUP_DES_PART1_UNC_COR_ERR,
+ C_RX_LOOKUP_DES_PART1_UNC_ERR,
+ C_RX_RBUF_NEXT_FREE_BUF_COR_ERR,
+ C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR,
+ C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR,
+ C_RX_RBUF_FL_INITDONE_PARITY_ERR,
+ C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR,
+ C_RX_RBUF_FL_RD_ADDR_PARITY_ERR,
+ C_RX_RBUF_EMPTY_ERR,
+ C_RX_RBUF_FULL_ERR,
+ C_RX_RBUF_BAD_LOOKUP_ERR,
+ C_RX_RBUF_CTX_ID_PARITY_ERR,
+ C_RX_RBUF_CSR_QEOPDW_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR,
+ C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR,
+ C_RX_RBUF_BLOCK_LIST_READ_COR_ERR,
+ C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR,
+ C_RX_RBUF_LOOKUP_DES_COR_ERR,
+ C_RX_RBUF_LOOKUP_DES_UNC_ERR,
+ C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR,
+ C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR,
+ C_RX_RBUF_FREE_LIST_COR_ERR,
+ C_RX_RBUF_FREE_LIST_UNC_ERR,
+ C_RX_RCV_FSM_ENCODING_ERR,
+ C_RX_DMA_FLAG_COR_ERR,
+ C_RX_DMA_FLAG_UNC_ERR,
+ C_RX_DC_SOP_EOP_PARITY_ERR,
+ C_RX_RCV_CSR_PARITY_ERR,
+ C_RX_RCV_QP_MAP_TABLE_COR_ERR,
+ C_RX_RCV_QP_MAP_TABLE_UNC_ERR,
+ C_RX_RCV_DATA_COR_ERR,
+ C_RX_RCV_DATA_UNC_ERR,
+ C_RX_RCV_HDR_COR_ERR,
+ C_RX_RCV_HDR_UNC_ERR,
+ C_RX_DC_INTF_PARITY_ERR,
+ C_RX_DMA_CSR_COR_ERR,
+/* SendPioErrStatus */
+ C_PIO_PEC_SOP_HEAD_PARITY_ERR,
+ C_PIO_PCC_SOP_HEAD_PARITY_ERR,
+ C_PIO_LAST_RETURNED_CNT_PARITY_ERR,
+ C_PIO_CURRENT_FREE_CNT_PARITY_ERR,
+ C_PIO_RSVD_31_ERR,
+ C_PIO_RSVD_30_ERR,
+ C_PIO_PPMC_SOP_LEN_ERR,
+ C_PIO_PPMC_BQC_MEM_PARITY_ERR,
+ C_PIO_VL_FIFO_PARITY_ERR,
+ C_PIO_VLF_SOP_PARITY_ERR,
+ C_PIO_VLF_V1_LEN_PARITY_ERR,
+ C_PIO_BLOCK_QW_COUNT_PARITY_ERR,
+ C_PIO_WRITE_QW_VALID_PARITY_ERR,
+ C_PIO_STATE_MACHINE_ERR,
+ C_PIO_WRITE_DATA_PARITY_ERR,
+ C_PIO_HOST_ADDR_MEM_COR_ERR,
+ C_PIO_HOST_ADDR_MEM_UNC_ERR,
+ C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR,
+ C_PIO_INIT_SM_IN_ERR,
+ C_PIO_PPMC_PBL_FIFO_ERR,
+ C_PIO_CREDIT_RET_FIFO_PARITY_ERR,
+ C_PIO_V1_LEN_MEM_BANK1_COR_ERR,
+ C_PIO_V1_LEN_MEM_BANK0_COR_ERR,
+ C_PIO_V1_LEN_MEM_BANK1_UNC_ERR,
+ C_PIO_V1_LEN_MEM_BANK0_UNC_ERR,
+ C_PIO_SM_PKT_RESET_PARITY_ERR,
+ C_PIO_PKT_EVICT_FIFO_PARITY_ERR,
+ C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR,
+ C_PIO_SBRDCTL_CRREL_PARITY_ERR,
+ C_PIO_PEC_FIFO_PARITY_ERR,
+ C_PIO_PCC_FIFO_PARITY_ERR,
+ C_PIO_SB_MEM_FIFO1_ERR,
+ C_PIO_SB_MEM_FIFO0_ERR,
+ C_PIO_CSR_PARITY_ERR,
+ C_PIO_WRITE_ADDR_PARITY_ERR,
+ C_PIO_WRITE_BAD_CTXT_ERR,
+/* SendDmaErrStatus */
+ C_SDMA_PCIE_REQ_TRACKING_COR_ERR,
+ C_SDMA_PCIE_REQ_TRACKING_UNC_ERR,
+ C_SDMA_CSR_PARITY_ERR,
+ C_SDMA_RPY_TAG_ERR,
+/* SendEgressErrStatus */
+ C_TX_READ_PIO_MEMORY_CSR_UNC_ERR,
+ C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR,
+ C_TX_EGRESS_FIFO_COR_ERR,
+ C_TX_READ_PIO_MEMORY_COR_ERR,
+ C_TX_READ_SDMA_MEMORY_COR_ERR,
+ C_TX_SB_HDR_COR_ERR,
+ C_TX_CREDIT_OVERRUN_ERR,
+ C_TX_LAUNCH_FIFO8_COR_ERR,
+ C_TX_LAUNCH_FIFO7_COR_ERR,
+ C_TX_LAUNCH_FIFO6_COR_ERR,
+ C_TX_LAUNCH_FIFO5_COR_ERR,
+ C_TX_LAUNCH_FIFO4_COR_ERR,
+ C_TX_LAUNCH_FIFO3_COR_ERR,
+ C_TX_LAUNCH_FIFO2_COR_ERR,
+ C_TX_LAUNCH_FIFO1_COR_ERR,
+ C_TX_LAUNCH_FIFO0_COR_ERR,
+ C_TX_CREDIT_RETURN_VL_ERR,
+ C_TX_HCRC_INSERTION_ERR,
+ C_TX_EGRESS_FIFI_UNC_ERR,
+ C_TX_READ_PIO_MEMORY_UNC_ERR,
+ C_TX_READ_SDMA_MEMORY_UNC_ERR,
+ C_TX_SB_HDR_UNC_ERR,
+ C_TX_CREDIT_RETURN_PARITY_ERR,
+ C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR,
+ C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR,
+ C_TX_SDMA15_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA14_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA13_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA12_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA11_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA10_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA9_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA8_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA7_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA6_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA5_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA4_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA3_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA2_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA1_DISALLOWED_PACKET_ERR,
+ C_TX_SDMA0_DISALLOWED_PACKET_ERR,
+ C_TX_CONFIG_PARITY_ERR,
+ C_TX_SBRD_CTL_CSR_PARITY_ERR,
+ C_TX_LAUNCH_CSR_PARITY_ERR,
+ C_TX_ILLEGAL_CL_ERR,
+ C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR,
+ C_TX_RESERVED_10,
+ C_TX_RESERVED_9,
+ C_TX_SDMA_LAUNCH_INTF_PARITY_ERR,
+ C_TX_PIO_LAUNCH_INTF_PARITY_ERR,
+ C_TX_RESERVED_6,
+ C_TX_INCORRECT_LINK_STATE_ERR,
+ C_TX_LINK_DOWN_ERR,
+ C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR,
+ C_TX_RESERVED_2,
+ C_TX_PKT_INTEGRITY_MEM_UNC_ERR,
+ C_TX_PKT_INTEGRITY_MEM_COR_ERR,
+/* SendErrStatus */
+ C_SEND_CSR_WRITE_BAD_ADDR_ERR,
+ C_SEND_CSR_READ_BAD_ADD_ERR,
+ C_SEND_CSR_PARITY_ERR,
+/* SendCtxtErrStatus */
+ C_PIO_WRITE_OUT_OF_BOUNDS_ERR,
+ C_PIO_WRITE_OVERFLOW_ERR,
+ C_PIO_WRITE_CROSSES_BOUNDARY_ERR,
+ C_PIO_DISALLOWED_PACKET_ERR,
+ C_PIO_INCONSISTENT_SOP_ERR,
+/* SendDmaEngErrStatus */
+ C_SDMA_HEADER_REQUEST_FIFO_COR_ERR,
+ C_SDMA_HEADER_STORAGE_COR_ERR,
+ C_SDMA_PACKET_TRACKING_COR_ERR,
+ C_SDMA_ASSEMBLY_COR_ERR,
+ C_SDMA_DESC_TABLE_COR_ERR,
+ C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR,
+ C_SDMA_HEADER_STORAGE_UNC_ERR,
+ C_SDMA_PACKET_TRACKING_UNC_ERR,
+ C_SDMA_ASSEMBLY_UNC_ERR,
+ C_SDMA_DESC_TABLE_UNC_ERR,
+ C_SDMA_TIMEOUT_ERR,
+ C_SDMA_HEADER_LENGTH_ERR,
+ C_SDMA_HEADER_ADDRESS_ERR,
+ C_SDMA_HEADER_SELECT_ERR,
+ C_SMDA_RESERVED_9,
+ C_SDMA_PACKET_DESC_OVERFLOW_ERR,
+ C_SDMA_LENGTH_MISMATCH_ERR,
+ C_SDMA_HALT_ERR,
+ C_SDMA_MEM_READ_ERR,
+ C_SDMA_FIRST_DESC_ERR,
+ C_SDMA_TAIL_OUT_OF_BOUNDS_ERR,
+ C_SDMA_TOO_LONG_ERR,
+ C_SDMA_GEN_MISMATCH_ERR,
+ C_SDMA_WRONG_DW_ERR,
DEV_CNTR_LAST /* Must be kept last */
};
@@ -810,6 +1078,7 @@ enum {
C_RX_WORDS,
C_SW_LINK_DOWN,
C_SW_LINK_UP,
+ C_SW_UNKNOWN_FRAME,
C_SW_XMIT_DSCD,
C_SW_XMIT_DSCD_VL,
C_SW_XMIT_CSTR_ERR,
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h
index bf45de29d8bd..701e9e1012a6 100644
--- a/drivers/staging/rdma/hfi1/chip_registers.h
+++ b/drivers/staging/rdma/hfi1/chip_registers.h
@@ -318,6 +318,9 @@
#define DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT 0
#define DC_LCB_CFG_TX_FIFOS_RESET (DC_LCB_CSRS + 0x000000000008)
#define DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT 0
+#define DC_LCB_CFG_REINIT_AS_SLAVE (DC_LCB_CSRS + 0x000000000030)
+#define DC_LCB_CFG_CNT_FOR_SKIP_STALL (DC_LCB_CSRS + 0x000000000040)
+#define DC_LCB_CFG_CLK_CNTR (DC_LCB_CSRS + 0x000000000110)
#define DC_LCB_ERR_CLR (DC_LCB_CSRS + 0x000000000308)
#define DC_LCB_ERR_EN (DC_LCB_CSRS + 0x000000000310)
#define DC_LCB_ERR_FLG (DC_LCB_CSRS + 0x000000000300)
@@ -379,7 +382,6 @@
#define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
#define RCV_BUF_OVFL_CNT 10
#define RCV_CONTEXT_EGR_STALL 22
-#define RCV_CONTEXT_RHQ_STALL 21
#define RCV_DATA_PKT_CNT 0
#define RCV_DWORD_CNT 1
#define RCV_TID_FLOW_GEN_MISMATCH_CNT 20
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h
index 5e203239c5b0..5dd92720faae 100644
--- a/drivers/staging/rdma/hfi1/common.h
+++ b/drivers/staging/rdma/hfi1/common.h
@@ -132,13 +132,14 @@
* HFI1_CAP_RESERVED_MASK bits.
*/
#define HFI1_CAP_WRITABLE_MASK (HFI1_CAP_SDMA_AHG | \
- HFI1_CAP_HDRSUPP | \
- HFI1_CAP_MULTI_PKT_EGR | \
- HFI1_CAP_NODROP_RHQ_FULL | \
- HFI1_CAP_NODROP_EGR_FULL | \
- HFI1_CAP_ALLOW_PERM_JKEY | \
- HFI1_CAP_STATIC_RATE_CTRL | \
- HFI1_CAP_PRINT_UNIMPL)
+ HFI1_CAP_HDRSUPP | \
+ HFI1_CAP_MULTI_PKT_EGR | \
+ HFI1_CAP_NODROP_RHQ_FULL | \
+ HFI1_CAP_NODROP_EGR_FULL | \
+ HFI1_CAP_ALLOW_PERM_JKEY | \
+ HFI1_CAP_STATIC_RATE_CTRL | \
+ HFI1_CAP_PRINT_UNIMPL | \
+ HFI1_CAP_TID_UNMAP)
/*
* A set of capability bits that are "global" and are not allowed to be
* set in the user bitmask.
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
index 88414d720469..0c8831705664 100644
--- a/drivers/staging/rdma/hfi1/diag.c
+++ b/drivers/staging/rdma/hfi1/diag.c
@@ -78,8 +78,8 @@
hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__)
/* Snoop option mask */
-#define SNOOP_DROP_SEND (1 << 0)
-#define SNOOP_USE_METADATA (1 << 1)
+#define SNOOP_DROP_SEND BIT(0)
+#define SNOOP_USE_METADATA BIT(1)
static u8 snoop_flags;
@@ -135,7 +135,7 @@ static struct cdev diagpkt_cdev;
static struct device *diagpkt_device;
static ssize_t diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
+ size_t count, loff_t *off);
static const struct file_operations diagpkt_file_ops = {
.owner = THIS_MODULE,
@@ -177,37 +177,37 @@ struct hfi1_link_info {
#define HFI1_SNOOP_IOCGETLINKSTATE \
_IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ)
#define HFI1_SNOOP_IOCSETLINKSTATE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+1)
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1)
#define HFI1_SNOOP_IOCCLEARQUEUE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+2)
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2)
#define HFI1_SNOOP_IOCCLEARFILTER \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+3)
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3)
#define HFI1_SNOOP_IOCSETFILTER \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+4)
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4)
#define HFI1_SNOOP_IOCGETVERSION \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+5)
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5)
#define HFI1_SNOOP_IOCSET_OPTS \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+6)
+ _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6)
/*
* These offsets +6/+7 could change, but these are already known and used
 * IOCTL numbers, so don't change them without a good reason.
*/
#define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \
- _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+6, \
+ _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \
struct hfi1_link_info)
#define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \
- _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ+7, \
+ _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \
struct hfi1_link_info)
static int hfi1_snoop_open(struct inode *in, struct file *fp);
static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
- size_t pkt_len, loff_t *off);
+ size_t pkt_len, loff_t *off);
static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
+ size_t count, loff_t *off);
static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
static unsigned int hfi1_snoop_poll(struct file *fp,
- struct poll_table_struct *wait);
+ struct poll_table_struct *wait);
static int hfi1_snoop_release(struct inode *in, struct file *fp);
struct hfi1_packet_filter_command {
@@ -323,14 +323,12 @@ static void hfi1_snoop_remove(struct hfi1_devdata *dd)
void hfi1_diag_remove(struct hfi1_devdata *dd)
{
-
hfi1_snoop_remove(dd);
if (atomic_dec_and_test(&diagpkt_count))
hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
}
-
/*
* Allocated structure shared between the credit return mechanism and
* diagpkt_send().
@@ -379,6 +377,7 @@ static ssize_t diagpkt_send(struct diag_pkt *dp)
pio_release_cb credit_cb = NULL;
void *credit_arg = NULL;
struct diagpkt_wait *wait = NULL;
+ int trycount = 0;
dd = hfi1_lookup(dp->unit);
if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) {
@@ -393,7 +392,7 @@ static ssize_t diagpkt_send(struct diag_pkt *dp)
if (dp->version != _DIAG_PKT_VERS) {
dd_dev_err(dd, "Invalid version %u for diagpkt_write\n",
- dp->version);
+ dp->version);
ret = -EINVAL;
goto bail;
}
@@ -440,7 +439,7 @@ static ssize_t diagpkt_send(struct diag_pkt *dp)
}
if (copy_from_user(tmpbuf,
- (const void __user *) (unsigned long) dp->data,
+ (const void __user *)(unsigned long)dp->data,
dp->len)) {
ret = -EFAULT;
goto bail;
@@ -493,8 +492,15 @@ static ssize_t diagpkt_send(struct diag_pkt *dp)
credit_arg = wait;
}
+retry:
pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg);
if (!pbuf) {
+ if (trycount == 0) {
+ /* force a credit return and try again */
+ sc_return_credits(sc);
+ trycount = 1;
+ goto retry;
+ }
/*
* No send buffer means no credit callback. Undo
* the wait set-up that was done above. We free wait
@@ -530,9 +536,9 @@ static ssize_t diagpkt_send(struct diag_pkt *dp)
* NOTE: PRC_FILL_ERR is at best informational and cannot
* be depended on.
*/
- if (!ret && (((wait->code & PRC_STATUS_ERR)
- || (wait->code & PRC_FILL_ERR)
- || (wait->code & PRC_SC_DISABLE))))
+ if (!ret && (((wait->code & PRC_STATUS_ERR) ||
+ (wait->code & PRC_FILL_ERR) ||
+ (wait->code & PRC_SC_DISABLE))))
ret = -EIO;
put_diagpkt_wait(wait); /* finished with the structure */
@@ -545,7 +551,7 @@ bail:
}
static ssize_t diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
+ size_t count, loff_t *off)
{
struct hfi1_devdata *dd;
struct send_context *sc;
@@ -565,7 +571,7 @@ static ssize_t diagpkt_write(struct file *fp, const char __user *data,
*/
if (dp.pbc) {
dd = hfi1_lookup(dp.unit);
- if (dd == NULL)
+ if (!dd)
return -ENODEV;
vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
sc = dd->vld[vl].sc;
@@ -598,7 +604,7 @@ static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
if (ret) {
dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev,
- &dd->hfi1_snoop.class_dev);
+ &dd->hfi1_snoop.class_dev);
}
return ret;
@@ -611,7 +617,6 @@ static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in)
dd = hfi1_lookup(unit);
return dd;
-
}
/* clear or restore send context integrity checks */
@@ -652,7 +657,7 @@ static int hfi1_snoop_open(struct inode *in, struct file *fp)
mutex_lock(&hfi1_mutex);
dd = hfi1_dd_from_sc_inode(in);
- if (dd == NULL) {
+ if (!dd) {
ret = -ENODEV;
goto bail;
}
@@ -739,7 +744,7 @@ static int hfi1_snoop_release(struct inode *in, struct file *fp)
int mode_flag;
dd = hfi1_dd_from_sc_inode(in);
- if (dd == NULL)
+ if (!dd)
return -ENODEV;
spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
@@ -794,7 +799,7 @@ static unsigned int hfi1_snoop_poll(struct file *fp,
struct hfi1_devdata *dd;
dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (dd == NULL)
+ if (!dd)
return -ENODEV;
spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
@@ -805,7 +810,6 @@ static unsigned int hfi1_snoop_poll(struct file *fp,
spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
return ret;
-
}
static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
@@ -822,7 +826,7 @@ static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
struct hfi1_pportdata *ppd;
dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (dd == NULL)
+ if (!dd)
return -ENODEV;
ppd = dd->pport;
@@ -847,7 +851,7 @@ static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
if (copy_from_user(&byte_one, data, 1))
return -EINVAL;
- if (copy_from_user(&byte_two, data+1, 1))
+ if (copy_from_user(&byte_two, data + 1, 1))
return -EINVAL;
sc4 = (byte_one >> 4) & 0xf;
@@ -920,7 +924,7 @@ static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
struct hfi1_devdata *dd;
dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (dd == NULL)
+ if (!dd)
return -ENODEV;
spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
@@ -946,16 +950,18 @@ static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
if (pkt_len >= packet->total_len) {
if (copy_to_user(data, packet->data,
- packet->total_len))
+ packet->total_len))
ret = -EFAULT;
else
ret = packet->total_len;
- } else
+ } else {
ret = -EINVAL;
+ }
kfree(packet);
- } else
+ } else {
spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ }
return ret;
}
@@ -966,9 +972,9 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
void *filter_value = NULL;
long ret = 0;
int value = 0;
- u8 physState = 0;
- u8 linkState = 0;
- u16 devState = 0;
+ u8 phys_state = 0;
+ u8 link_state = 0;
+ u16 dev_state = 0;
unsigned long flags = 0;
unsigned long *argp = NULL;
struct hfi1_packet_filter_command filter_cmd = {0};
@@ -976,237 +982,221 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
struct hfi1_pportdata *ppd = NULL;
unsigned int index;
struct hfi1_link_info link_info;
+ int read_cmd, write_cmd, read_ok, write_ok;
dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (dd == NULL)
+ if (!dd)
return -ENODEV;
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
mode_flag = dd->hfi1_snoop.mode_flag;
+ read_cmd = _IOC_DIR(cmd) & _IOC_READ;
+ write_cmd = _IOC_DIR(cmd) & _IOC_WRITE;
+ write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
+ read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
- if (((_IOC_DIR(cmd) & _IOC_READ)
- && !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)))
- || ((_IOC_DIR(cmd) & _IOC_WRITE)
- && !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)))) {
- ret = -EFAULT;
- } else if (!capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- } else if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
- (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
- (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
- (cmd != HFI1_SNOOP_IOCSETFILTER)) {
+ if ((read_cmd && !write_ok) || (write_cmd && !read_ok))
+ return -EFAULT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
+ (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
+ (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
+ (cmd != HFI1_SNOOP_IOCSETFILTER))
/* Capture devices are allowed only 3 operations:
 * 1. Clear capture queue
 * 2. Clear capture filter
 * 3. Set capture filter
 * Others are invalid.
 */
- ret = -EINVAL;
- } else {
- switch (cmd) {
- case HFI1_SNOOP_IOCSETLINKSTATE:
- snoop_dbg("HFI1_SNOOP_IOCSETLINKSTATE is not valid");
- ret = -EINVAL;
- break;
+ return -EINVAL;
- case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
- memset(&link_info, 0, sizeof(link_info));
+ switch (cmd) {
+ case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
+ memset(&link_info, 0, sizeof(link_info));
- if (copy_from_user(&link_info,
- (struct hfi1_link_info __user *)arg,
- sizeof(link_info)))
- ret = -EFAULT;
+ if (copy_from_user(&link_info,
+ (struct hfi1_link_info __user *)arg,
+ sizeof(link_info)))
+ return -EFAULT;
- value = link_info.port_state;
- index = link_info.port_number;
- if (index > dd->num_pports - 1) {
- ret = -EINVAL;
- break;
- }
+ value = link_info.port_state;
+ index = link_info.port_number;
+ if (index > dd->num_pports - 1)
+ return -EINVAL;
- ppd = &dd->pport[index];
- if (!ppd) {
- ret = -EINVAL;
- break;
- }
+ ppd = &dd->pport[index];
+ if (!ppd)
+ return -EINVAL;
- /* What we want to transition to */
- physState = (value >> 4) & 0xF;
- linkState = value & 0xF;
- snoop_dbg("Setting link state 0x%x", value);
-
- switch (linkState) {
- case IB_PORT_NOP:
- if (physState == 0)
- break;
- /* fall through */
- case IB_PORT_DOWN:
- switch (physState) {
- case 0:
- devState = HLS_DN_DOWNDEF;
- break;
- case 2:
- devState = HLS_DN_POLL;
- break;
- case 3:
- devState = HLS_DN_DISABLE;
- break;
- default:
- ret = -EINVAL;
- goto done;
- }
- ret = set_link_state(ppd, devState);
- break;
- case IB_PORT_ARMED:
- ret = set_link_state(ppd, HLS_UP_ARMED);
- if (!ret)
- send_idle_sma(dd, SMA_IDLE_ARM);
- break;
- case IB_PORT_ACTIVE:
- ret = set_link_state(ppd, HLS_UP_ACTIVE);
- if (!ret)
- send_idle_sma(dd, SMA_IDLE_ACTIVE);
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ /* What we want to transition to */
+ phys_state = (value >> 4) & 0xF;
+ link_state = value & 0xF;
+ snoop_dbg("Setting link state 0x%x", value);
- if (ret)
+ switch (link_state) {
+ case IB_PORT_NOP:
+ if (phys_state == 0)
break;
- /* fall through */
- case HFI1_SNOOP_IOCGETLINKSTATE:
- case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
- if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
- memset(&link_info, 0, sizeof(link_info));
- if (copy_from_user(&link_info,
- (struct hfi1_link_info __user *)arg,
- sizeof(link_info)))
- ret = -EFAULT;
- index = link_info.port_number;
- } else {
- ret = __get_user(index, (int __user *) arg);
- if (ret != 0)
- break;
- }
-
- if (index > dd->num_pports - 1) {
- ret = -EINVAL;
+ /* fall through */
+ case IB_PORT_DOWN:
+ switch (phys_state) {
+ case 0:
+ dev_state = HLS_DN_DOWNDEF;
break;
- }
-
- ppd = &dd->pport[index];
- if (!ppd) {
- ret = -EINVAL;
+ case 2:
+ dev_state = HLS_DN_POLL;
break;
+ case 3:
+ dev_state = HLS_DN_DISABLE;
+ break;
+ default:
+ return -EINVAL;
}
- value = hfi1_ibphys_portstate(ppd);
- value <<= 4;
- value |= driver_lstate(ppd);
-
- snoop_dbg("Link port | Link State: %d", value);
-
- if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
- (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
- link_info.port_state = value;
- link_info.node_guid = cpu_to_be64(ppd->guid);
- link_info.link_speed_active =
- ppd->link_speed_active;
- link_info.link_width_active =
- ppd->link_width_active;
- if (copy_to_user(
- (struct hfi1_link_info __user *)arg,
- &link_info, sizeof(link_info)))
- ret = -EFAULT;
- } else {
- ret = __put_user(value, (int __user *)arg);
- }
+ ret = set_link_state(ppd, dev_state);
break;
-
- case HFI1_SNOOP_IOCCLEARQUEUE:
- snoop_dbg("Clearing snoop queue");
- drain_snoop_list(&dd->hfi1_snoop.queue);
+ case IB_PORT_ARMED:
+ ret = set_link_state(ppd, HLS_UP_ARMED);
+ if (!ret)
+ send_idle_sma(dd, SMA_IDLE_ARM);
break;
-
- case HFI1_SNOOP_IOCCLEARFILTER:
- snoop_dbg("Clearing filter");
- if (dd->hfi1_snoop.filter_callback) {
- /* Drain packets first */
- drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback = NULL;
- }
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = NULL;
+ case IB_PORT_ACTIVE:
+ ret = set_link_state(ppd, HLS_UP_ACTIVE);
+ if (!ret)
+ send_idle_sma(dd, SMA_IDLE_ACTIVE);
break;
+ default:
+ return -EINVAL;
+ }
- case HFI1_SNOOP_IOCSETFILTER:
- snoop_dbg("Setting filter");
- /* just copy command structure */
- argp = (unsigned long *)arg;
- if (copy_from_user(&filter_cmd, (void __user *)argp,
- sizeof(filter_cmd))) {
- ret = -EFAULT;
- break;
- }
- if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
- pr_alert("Invalid opcode in request\n");
- ret = -EINVAL;
+ if (ret)
+ break;
+ /* fall through */
+ case HFI1_SNOOP_IOCGETLINKSTATE:
+ case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
+ if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
+ memset(&link_info, 0, sizeof(link_info));
+ if (copy_from_user(&link_info,
+ (struct hfi1_link_info __user *)arg,
+ sizeof(link_info)))
+ return -EFAULT;
+ index = link_info.port_number;
+ } else {
+ ret = __get_user(index, (int __user *)arg);
+ if (ret != 0)
break;
- }
+ }
- snoop_dbg("Opcode %d Len %d Ptr %p",
- filter_cmd.opcode, filter_cmd.length,
- filter_cmd.value_ptr);
+ if (index > dd->num_pports - 1)
+ return -EINVAL;
- filter_value = kcalloc(filter_cmd.length, sizeof(u8),
- GFP_KERNEL);
- if (!filter_value) {
- pr_alert("Not enough memory\n");
- ret = -ENOMEM;
- break;
- }
- /* copy remaining data from userspace */
- if (copy_from_user((u8 *)filter_value,
- (void __user *)filter_cmd.value_ptr,
- filter_cmd.length)) {
- kfree(filter_value);
- ret = -EFAULT;
- break;
- }
+ ppd = &dd->pport[index];
+ if (!ppd)
+ return -EINVAL;
+
+ value = hfi1_ibphys_portstate(ppd);
+ value <<= 4;
+ value |= driver_lstate(ppd);
+
+ snoop_dbg("Link port | Link State: %d", value);
+
+ if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
+ (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
+ link_info.port_state = value;
+ link_info.node_guid = cpu_to_be64(ppd->guid);
+ link_info.link_speed_active =
+ ppd->link_speed_active;
+ link_info.link_width_active =
+ ppd->link_width_active;
+ if (copy_to_user((struct hfi1_link_info __user *)arg,
+ &link_info, sizeof(link_info)))
+ return -EFAULT;
+ } else {
+ ret = __put_user(value, (int __user *)arg);
+ }
+ break;
+
+ case HFI1_SNOOP_IOCCLEARQUEUE:
+ snoop_dbg("Clearing snoop queue");
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+ drain_snoop_list(&dd->hfi1_snoop.queue);
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ break;
+
+ case HFI1_SNOOP_IOCCLEARFILTER:
+ snoop_dbg("Clearing filter");
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+ if (dd->hfi1_snoop.filter_callback) {
/* Drain packets first */
drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback =
- hfi1_filters[filter_cmd.opcode].filter;
- /* just in case we see back to back sets */
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = filter_value;
+ dd->hfi1_snoop.filter_callback = NULL;
+ }
+ kfree(dd->hfi1_snoop.filter_value);
+ dd->hfi1_snoop.filter_value = NULL;
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ break;
- break;
- case HFI1_SNOOP_IOCGETVERSION:
- value = SNOOP_CAPTURE_VERSION;
- snoop_dbg("Getting version: %d", value);
- ret = __put_user(value, (int __user *)arg);
- break;
- case HFI1_SNOOP_IOCSET_OPTS:
- snoop_flags = 0;
- ret = __get_user(value, (int __user *) arg);
- if (ret != 0)
- break;
+ case HFI1_SNOOP_IOCSETFILTER:
+ snoop_dbg("Setting filter");
+ /* just copy command structure */
+ argp = (unsigned long *)arg;
+ if (copy_from_user(&filter_cmd, (void __user *)argp,
+ sizeof(filter_cmd)))
+ return -EFAULT;
- snoop_dbg("Setting snoop option %d", value);
- if (value & SNOOP_DROP_SEND)
- snoop_flags |= SNOOP_DROP_SEND;
- if (value & SNOOP_USE_METADATA)
- snoop_flags |= SNOOP_USE_METADATA;
- break;
- default:
- ret = -ENOTTY;
- break;
+ if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
+ pr_alert("Invalid opcode in request\n");
+ return -EINVAL;
+ }
+
+ snoop_dbg("Opcode %d Len %d Ptr %p",
+ filter_cmd.opcode, filter_cmd.length,
+ filter_cmd.value_ptr);
+
+ filter_value = kcalloc(filter_cmd.length, sizeof(u8),
+ GFP_KERNEL);
+ if (!filter_value)
+ return -ENOMEM;
+
+ /* copy remaining data from userspace */
+ if (copy_from_user((u8 *)filter_value,
+ (void __user *)filter_cmd.value_ptr,
+ filter_cmd.length)) {
+ kfree(filter_value);
+ return -EFAULT;
}
+ /* Drain packets first */
+ spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
+ drain_snoop_list(&dd->hfi1_snoop.queue);
+ dd->hfi1_snoop.filter_callback =
+ hfi1_filters[filter_cmd.opcode].filter;
+ /* just in case we see back to back sets */
+ kfree(dd->hfi1_snoop.filter_value);
+ dd->hfi1_snoop.filter_value = filter_value;
+ spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+ break;
+ case HFI1_SNOOP_IOCGETVERSION:
+ value = SNOOP_CAPTURE_VERSION;
+ snoop_dbg("Getting version: %d", value);
+ ret = __put_user(value, (int __user *)arg);
+ break;
+ case HFI1_SNOOP_IOCSET_OPTS:
+ snoop_flags = 0;
+ ret = __get_user(value, (int __user *)arg);
+ if (ret != 0)
+ break;
+
+ snoop_dbg("Setting snoop option %d", value);
+ if (value & SNOOP_DROP_SEND)
+ snoop_flags |= SNOOP_DROP_SEND;
+ if (value & SNOOP_USE_METADATA)
+ snoop_flags |= SNOOP_USE_METADATA;
+ break;
+ default:
+ return -ENOTTY;
}
-done:
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
+
return ret;
}
@@ -1321,7 +1311,6 @@ static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value)
{
-
struct hfi1_ib_header *hdr;
struct hfi1_other_headers *ohdr = NULL;
int ret;
@@ -1404,7 +1393,6 @@ static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
{
-
u32 lnh = 0;
struct hfi1_ib_header *hdr;
struct hfi1_other_headers *ohdr = NULL;
@@ -1476,16 +1464,14 @@ static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
u32 data_len,
u32 md_len)
{
-
struct snoop_packet *packet;
- packet = kzalloc(sizeof(struct snoop_packet) + hdr_len + data_len
+ packet = kzalloc(sizeof(*packet) + hdr_len + data_len
+ md_len,
GFP_ATOMIC | __GFP_NOWARN);
if (likely(packet))
INIT_LIST_HEAD(&packet->list);
-
return packet;
}
@@ -1542,12 +1528,11 @@ int snoop_recv_handler(struct hfi1_packet *packet)
unlikely(snoop_flags & SNOOP_USE_METADATA))
md_len = sizeof(struct capture_md);
-
s_packet = allocate_snoop_packet(header_size,
tlen - header_size,
md_len);
- if (unlikely(s_packet == NULL)) {
+ if (unlikely(!s_packet)) {
dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
break;
}
@@ -1618,14 +1603,12 @@ int snoop_recv_handler(struct hfi1_packet *packet)
/*
* Handle snooping and capturing packets when sdma is being used.
*/
-int snoop_send_dma_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
- u32 plen, u32 dwords, u64 pbc)
+int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc)
{
- pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
+ pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
snoop_dbg("Unsupported Operation");
- return hfi1_verbs_send_dma(qp, ibhdr, hdrwords, ss, len, plen, dwords,
- 0);
+ return hfi1_verbs_send_dma(qp, ps, 0);
}
/*
@@ -1633,12 +1616,16 @@ int snoop_send_dma_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
* bypass packets. The only way to send a bypass packet currently is to use the
 * diagpkt interface. When that interface is enabled, snoop/capture is not.
*/
-int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
- u32 plen, u32 dwords, u64 pbc)
+int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc)
{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ahg_ib_header *ahdr = qp->s_hdr;
+ u32 hdrwords = qp->s_hdrwords;
+ struct hfi1_sge_state *ss = qp->s_cur_sge;
+ u32 len = qp->s_cur_size;
+ u32 dwords = (len + 3) >> 2;
+ u32 plen = hdrwords + dwords + 2; /* includes pbc */
+ struct hfi1_pportdata *ppd = ps->ppd;
struct snoop_packet *s_packet = NULL;
u32 *hdr = (u32 *)&ahdr->ibh;
u32 length = 0;
@@ -1666,7 +1653,7 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
/* not using ss->total_len as arg 2 b/c that does not count CRC */
s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);
- if (unlikely(s_packet == NULL)) {
+ if (unlikely(!s_packet)) {
dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
goto out;
}
@@ -1783,8 +1770,7 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
break;
}
out:
- return hfi1_verbs_send_pio(qp, ahdr, hdrwords, ss, len, plen, dwords,
- md.u.pbc);
+ return hfi1_verbs_send_pio(qp, ps, md.u.pbc);
}
/*
@@ -1836,7 +1822,7 @@ void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
s_packet = allocate_snoop_packet(packet_len, 0, md_len);
- if (unlikely(s_packet == NULL)) {
+ if (unlikely(!s_packet)) {
dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n");
goto inline_pio_out;
}
@@ -1868,5 +1854,4 @@ void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
inline_pio_out:
pio_copy(dd, pbuf, pbc, from, count);
-
}
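
The flattened hfi1_ioctl() above depends on the crossed meaning of the ioctl direction bits: an _IOC_READ command copies data to user space, so the user buffer must be writable, while an _IOC_WRITE command copies from it. A standalone sketch of that check, using the 3-argument access_ok() of this kernel version; the helper name is illustrative:

/* Sketch: validate the user buffer for an ioctl by direction. */
static int check_ioctl_dir(unsigned int cmd, unsigned long arg)
{
	if ((_IOC_DIR(cmd) & _IOC_READ) &&
	    !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;	/* driver writes here; buffer not writable */
	if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
	    !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;	/* driver reads here; buffer not readable */
	return 0;
}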
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c
index ce69141b56cb..8485de1fce08 100644
--- a/drivers/staging/rdma/hfi1/driver.c
+++ b/drivers/staging/rdma/hfi1/driver.c
@@ -158,7 +158,7 @@ const char *get_unit_name(int unit)
{
static char iname[16];
- snprintf(iname, sizeof(iname), DRIVER_NAME"_%u", unit);
+ snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
return iname;
}
@@ -436,59 +436,58 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
#ifndef CONFIG_PRESCAN_RXQ
static void prescan_rxq(struct hfi1_packet *packet) {}
-#else /* CONFIG_PRESCAN_RXQ */
+#else /* !CONFIG_PRESCAN_RXQ */
static int prescan_receive_queue;
static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
struct hfi1_other_headers *ohdr,
- u64 rhf, struct ib_grh *grh)
+ u64 rhf, u32 bth1, struct ib_grh *grh)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u32 bth1;
+ u32 rqpn = 0;
+ u16 rlid;
u8 sc5, svc_type;
- int is_fecn, is_becn;
switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
case IB_QPT_UD:
+ rlid = be16_to_cpu(hdr->lrh[3]);
+ rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
svc_type = IB_CC_SVCTYPE_UD;
break;
- case IB_QPT_UC: /* LATER */
- case IB_QPT_RC: /* LATER */
+ case IB_QPT_UC:
+ rlid = qp->remote_ah_attr.dlid;
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_UC;
+ break;
+ case IB_QPT_RC:
+ rlid = qp->remote_ah_attr.dlid;
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_RC;
+ break;
default:
return;
}
- is_fecn = (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
- HFI1_FECN_MASK;
- is_becn = (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
- HFI1_BECN_MASK;
-
sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
if (rhf_dc_info(rhf))
sc5 |= 0x10;
- if (is_fecn) {
- u32 src_qpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
+ if (bth1 & HFI1_FECN_SMASK) {
u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
u16 dlid = be16_to_cpu(hdr->lrh[1]);
- u16 slid = be16_to_cpu(hdr->lrh[3]);
- return_cnp(ibp, qp, src_qpn, pkey, dlid, slid, sc5, grh);
+ return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc5, grh);
}
- if (is_becn) {
+ if (bth1 & HFI1_BECN_SMASK) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ u32 lqpn = bth1 & HFI1_QPN_MASK;
u8 sl = ibp->sc_to_sl[sc5];
- process_becn(ppd, sl, 0, lqpn, 0, svc_type);
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
-
- /* turn off BECN, or FECN */
- bth1 = be32_to_cpu(ohdr->bth[1]);
- bth1 &= ~(HFI1_FECN_MASK << HFI1_FECN_SHIFT);
- bth1 &= ~(HFI1_BECN_MASK << HFI1_BECN_SHIFT);
- ohdr->bth[1] = cpu_to_be32(bth1);
}
struct ps_mdata {
@@ -508,38 +507,51 @@ static inline void init_ps_mdata(struct ps_mdata *mdata,
mdata->rcd = rcd;
mdata->rsize = packet->rsize;
mdata->maxcnt = packet->maxcnt;
+ mdata->ps_head = packet->rhqoff;
- if (rcd->ps_state.initialized == 0) {
- mdata->ps_head = packet->rhqoff;
- rcd->ps_state.initialized++;
- } else
- mdata->ps_head = rcd->ps_state.ps_head;
-
- if (HFI1_CAP_IS_KSET(DMA_RTAIL)) {
- mdata->ps_tail = packet->hdrqtail;
- mdata->ps_seq = 0; /* not used with DMA_RTAIL */
+ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
+ mdata->ps_tail = get_rcvhdrtail(rcd);
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ mdata->ps_seq = rcd->seq_cnt;
+ else
+ mdata->ps_seq = 0; /* not used with DMA_RTAIL */
} else {
 mdata->ps_tail = 0; /* used only with DMA_RTAIL */
mdata->ps_seq = rcd->seq_cnt;
}
}
-static inline int ps_done(struct ps_mdata *mdata, u64 rhf)
+static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
+ struct hfi1_ctxtdata *rcd)
{
- if (HFI1_CAP_IS_KSET(DMA_RTAIL))
+ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
return mdata->ps_head == mdata->ps_tail;
return mdata->ps_seq != rhf_rcv_seq(rhf);
}
-static inline void update_ps_mdata(struct ps_mdata *mdata)
+static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
+ struct hfi1_ctxtdata *rcd)
{
- struct hfi1_ctxtdata *rcd = mdata->rcd;
+ /*
+ * Control context can potentially receive an invalid rhf.
+ * Drop such packets.
+ */
+ if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
+ return mdata->ps_seq != rhf_rcv_seq(rhf);
+
+ return 0;
+}
+static inline void update_ps_mdata(struct ps_mdata *mdata,
+ struct hfi1_ctxtdata *rcd)
+{
mdata->ps_head += mdata->rsize;
- if (mdata->ps_head > mdata->maxcnt)
+ if (mdata->ps_head >= mdata->maxcnt)
mdata->ps_head = 0;
- rcd->ps_state.ps_head = mdata->ps_head;
- if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
+
+ /* Control context must do seq counting */
+ if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
+ (rcd->ctxt == HFI1_CTRL_CTXT)) {
if (++mdata->ps_seq > 13)
mdata->ps_seq = 1;
}
@@ -571,13 +583,16 @@ static void prescan_rxq(struct hfi1_packet *packet)
struct hfi1_other_headers *ohdr;
struct ib_grh *grh = NULL;
u64 rhf = rhf_to_cpu(rhf_addr);
- u32 etype = rhf_rcv_type(rhf), qpn;
+ u32 etype = rhf_rcv_type(rhf), qpn, bth1;
int is_ecn = 0;
u8 lnh;
- if (ps_done(&mdata, rhf))
+ if (ps_done(&mdata, rhf, rcd))
break;
+ if (ps_skip(&mdata, rhf, rcd))
+ goto next;
+
if (etype != RHF_RCV_TYPE_IB)
goto next;
@@ -593,15 +608,13 @@ static void prescan_rxq(struct hfi1_packet *packet)
} else
goto next; /* just in case */
- is_ecn |= be32_to_cpu(ohdr->bth[1]) &
- (HFI1_FECN_MASK << HFI1_FECN_SHIFT);
- is_ecn |= be32_to_cpu(ohdr->bth[1]) &
- (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
if (!is_ecn)
goto next;
- qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ qpn = bth1 & HFI1_QPN_MASK;
rcu_read_lock();
qp = hfi1_lookup_qpn(ibp, qpn);
@@ -610,14 +623,44 @@ static void prescan_rxq(struct hfi1_packet *packet)
goto next;
}
- process_ecn(qp, hdr, ohdr, rhf, grh);
+ process_ecn(qp, hdr, ohdr, rhf, bth1, grh);
rcu_read_unlock();
+
+ /* turn off BECN, FECN */
+ bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
+ ohdr->bth[1] = cpu_to_be32(bth1);
next:
- update_ps_mdata(&mdata);
+ update_ps_mdata(&mdata, rcd);
}
}
#endif /* CONFIG_PRESCAN_RXQ */
+static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
+{
+ int ret = RCV_PKT_OK;
+
+ /* Set up for the next packet */
+ packet->rhqoff += packet->rsize;
+ if (packet->rhqoff >= packet->maxcnt)
+ packet->rhqoff = 0;
+
+ packet->numpkt++;
+ if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
+ if (thread) {
+ cond_resched();
+ } else {
+ ret = RCV_PKT_LIMIT;
+ this_cpu_inc(*packet->rcd->dd->rcv_limit);
+ }
+ }
+
+ packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rcd->dd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+
+ return ret;
+}
+
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
int ret = RCV_PKT_OK;
@@ -721,8 +764,8 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
*/
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
list_del_init(&qp->rspwait);
- if (qp->r_flags & HFI1_R_RSP_NAK) {
- qp->r_flags &= ~HFI1_R_RSP_NAK;
+ if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
+ qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
hfi1_send_rc_ack(rcd, qp, 0);
}
if (qp->r_flags & HFI1_R_RSP_SEND) {
@@ -791,7 +834,6 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
while (last == RCV_PKT_OK) {
last = process_rcv_packet(&packet, thread);
- hdrqtail = get_rcvhdrtail(rcd);
if (packet.rhqoff == hdrqtail)
last = RCV_PKT_DONE;
process_rcv_update(last, &packet);
@@ -806,7 +848,7 @@ static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
{
int i;
- for (i = 0; i < dd->first_user_ctxt; i++)
+ for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
dd->rcd[i]->do_interrupt =
&handle_receive_interrupt_nodma_rtail;
}
@@ -815,7 +857,7 @@ static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
{
int i;
- for (i = 0; i < dd->first_user_ctxt; i++)
+ for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
dd->rcd[i]->do_interrupt =
&handle_receive_interrupt_dma_rtail;
}
@@ -831,12 +873,16 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
struct hfi1_devdata *dd = rcd->dd;
u32 hdrqtail;
- int last = RCV_PKT_OK, needset = 1;
+ int needset, last = RCV_PKT_OK;
struct hfi1_packet packet;
+ int skip_pkt = 0;
+
+ /* Control context will always use the slow path interrupt handler */
+ needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
init_packet(rcd, &packet);
- if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
+ if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
u32 seq = rhf_rcv_seq(packet.rhf);
if (seq != rcd->seq_cnt) {
@@ -851,6 +897,17 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
goto bail;
}
smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
+
+ /*
+ * Control context can potentially receive an invalid
+ * rhf. Drop such packets.
+ */
+ if (rcd->ctxt == HFI1_CTRL_CTXT) {
+ u32 seq = rhf_rcv_seq(packet.rhf);
+
+ if (seq != rcd->seq_cnt)
+ skip_pkt = 1;
+ }
}
prescan_rxq(&packet);
@@ -868,11 +925,14 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
dd->rhf_offset;
packet.rhf = rhf_to_cpu(packet.rhf_addr);
+ } else if (skip_pkt) {
+ last = skip_rcv_packet(&packet, thread);
+ skip_pkt = 0;
} else {
last = process_rcv_packet(&packet, thread);
}
- if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
+ if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
u32 seq = rhf_rcv_seq(packet.rhf);
if (++rcd->seq_cnt > 13)
@@ -888,6 +948,19 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
} else {
if (packet.rhqoff == hdrqtail)
last = RCV_PKT_DONE;
+ /*
+ * Control context can potentially receive an invalid
+ * rhf. Drop such packets.
+ */
+ if (rcd->ctxt == HFI1_CTRL_CTXT) {
+ u32 seq = rhf_rcv_seq(packet.rhf);
+
+ if (++rcd->seq_cnt > 13)
+ rcd->seq_cnt = 1;
+ if (!last && (seq != rcd->seq_cnt))
+ skip_pkt = 1;
+ }
+
if (needset) {
dd_dev_info(dd,
"Switching to DMA_RTAIL\n");
@@ -1163,20 +1236,20 @@ void handle_eflags(struct hfi1_packet *packet)
struct hfi1_ctxtdata *rcd = packet->rcd;
u32 rte = rhf_rcv_type_err(packet->rhf);
- dd_dev_err(rcd->dd,
- "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
- rcd->ctxt, packet->rhf,
- packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
- packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
- packet->rhf & RHF_DC_ERR ? "dc " : "",
- packet->rhf & RHF_TID_ERR ? "tid " : "",
- packet->rhf & RHF_LEN_ERR ? "len " : "",
- packet->rhf & RHF_ECC_ERR ? "ecc " : "",
- packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
- packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
- rte);
-
rcv_hdrerr(rcd, rcd->ppd, packet);
+ if (rhf_err_flags(packet->rhf))
+ dd_dev_err(rcd->dd,
+ "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
+ rcd->ctxt, packet->rhf,
+ packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
+ packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
+ packet->rhf & RHF_DC_ERR ? "dc " : "",
+ packet->rhf & RHF_TID_ERR ? "tid " : "",
+ packet->rhf & RHF_LEN_ERR ? "len " : "",
+ packet->rhf & RHF_ECC_ERR ? "ecc " : "",
+ packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
+ packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
+ rte);
}
/*
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/staging/rdma/hfi1/efivar.c
new file mode 100644
index 000000000000..7dc5bae220e0
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/efivar.c
@@ -0,0 +1,169 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "efivar.h"
+
+/* GUID for HFI1 variables in EFI */
+#define HFI1_EFIVAR_GUID EFI_GUID(0xc50a953e, 0xa8b2, 0x42a6, \
+ 0xbf, 0x89, 0xd3, 0x33, 0xa6, 0xe9, 0xe6, 0xd4)
+/* largest EFI data size we expect */
+#define EFI_DATA_SIZE 4096
+
+/*
+ * Read the named EFI variable. Return the size of the actual data in *size
+ * and a kmalloc'ed buffer in *return_data. The caller must free the
+ * data. It is guaranteed that *return_data will be NULL and *size = 0
+ * if this routine fails.
+ *
+ * Return 0 on success, -errno on failure.
+ */
+static int read_efi_var(const char *name, unsigned long *size,
+ void **return_data)
+{
+ efi_status_t status;
+ efi_char16_t *uni_name;
+ efi_guid_t guid;
+ unsigned long temp_size;
+ void *temp_buffer;
+ void *data;
+ int i;
+ int ret;
+
+ /* set failure return values */
+ *size = 0;
+ *return_data = NULL;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return -EOPNOTSUPP;
+
+ uni_name = kzalloc(sizeof(efi_char16_t) * (strlen(name) + 1),
+ GFP_KERNEL);
+ temp_buffer = kzalloc(EFI_DATA_SIZE, GFP_KERNEL);
+
+ if (!uni_name || !temp_buffer) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* input: the size of the buffer */
+ temp_size = EFI_DATA_SIZE;
+
+ /* convert ASCII to unicode - it is a 1:1 mapping */
+ for (i = 0; name[i]; i++)
+ uni_name[i] = name[i];
+
+ /* need a variable for our GUID */
+ guid = HFI1_EFIVAR_GUID;
+
+ /* call into EFI runtime services */
+ status = efi.get_variable(
+ uni_name,
+ &guid,
+ NULL,
+ &temp_size,
+ temp_buffer);
+
+ /*
+ * It would be nice to call efi_status_to_err() here, but that
+ * is in the EFIVAR_FS code and may not be compiled in.
+ * However, even that is insufficient since it does not cover
+ * EFI_BUFFER_TOO_SMALL which could be an important return.
+ * For now, just distinguish success from not-found.
+ */
+ ret = status == EFI_SUCCESS ? 0 :
+ status == EFI_NOT_FOUND ? -ENOENT :
+ -EINVAL;
+ if (ret)
+ goto fail;
+
+ /*
+ * We have successfully read the EFI variable into our
+ * temporary buffer. Now allocate a correctly sized
+ * buffer.
+ */
+ data = kmalloc(temp_size, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memcpy(data, temp_buffer, temp_size);
+ *size = temp_size;
+ *return_data = data;
+
+fail:
+ kfree(uni_name);
+ kfree(temp_buffer);
+
+ return ret;
+}
+
+/*
+ * Read an HFI1 EFI variable of the form:
+ * <PCIe address>-<kind>
+ * Return a kmalloc'ed array and the size of the data.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ unsigned long *size, void **return_data)
+{
+ char name[64];
+
+ /* create a common prefix */
+ snprintf(name, sizeof(name), "%04x:%02x:%02x.%x-%s",
+ pci_domain_nr(dd->pcidev->bus),
+ dd->pcidev->bus->number,
+ PCI_SLOT(dd->pcidev->devfn),
+ PCI_FUNC(dd->pcidev->devfn),
+ kind);
+
+ return read_efi_var(name, size, return_data);
+}
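
As a usage sketch (PCI address illustrative): a device at 0000:05:00.0 asked for kind "description" resolves to the EFI variable name "0000:05:00.0-description" under HFI1_EFIVAR_GUID, and the caller owns the returned buffer:

/* Sketch: read the board description for dd; error handling minimal. */
unsigned long size;
void *data;

if (!read_hfi1_efi_var(dd, "description", &size, &data)) {
	/* data holds size bytes of variable payload */
	kfree(data);	/* caller must free */
}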
diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/staging/rdma/hfi1/efivar.h
new file mode 100644
index 000000000000..070706225c51
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/efivar.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _HFI1_EFIVAR_H
+#define _HFI1_EFIVAR_H
+
+#include <linux/efi.h>
+
+#include "hfi.h"
+
+int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
+ unsigned long *size, void **return_data);
+
+#endif /* _HFI1_EFIVAR_H */
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c
index b61d3ae93ed1..fb620c97f592 100644
--- a/drivers/staging/rdma/hfi1/eprom.c
+++ b/drivers/staging/rdma/hfi1/eprom.c
@@ -53,17 +53,26 @@
#include "eprom.h"
/*
- * The EPROM is logically divided into two partitions:
+ * The EPROM is logically divided into three partitions:
* partition 0: the first 128K, visible from PCI ROM BAR
- * partition 1: the rest
+ * partition 1: 4K config file (sector size)
+ * partition 2: the rest
*/
#define P0_SIZE (128 * 1024)
+#define P1_SIZE (4 * 1024)
#define P1_START P0_SIZE
+#define P2_START (P0_SIZE + P1_SIZE)
+
+/* erase sizes supported by the controller */
+#define SIZE_4KB (4 * 1024)
+#define MASK_4KB (SIZE_4KB - 1)
-/* largest erase size supported by the controller */
#define SIZE_32KB (32 * 1024)
#define MASK_32KB (SIZE_32KB - 1)
+#define SIZE_64KB (64 * 1024)
+#define MASK_64KB (SIZE_64KB - 1)
+
/* controller page size, in bytes */
#define EP_PAGE_SIZE 256
#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1)
@@ -75,10 +84,12 @@
#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
#define CMD_READ_SR1 ((0x05 << CMD_SHIFT))
#define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT))
+#define CMD_SECTOR_ERASE_4KB(addr) ((0x20 << CMD_SHIFT) | addr)
#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr)
#define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT))
#define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT))
#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
+#define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr)
/* controller interface speeds */
#define EP_SPEED_FULL 0x2 /* full speed */
@@ -188,28 +199,43 @@ static int erase_chip(struct hfi1_devdata *dd)
}
/*
- * Erase a range using the 32KB erase command.
+ * Erase a range.
*/
-static int erase_32kb_range(struct hfi1_devdata *dd, u32 start, u32 end)
+static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
{
+ u32 end = start + len;
int ret = 0;
if (end < start)
return -EINVAL;
- if ((start & MASK_32KB) || (end & MASK_32KB)) {
+ /* check the end points for the minimum erase */
+ if ((start & MASK_4KB) || (end & MASK_4KB)) {
dd_dev_err(dd,
- "%s: non-aligned range (0x%x,0x%x) for a 32KB erase\n",
+ "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
__func__, start, end);
return -EINVAL;
}
write_enable(dd);
- for (; start < end; start += SIZE_32KB) {
+ while (start < end) {
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_32KB(start));
+ /* check in order of largest to smallest */
+ if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) {
+ write_csr(dd, ASIC_EEP_ADDR_CMD,
+ CMD_SECTOR_ERASE_64KB(start));
+ start += SIZE_64KB;
+ } else if (((start & MASK_32KB) == 0) &&
+ (start + SIZE_32KB <= end)) {
+ write_csr(dd, ASIC_EEP_ADDR_CMD,
+ CMD_SECTOR_ERASE_32KB(start));
+ start += SIZE_32KB;
+ } else { /* 4KB will work */
+ write_csr(dd, ASIC_EEP_ADDR_CMD,
+ CMD_SECTOR_ERASE_4KB(start));
+ start += SIZE_4KB;
+ }
ret = wait_for_not_busy(dd);
if (ret)
goto done;
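
A worked pass through the loop above, with illustrative values:

/*
 * erase_range(dd, 0x1000, 0x23000) walks [0x1000, 0x24000) as:
 *   4KB erases at 0x1000..0x7000    (start not yet 32KB-aligned)
 *   32KB erase at 0x8000            (aligned, and 32KB still fits)
 *   64KB erase at 0x10000           (aligned, and 64KB still fits)
 *   4KB erases at 0x20000..0x23000  (tail smaller than 32KB)
 * Each step takes the largest erase that is aligned and fits.
 */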
@@ -309,6 +335,18 @@ done:
return ret;
}
+/* convert a range composite to a length, in bytes */
+static inline u32 extract_rlen(u32 composite)
+{
+ return (composite & 0xffff) * EP_PAGE_SIZE;
+}
+
+/* convert a range composite to a start, in bytes */
+static inline u32 extract_rstart(u32 composite)
+{
+ return (composite >> 16) * EP_PAGE_SIZE;
+}
+
/*
* Perform the given operation on the EPROM. Called from user space. The
* user credentials have already been checked.
@@ -319,6 +357,8 @@ int handle_eprom_command(const struct hfi1_cmd *cmd)
{
struct hfi1_devdata *dd;
u32 dev_id;
+ u32 rlen; /* range length */
+ u32 rstart; /* range start */
int ret = 0;
/*
@@ -364,54 +404,29 @@ int handle_eprom_command(const struct hfi1_cmd *cmd)
sizeof(u32)))
ret = -EFAULT;
break;
+
case HFI1_CMD_EP_ERASE_CHIP:
ret = erase_chip(dd);
break;
- case HFI1_CMD_EP_ERASE_P0:
- if (cmd->len != P0_SIZE) {
- ret = -ERANGE;
- break;
- }
- ret = erase_32kb_range(dd, 0, cmd->len);
- break;
- case HFI1_CMD_EP_ERASE_P1:
- /* check for overflow */
- if (P1_START + cmd->len > ASIC_EEP_ADDR_CMD_EP_ADDR_MASK) {
- ret = -ERANGE;
- break;
- }
- ret = erase_32kb_range(dd, P1_START, P1_START + cmd->len);
- break;
- case HFI1_CMD_EP_READ_P0:
- if (cmd->len != P0_SIZE) {
- ret = -ERANGE;
- break;
- }
- ret = read_length(dd, 0, cmd->len, cmd->addr);
- break;
- case HFI1_CMD_EP_READ_P1:
- /* check for overflow */
- if (P1_START + cmd->len > ASIC_EEP_ADDR_CMD_EP_ADDR_MASK) {
- ret = -ERANGE;
- break;
- }
- ret = read_length(dd, P1_START, cmd->len, cmd->addr);
+
+ case HFI1_CMD_EP_ERASE_RANGE:
+ rlen = extract_rlen(cmd->len);
+ rstart = extract_rstart(cmd->len);
+ ret = erase_range(dd, rstart, rlen);
break;
- case HFI1_CMD_EP_WRITE_P0:
- if (cmd->len > P0_SIZE) {
- ret = -ERANGE;
- break;
- }
- ret = write_length(dd, 0, cmd->len, cmd->addr);
+
+ case HFI1_CMD_EP_READ_RANGE:
+ rlen = extract_rlen(cmd->len);
+ rstart = extract_rstart(cmd->len);
+ ret = read_length(dd, rstart, rlen, cmd->addr);
break;
- case HFI1_CMD_EP_WRITE_P1:
- /* check for overflow */
- if (P1_START + cmd->len > ASIC_EEP_ADDR_CMD_EP_ADDR_MASK) {
- ret = -ERANGE;
- break;
- }
- ret = write_length(dd, P1_START, cmd->len, cmd->addr);
+
+ case HFI1_CMD_EP_WRITE_RANGE:
+ rlen = extract_rlen(cmd->len);
+ rstart = extract_rstart(cmd->len);
+ ret = write_length(dd, rstart, rlen, cmd->addr);
break;
+
default:
dd_dev_err(dd, "%s: unexpected command %d\n",
__func__, cmd->type);
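
A note on the new range encoding used by the *_RANGE commands above: the command's single u32 length field carries both endpoints, with the start in the upper 16 bits and the length in the lower 16 bits, each expressed in 256-byte controller pages. Below is a minimal sketch of the caller-side packing, the inverse of extract_rstart()/extract_rlen(); the helper name is hypothetical and not part of this patch:

    /* pack a byte-addressed range into the composite consumed above;
     * assumes both halves fit in 16 bits of 256-byte pages */
    static inline u32 make_range_composite(u32 start, u32 len)
    {
            return ((start / EP_PAGE_SIZE) << 16) |
                   ((len / EP_PAGE_SIZE) & 0xffff);
    }

With 16 bits of 256-byte pages per half, one command can describe ranges up to 16 MB, which comfortably covers the erase/read/write partition cases replaced here.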
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index aae9826ec62b..d57d549052c8 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -47,20 +47,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
-#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
-#include <linux/swap.h>
#include <linux/vmalloc.h>
-#include <linux/highmem.h>
#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <asm/pgtable.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/cred.h>
-#include <linux/uio.h>
#include "hfi.h"
#include "pio.h"
@@ -68,6 +58,7 @@
#include "common.h"
#include "trace.h"
#include "user_sdma.h"
+#include "user_exp_rcv.h"
#include "eprom.h"
#undef pr_fmt
@@ -170,18 +161,6 @@ enum mmap_types {
HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
-#define EXP_TID_SET(field, value) \
- (((value) & EXP_TID_TID##field##_MASK) << \
- EXP_TID_TID##field##_SHIFT)
-#define EXP_TID_CLEAR(tid, field) { \
- (tid) &= ~(EXP_TID_TID##field##_MASK << \
- EXP_TID_TID##field##_SHIFT); \
- }
-#define EXP_TID_RESET(tid, field, value) do { \
- EXP_TID_CLEAR(tid, field); \
- (tid) |= EXP_TID_SET(field, value); \
- } while (0)
-
#define dbg(fmt, ...) \
pr_info(fmt, ##__VA_ARGS__)
@@ -204,7 +183,8 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
size_t count, loff_t *offset)
{
const struct hfi1_cmd __user *ucmd;
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_cmd cmd;
struct hfi1_user_info uinfo;
struct hfi1_tid_info tinfo;
@@ -254,12 +234,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
break;
case HFI1_CMD_EP_INFO:
case HFI1_CMD_EP_ERASE_CHIP:
- case HFI1_CMD_EP_ERASE_P0:
- case HFI1_CMD_EP_ERASE_P1:
- case HFI1_CMD_EP_READ_P0:
- case HFI1_CMD_EP_READ_P1:
- case HFI1_CMD_EP_WRITE_P0:
- case HFI1_CMD_EP_WRITE_P1:
+ case HFI1_CMD_EP_ERASE_RANGE:
+ case HFI1_CMD_EP_READ_RANGE:
+ case HFI1_CMD_EP_WRITE_RANGE:
uctxt_required = 0; /* assigned user context not required */
must_be_root = 1; /* validate user */
copy = 0;
@@ -338,17 +315,17 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
ret = exp_tid_free(fp, &tinfo);
break;
case HFI1_CMD_RECV_CTRL:
- ret = manage_rcvq(uctxt, subctxt_fp(fp), (int)user_val);
+ ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
break;
case HFI1_CMD_POLL_TYPE:
uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
break;
case HFI1_CMD_ACK_EVENT:
- ret = user_event_ack(uctxt, subctxt_fp(fp), user_val);
+ ret = user_event_ack(uctxt, fd->subctxt, user_val);
break;
case HFI1_CMD_SET_PKEY:
if (HFI1_CAP_IS_USET(PKEY_CHECK))
- ret = set_ctxt_pkey(uctxt, subctxt_fp(fp), user_val);
+ ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
else
ret = -EPERM;
break;
@@ -413,12 +390,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
}
case HFI1_CMD_EP_INFO:
case HFI1_CMD_EP_ERASE_CHIP:
- case HFI1_CMD_EP_ERASE_P0:
- case HFI1_CMD_EP_ERASE_P1:
- case HFI1_CMD_EP_READ_P0:
- case HFI1_CMD_EP_READ_P1:
- case HFI1_CMD_EP_WRITE_P0:
- case HFI1_CMD_EP_WRITE_P1:
+ case HFI1_CMD_EP_ERASE_RANGE:
+ case HFI1_CMD_EP_READ_RANGE:
+ case HFI1_CMD_EP_WRITE_RANGE:
ret = handle_eprom_command(&cmd);
break;
}
@@ -431,13 +405,13 @@ bail:
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
- struct hfi1_user_sdma_pkt_q *pq;
- struct hfi1_user_sdma_comp_q *cq;
+ struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
+ struct hfi1_user_sdma_pkt_q *pq = fd->pq;
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
int ret = 0, done = 0, reqs = 0;
unsigned long dim = from->nr_segs;
- if (!user_sdma_comp_fp(kiocb->ki_filp) ||
- !user_sdma_pkt_fp(kiocb->ki_filp)) {
+ if (!cq || !pq) {
ret = -EIO;
goto done;
}
@@ -448,10 +422,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
}
hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
- ctxt_fp(kiocb->ki_filp)->ctxt, subctxt_fp(kiocb->ki_filp),
- dim);
- pq = user_sdma_pkt_fp(kiocb->ki_filp);
- cq = user_sdma_comp_fp(kiocb->ki_filp);
+ fd->uctxt->ctxt, fd->subctxt, dim);
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
ret = -ENOSPC;
@@ -476,7 +447,8 @@ done:
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
- struct hfi1_ctxtdata *uctxt;
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd;
unsigned long flags, pfn;
u64 token = vma->vm_pgoff << PAGE_SHIFT,
@@ -486,7 +458,6 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
int ret = 0;
u16 ctxt;
- uctxt = ctxt_fp(fp);
if (!is_valid_mmap(token) || !uctxt ||
!(vma->vm_flags & VM_SHARED)) {
ret = -EINVAL;
@@ -496,7 +467,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
type = HFI1_MMAP_TOKEN_GET(TYPE, token);
- if (ctxt != uctxt->ctxt || subctxt != subctxt_fp(fp)) {
+ if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
ret = -EINVAL;
goto done;
}
@@ -660,13 +631,12 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vmf = 1;
break;
case SDMA_COMP: {
- struct hfi1_user_sdma_comp_q *cq;
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
- if (!user_sdma_comp_fp(fp)) {
+ if (!cq) {
ret = -EFAULT;
goto done;
}
- cq = user_sdma_comp_fp(fp);
memaddr = (u64)cq->comps;
memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
flags |= VM_IO | VM_DONTEXPAND;
@@ -680,16 +650,16 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
if ((vma->vm_end - vma->vm_start) != memlen) {
hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
- uctxt->ctxt, subctxt_fp(fp),
+ uctxt->ctxt, fd->subctxt,
(vma->vm_end - vma->vm_start), memlen);
ret = -EINVAL;
goto done;
}
vma->vm_flags = flags;
- dd_dev_info(dd,
- "%s: %u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
- __func__, ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
+ hfi1_cdbg(PROC,
+ "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
+ ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
vma->vm_end - vma->vm_start, vma->vm_flags);
pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
if (vmf) {
@@ -730,7 +700,7 @@ static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
struct hfi1_ctxtdata *uctxt;
unsigned pollflag;
- uctxt = ctxt_fp(fp);
+ uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
if (!uctxt)
pollflag = POLLERR;
else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
@@ -761,8 +731,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
flush_wc();
/* drain user sdma queue */
- if (fdata->pq)
- hfi1_user_sdma_free_queues(fdata);
+ hfi1_user_sdma_free_queues(fdata);
/*
* Clear any left over, unhandled events so the next process that
@@ -874,6 +843,14 @@ done:
return ret;
}
+/* return true if the device is available for general use */
+static int usable_device(struct hfi1_devdata *dd)
+{
+ struct hfi1_pportdata *ppd = dd->pport;
+
+ return driver_lstate(ppd) == IB_PORT_ACTIVE;
+}
+
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
int devno, unsigned alg)
{
@@ -903,7 +880,11 @@ static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
for (dev = 0; dev < devmax; dev++) {
pdd = hfi1_lookup(dev);
- if (pdd && pdd->freectxts &&
+ if (!pdd)
+ continue;
+ if (!usable_device(pdd))
+ continue;
+ if (pdd->freectxts &&
pdd->freectxts > free) {
dd = pdd;
free = pdd->freectxts;
@@ -912,7 +893,11 @@ static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
} else {
for (dev = 0; dev < devmax; dev++) {
pdd = hfi1_lookup(dev);
- if (pdd && pdd->freectxts) {
+ if (!pdd)
+ continue;
+ if (!usable_device(pdd))
+ continue;
+ if (pdd->freectxts) {
dd = pdd;
break;
}
@@ -930,13 +915,13 @@ static int find_shared_ctxt(struct file *fp,
{
int devmax, ndev, i;
int ret = 0;
+ struct hfi1_filedata *fd = fp->private_data;
devmax = hfi1_count_units(NULL, NULL);
for (ndev = 0; ndev < devmax; ndev++) {
struct hfi1_devdata *dd = hfi1_lookup(ndev);
- /* device portion of usable() */
if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
continue;
for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
@@ -959,10 +944,10 @@ static int find_shared_ctxt(struct file *fp,
ret = -EINVAL;
goto done;
}
- ctxt_fp(fp) = uctxt;
- subctxt_fp(fp) = uctxt->cnt++;
- uctxt->subpid[subctxt_fp(fp)] = current->pid;
- uctxt->active_slaves |= 1 << subctxt_fp(fp);
+ fd->uctxt = uctxt;
+ fd->subctxt = uctxt->cnt++;
+ uctxt->subpid[fd->subctxt] = current->pid;
+ uctxt->active_slaves |= 1 << fd->subctxt;
ret = 1;
goto done;
}
@@ -975,6 +960,7 @@ done:
static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
struct hfi1_user_info *uinfo)
{
+ struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt;
unsigned ctxt;
int ret;
@@ -1011,8 +997,8 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
if (!uctxt->sc)
return -ENOMEM;
- dbg("allocated send context %u(%u)\n", uctxt->sc->sw_index,
- uctxt->sc->hw_context);
+ hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
+ uctxt->sc->hw_context);
ret = sc_enable(uctxt->sc);
if (ret)
return ret;
@@ -1022,7 +1008,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
* This has to be done here so the rest of the sub-contexts find the
* proper master.
*/
- if (uinfo->subctxt_cnt && !subctxt_fp(fp)) {
+ if (uinfo->subctxt_cnt && !fd->subctxt) {
ret = init_subctxts(uctxt, uinfo);
/*
* On error, we don't need to disable and de-allocate the
@@ -1042,7 +1028,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
spin_lock_init(&uctxt->sdma_qlock);
hfi1_stats.sps_ctxts++;
dd->freectxts--;
- ctxt_fp(fp) = uctxt;
+ fd->uctxt = uctxt;
return 0;
}
@@ -1106,7 +1092,8 @@ static int user_init(struct file *fp)
{
int ret;
unsigned int rcvctrl_ops = 0;
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
/* make sure that the context has already been setup */
if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
@@ -1118,7 +1105,7 @@ static int user_init(struct file *fp)
* Subctxts don't need to initialize anything since master
* has done it.
*/
- if (subctxt_fp(fp)) {
+ if (fd->subctxt) {
ret = wait_event_interruptible(uctxt->wait,
!test_bit(HFI1_CTXT_MASTER_UNINIT,
&uctxt->event_flags));
@@ -1178,8 +1165,8 @@ done:
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
{
struct hfi1_ctxt_info cinfo;
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
int ret = 0;
memset(&cinfo, 0, sizeof(cinfo));
@@ -1189,7 +1176,7 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
cinfo.num_active = hfi1_count_active_units();
cinfo.unit = uctxt->dd->unit;
cinfo.ctxt = uctxt->ctxt;
- cinfo.subctxt = subctxt_fp(fp);
+ cinfo.subctxt = fd->subctxt;
cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
uctxt->dd->rcv_entries.group_size) +
uctxt->expected_count;
@@ -1201,10 +1188,10 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
cinfo.egrtids = uctxt->egrbufs.alloced;
cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
- cinfo.sdma_ring_size = user_sdma_comp_fp(fp)->nentries;
+ cinfo.sdma_ring_size = fd->cq->nentries;
cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
- trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, subctxt_fp(fp), cinfo);
+ trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
ret = -EFAULT;
done:
@@ -1213,7 +1200,8 @@ done:
static int setup_ctxt(struct file *fp)
{
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
int ret = 0;
@@ -1222,7 +1210,7 @@ static int setup_ctxt(struct file *fp)
* programming of eager buffers. This is done if context sharing
* is not requested or by the master process.
*/
- if (!uctxt->subctxt_cnt || !subctxt_fp(fp)) {
+ if (!uctxt->subctxt_cnt || !fd->subctxt) {
ret = hfi1_init_ctxt(uctxt->sc);
if (ret)
goto done;
@@ -1234,7 +1222,7 @@ static int setup_ctxt(struct file *fp)
ret = hfi1_setup_eagerbufs(uctxt);
if (ret)
goto done;
- if (uctxt->subctxt_cnt && !subctxt_fp(fp)) {
+ if (uctxt->subctxt_cnt && !fd->subctxt) {
ret = setup_subctxt(uctxt);
if (ret)
goto done;
@@ -1277,7 +1265,7 @@ static int setup_ctxt(struct file *fp)
uctxt->tidusemap[uctxt->tidmapcnt - 1] =
~((1ULL << (uctxt->numtidgroups %
BITS_PER_LONG)) - 1);
- trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0,
+ trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0,
uctxt->tidusemap, uctxt->tidmapcnt);
}
ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
@@ -1292,7 +1280,8 @@ done:
static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
{
struct hfi1_base_info binfo;
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
ssize_t sz;
unsigned offset;
@@ -1314,50 +1303,50 @@ static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
offset = ((u64)uctxt->sc->hw_free -
(u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
- subctxt_fp(fp), offset);
+ fd->subctxt, offset);
binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
- subctxt_fp(fp),
+ fd->subctxt,
uctxt->sc->base_addr);
binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
uctxt->ctxt,
- subctxt_fp(fp),
+ fd->subctxt,
uctxt->sc->base_addr);
binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
- subctxt_fp(fp),
+ fd->subctxt,
uctxt->rcvhdrq);
binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
- subctxt_fp(fp),
+ fd->subctxt,
uctxt->egrbufs.rcvtids[0].phys);
binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
- subctxt_fp(fp), 0);
+ fd->subctxt, 0);
/*
* user regs are at
* (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
*/
binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
- subctxt_fp(fp), 0);
+ fd->subctxt, 0);
offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + subctxt_fp(fp)) *
+ HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
sizeof(*dd->events));
binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
- subctxt_fp(fp),
+ fd->subctxt,
offset);
binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
- subctxt_fp(fp),
+ fd->subctxt,
dd->status);
if (HFI1_CAP_IS_USET(DMA_RTAIL))
binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
- subctxt_fp(fp), 0);
+ fd->subctxt, 0);
if (uctxt->subctxt_cnt) {
binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
uctxt->ctxt,
- subctxt_fp(fp), 0);
+ fd->subctxt, 0);
binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
uctxt->ctxt,
- subctxt_fp(fp), 0);
+ fd->subctxt, 0);
binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
uctxt->ctxt,
- subctxt_fp(fp), 0);
+ fd->subctxt, 0);
}
sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
if (copy_to_user(ubase, &binfo, sz))
@@ -1368,7 +1357,8 @@ static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
static unsigned int poll_urgent(struct file *fp,
struct poll_table_struct *pt)
{
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
unsigned pollflag;
@@ -1390,7 +1380,8 @@ static unsigned int poll_urgent(struct file *fp,
static unsigned int poll_next(struct file *fp,
struct poll_table_struct *pt)
{
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
unsigned pollflag;
@@ -1562,7 +1553,8 @@ static inline unsigned num_free_groups(unsigned long map, u16 *start)
static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
{
int ret = 0;
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
unsigned tid, mapped = 0, npages, ngroups, exp_groups,
tidpairs = uctxt->expected_count / 2;
@@ -1671,8 +1663,8 @@ static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
* Now that we know how many free RcvArray entries we have,
* we can pin that many user pages.
*/
- ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE),
- pinned, pages);
+ ret = hfi1_acquire_user_pages(vaddr + (mapped * PAGE_SIZE),
+ pinned, true, pages);
if (ret) {
/*
* We can't continue because the pages array won't be
@@ -1718,7 +1710,7 @@ static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
pages[pmapped], 0,
tidsize, PCI_DMA_FROMDEVICE);
trace_hfi1_exp_rcv_set(uctxt->ctxt,
- subctxt_fp(fp),
+ fd->subctxt,
tid, vaddr,
phys[pmapped],
pages[pmapped]);
@@ -1763,7 +1755,7 @@ static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
(((useidx & 0xffffff) << 16) |
((bitidx + bits_used) & 0xffffff)));
}
- trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0, uctxt->tidusemap,
+ trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0, uctxt->tidusemap,
uctxt->tidmapcnt);
done:
@@ -1792,7 +1784,8 @@ bail:
static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
{
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
unsigned long tidmap[uctxt->tidmapcnt];
struct page **pages;
@@ -1828,7 +1821,7 @@ static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
hfi1_put_tid(dd, tid, PT_INVALID,
0, 0);
trace_hfi1_exp_rcv_free(uctxt->ctxt,
- subctxt_fp(fp),
+ fd->subctxt,
tid, phys[i],
pages[i]);
pci_unmap_page(dd->pcidev, phys[i],
@@ -1840,12 +1833,12 @@ static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
}
}
flush_wc();
- hfi1_release_user_pages(pshadow, pcount);
+ hfi1_release_user_pages(pshadow, pcount, true);
clear_bit(bitidx, &uctxt->tidusemap[idx]);
map &= ~(1ULL<<bitidx);
}
}
- trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 1, uctxt->tidusemap,
+ trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 1, uctxt->tidusemap,
uctxt->tidmapcnt);
done:
return ret;
@@ -1869,7 +1862,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
uctxt->physshadow[tid] = 0;
uctxt->tid_pg_list[tid] = NULL;
pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
- hfi1_release_user_pages(&p, 1);
+ hfi1_release_user_pages(&p, 1, true);
}
}
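
The bulk of the file_ops.c changes follow one pattern: the old fp-dereferencing macros (ctxt_fp(), subctxt_fp(), and friends, removed from hfi.h below) are replaced by fetching the per-open hfi1_filedata once and using its members. A sketch of the idiom, using only fields visible in this diff; the callee name is made up for illustration:

    static int example_handler(struct file *fp)
    {
            struct hfi1_filedata *fd = fp->private_data;
            struct hfi1_ctxtdata *uctxt = fd->uctxt;

            /* one private_data cast up front instead of one hidden
             * inside every macro invocation */
            return do_context_work(uctxt, fd->subctxt);  /* hypothetical */
    }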
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c
index b4bdcf341aac..28ae42faa018 100644
--- a/drivers/staging/rdma/hfi1/firmware.c
+++ b/drivers/staging/rdma/hfi1/firmware.c
@@ -68,6 +68,10 @@
#define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
#define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
#define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
+#define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw"
+#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
+#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
+#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
static uint fw_8051_load = 1;
static uint fw_fabric_serdes_load = 1;
@@ -158,7 +162,8 @@ struct firmware_details {
static DEFINE_MUTEX(fw_mutex);
enum fw_state {
FW_EMPTY,
- FW_ACQUIRED,
+ FW_TRY,
+ FW_FINAL,
FW_ERR
};
static enum fw_state fw_state = FW_EMPTY;
@@ -428,8 +433,8 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
if (ret) {
- dd_dev_err(dd, "cannot load firmware \"%s\", err %d\n",
- name, ret);
+ dd_dev_err(dd, "cannot find firmware \"%s\", err %d\n",
+ name, ret);
return ret;
}
@@ -539,28 +544,53 @@ done:
static void dispose_one_firmware(struct firmware_details *fdet)
{
release_firmware(fdet->fw);
- fdet->fw = NULL;
+ /* erase all previous information */
+ memset(fdet, 0, sizeof(*fdet));
}
/*
- * Called by all HFIs when loading their firmware - i.e. device probe time.
- * The first one will do the actual firmware load. Use a mutex to resolve
- * any possible race condition.
+ * Obtain the four firmware images from the OS. All must be obtained at
+ * once or not at all. If called with the firmware state in FW_TRY, use
+ * alternate names.
+ * On exit, this routine will have set the firmware state to one of FW_TRY,
+ * FW_FINAL, or FW_ERR.
*
- * The call to this routine cannot be moved to driver load because the kernel
- * call request_firmware() requires a device which is only available after
- * the first device probe.
+ * Must be holding fw_mutex.
*/
-static int obtain_firmware(struct hfi1_devdata *dd)
+static void __obtain_firmware(struct hfi1_devdata *dd)
{
int err = 0;
- mutex_lock(&fw_mutex);
- if (fw_state == FW_ACQUIRED) {
- goto done; /* already acquired */
- } else if (fw_state == FW_ERR) {
- err = fw_err;
- goto done; /* already tried and failed */
+ if (fw_state == FW_FINAL) /* nothing more to obtain */
+ return;
+ if (fw_state == FW_ERR) /* already in error */
+ return;
+
+ /* fw_state is FW_EMPTY or FW_TRY */
+retry:
+ if (fw_state == FW_TRY) {
+ /*
+ * We tried the original and it failed. Move to the
+ * alternate.
+ */
+ dd_dev_info(dd, "using alternate firmware names\n");
+ /*
+ * Let others run. Some systems, when missing firmware, do
+ * something that holds for 30 seconds. If we do that twice
+ * in a row it triggers a task-blocked warning.
+ */
+ cond_resched();
+ if (fw_8051_load)
+ dispose_one_firmware(&fw_8051);
+ if (fw_fabric_serdes_load)
+ dispose_one_firmware(&fw_fabric);
+ if (fw_sbus_load)
+ dispose_one_firmware(&fw_sbus);
+ if (fw_pcie_serdes_load)
+ dispose_one_firmware(&fw_pcie);
+ fw_8051_name = ALT_FW_8051_NAME_ASIC;
+ fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
+ fw_sbus_name = ALT_FW_SBUS_NAME;
+ fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
}
if (fw_8051_load) {
@@ -588,27 +618,82 @@ static int obtain_firmware(struct hfi1_devdata *dd)
goto done;
}
+done:
+ if (err) {
+ /* had a problem obtaining one of the firmware images */
+ if (fw_state == FW_EMPTY) {
+ /* retry with alternate */
+ fw_state = FW_TRY;
+ goto retry;
+ }
+ fw_state = FW_ERR;
+ fw_err = -ENOENT;
+ } else {
+ /* success */
+ if (fw_state == FW_EMPTY)
+ fw_state = FW_TRY; /* may retry later */
+ else
+ fw_state = FW_FINAL; /* cannot try again */
+ }
+}
+
+/*
+ * Called by all HFIs when loading their firmware - i.e. device probe time.
+ * The first one will do the actual firmware load. Use a mutex to resolve
+ * any possible race condition.
+ *
+ * The call to this routine cannot be moved to driver load because the kernel
+ * call request_firmware() requires a device which is only available after
+ * the first device probe.
+ */
+static int obtain_firmware(struct hfi1_devdata *dd)
+{
+ unsigned long timeout;
+ int err = 0;
+
+ mutex_lock(&fw_mutex);
+
+ /* 40s timeout to cover the long stall some systems have when firmware is missing */
+ timeout = jiffies + msecs_to_jiffies(40000);
+ while (fw_state == FW_TRY) {
+ /*
+ * Another device is trying the firmware. Wait until it
+ * decides what works (or not).
+ */
+ if (time_after(jiffies, timeout)) {
+ /* waited too long */
+ dd_dev_err(dd, "Timeout waiting for firmware try");
+ fw_state = FW_ERR;
+ fw_err = -ETIMEDOUT;
+ break;
+ }
+ mutex_unlock(&fw_mutex);
+ msleep(20); /* arbitrary delay */
+ mutex_lock(&fw_mutex);
+ }
+ /* not in FW_TRY state */
+
+ if (fw_state == FW_FINAL)
+ goto done; /* already acquired */
+ else if (fw_state == FW_ERR)
+ goto done; /* already tried and failed */
+ /* fw_state is FW_EMPTY */
+
+ /* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
+ __obtain_firmware(dd);
+
if (platform_config_load) {
platform_config = NULL;
err = request_firmware(&platform_config, platform_config_name,
&dd->pcidev->dev);
- if (err) {
- err = 0;
+ if (err)
platform_config = NULL;
- }
}
- /* success */
- fw_state = FW_ACQUIRED;
-
done:
- if (err) {
- fw_err = err;
- fw_state = FW_ERR;
- }
mutex_unlock(&fw_mutex);
- return err;
+ return fw_err;
}
/*
@@ -638,6 +723,38 @@ void dispose_firmware(void)
}
/*
+ * Called with the result of a firmware download.
+ *
+ * Return 1 to retry loading the firmware, 0 to stop.
+ */
+static int retry_firmware(struct hfi1_devdata *dd, int load_result)
+{
+ int retry;
+
+ mutex_lock(&fw_mutex);
+
+ if (load_result == 0) {
+ /*
+ * The load succeeded, so expect all others to do the same.
+ * Do not retry again.
+ */
+ if (fw_state == FW_TRY)
+ fw_state = FW_FINAL;
+ retry = 0; /* do NOT retry */
+ } else if (fw_state == FW_TRY) {
+ /* load failed, obtain alternate firmware */
+ __obtain_firmware(dd);
+ retry = (fw_state == FW_FINAL);
+ } else {
+ /* else in FW_FINAL or FW_ERR, no retry in either case */
+ retry = 0;
+ }
+
+ mutex_unlock(&fw_mutex);
+ return retry;
+}
+
+/*
* Write a block of data to a given array CSR. All calls will be in
* multiples of 8 bytes.
*/
@@ -951,7 +1068,7 @@ void sbus_request(struct hfi1_devdata *dd,
static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
{
/* only needed on A0 */
- if (!is_a0(dd))
+ if (!is_ax(dd))
return;
dd_dev_info(dd, "Turning off spicos:%s%s\n",
@@ -1248,7 +1365,9 @@ int load_firmware(struct hfi1_devdata *dd)
fabric_serdes_addrs[dd->hfi1_id],
NUM_FABRIC_SERDES);
turn_off_spicos(dd, SPICO_FABRIC);
- ret = load_fabric_serdes_firmware(dd, &fw_fabric);
+ do {
+ ret = load_fabric_serdes_firmware(dd, &fw_fabric);
+ } while (retry_firmware(dd, ret));
clear_sbus_fast_mode(dd);
release_hw_mutex(dd);
@@ -1257,7 +1376,9 @@ int load_firmware(struct hfi1_devdata *dd)
}
if (fw_8051_load) {
- ret = load_8051_firmware(dd, &fw_8051);
+ do {
+ ret = load_8051_firmware(dd, &fw_8051);
+ } while (retry_firmware(dd, ret));
if (ret)
return ret;
}
@@ -1568,9 +1689,11 @@ int load_pcie_firmware(struct hfi1_devdata *dd)
/* both firmware loads below use the SBus */
set_sbus_fast_mode(dd);
- if (fw_sbus_load && (dd->flags & HFI1_DO_INIT_ASIC)) {
+ if (fw_sbus_load) {
turn_off_spicos(dd, SPICO_SBUS);
- ret = load_sbus_firmware(dd, &fw_sbus);
+ do {
+ ret = load_sbus_firmware(dd, &fw_sbus);
+ } while (retry_firmware(dd, ret));
if (ret)
goto done;
}
@@ -1581,7 +1704,9 @@ int load_pcie_firmware(struct hfi1_devdata *dd)
pcie_serdes_broadcast[dd->hfi1_id],
pcie_serdes_addrs[dd->hfi1_id],
NUM_PCIE_SERDES);
- ret = load_pcie_serdes_firmware(dd, &fw_pcie);
+ do {
+ ret = load_pcie_serdes_firmware(dd, &fw_pcie);
+ } while (retry_firmware(dd, ret));
if (ret)
goto done;
}
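
Taken together, the firmware.c changes replace the single FW_ACQUIRED state with a small state machine. A condensed sketch, distilled from __obtain_firmware() and retry_firmware() above rather than taken verbatim from the patch:

    /*
     * FW_EMPTY --obtain ok--> FW_TRY --first download ok--> FW_FINAL
     *     |                     |
     *     |  originals missing: |  download failed: dispose the
     *     |  switch to the      |  originals, obtain the _d.fw
     *     |  _d.fw names and    |  alternates, retry the download
     *     |  retry the obtain   |  once
     *     v                     v
     *   FW_ERR if the alternate names cannot be obtained either
     */

FW_TRY therefore means "files obtained but not yet proven by a successful download"; the first successful load of any image promotes the set to FW_FINAL, after which no further retries occur.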
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h
index 190f7a2f6773..2611bb2e764d 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/staging/rdma/hfi1/hfi.h
@@ -100,6 +100,26 @@ extern unsigned long hfi1_cap_mask;
HFI1_CAP_MISC_MASK)
/*
+ * The control context is always 0 and handles error packets.
+ * It also handles VL15 and multicast packets.
+ */
+#define HFI1_CTRL_CTXT 0
+
+/*
+ * Driver context will store software counters for each of the events
+ * associated with these status registers
+ */
+#define NUM_CCE_ERR_STATUS_COUNTERS 41
+#define NUM_RCV_ERR_STATUS_COUNTERS 64
+#define NUM_MISC_ERR_STATUS_COUNTERS 13
+#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
+#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
+#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
+#define NUM_SEND_ERR_STATUS_COUNTERS 3
+#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
+#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24
+
+/*
* per driver stats, either not device nor port-specific, or
* summed over all of the devices and ports.
* They are described by name via ipathfs filesystem, so layout
@@ -139,15 +159,6 @@ extern const struct pci_error_handlers hfi1_pci_err_handler;
struct hfi1_opcode_stats_perctx;
#endif
-/*
- * struct ps_state keeps state associated with RX queue "prescanning"
- * (prescanning for FECNs, and BECNs), if prescanning is in use.
- */
-struct ps_state {
- u32 ps_head;
- int initialized;
-};
-
struct ctxt_eager_bufs {
ssize_t size; /* total size of eager buffers */
u32 count; /* size of buffers array */
@@ -243,7 +254,7 @@ struct hfi1_ctxtdata {
/* chip offset of PIO buffers for this ctxt */
u32 piobufs;
/* per-context configuration flags */
- u16 flags;
+ u32 flags;
/* per-context event flags for fileops/intr communication */
unsigned long event_flags;
/* WAIT_RCV that timed out, no interrupt */
@@ -302,10 +313,6 @@ struct hfi1_ctxtdata {
struct list_head sdma_queues;
spinlock_t sdma_qlock;
-#ifdef CONFIG_PRESCAN_RXQ
- struct ps_state ps_state;
-#endif /* CONFIG_PRESCAN_RXQ */
-
/*
* The interrupt handler for a particular receive context can vary
 * throughout its lifetime. This is not a lock-protected data member so
@@ -706,6 +713,8 @@ struct hfi1_pportdata {
u64 link_downed;
/* number of times link retrained successfully */
u64 link_up;
+ /* number of times an unknown frame was reported on the link */
+ u64 unknown_frame_count;
/* port_ltp_crc_mode is returned in 'portinfo' MADs */
u16 port_ltp_crc_mode;
/* port_crc_mode_enabled is the crc we support */
@@ -1053,6 +1062,26 @@ struct hfi1_devdata {
atomic_t drop_packet;
u8 do_drop;
+ /*
+ * Software counters for the status bits defined by the
+ * associated error status registers
+ */
+ u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
+ u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
+ u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
+ u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
+ u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
+ u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
+ u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];
+
+ /* Software counter that spans all contexts */
+ u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
+ /* Software counter that spans all DMA engines */
+ u64 sw_send_dma_eng_err_status_cnt[
+ NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
+ /* Software counter that aggregates all cce_err_status errors */
+ u64 sw_cce_err_status_aggregate;
+
/* receive interrupt functions */
rhf_rcv_function_ptr *rhf_rcv_function_map;
rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
@@ -1061,12 +1090,10 @@ struct hfi1_devdata {
* Handlers for outgoing data so that snoop/capture does not
* have to have its hooks in the send path
*/
- int (*process_pio_send)(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
- u32 hdrwords, struct hfi1_sge_state *ss,
- u32 len, u32 plen, u32 dwords, u64 pbc);
- int (*process_dma_send)(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
- u32 hdrwords, struct hfi1_sge_state *ss,
- u32 len, u32 plen, u32 dwords, u64 pbc);
+ int (*process_pio_send)(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc);
+ int (*process_dma_send)(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc);
void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, const void *from, size_t count);
@@ -1084,6 +1111,10 @@ struct hfi1_devdata {
/* Save the enabled LCB error bits */
u64 lcb_err_en;
u8 dc_shutdown;
+
+ /* receive context tail dummy address */
+ __le64 *rcvhdrtail_dummy_kvaddr;
+ dma_addr_t rcvhdrtail_dummy_physaddr;
};
/* 8051 firmware version helper */
@@ -1414,27 +1445,13 @@ void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
int snoop_recv_handler(struct hfi1_packet *packet);
-int snoop_send_dma_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
- u32 plen, u32 dwords, u64 pbc);
-int snoop_send_pio_handler(struct hfi1_qp *qp, struct ahg_ib_header *ibhdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
- u32 plen, u32 dwords, u64 pbc);
+int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc);
+int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc);
void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, const void *from, size_t count);
-/* for use in system calls, where we want to know device type, etc. */
-#define ctxt_fp(fp) \
- (((struct hfi1_filedata *)(fp)->private_data)->uctxt)
-#define subctxt_fp(fp) \
- (((struct hfi1_filedata *)(fp)->private_data)->subctxt)
-#define tidcursor_fp(fp) \
- (((struct hfi1_filedata *)(fp)->private_data)->tidcursor)
-#define user_sdma_pkt_fp(fp) \
- (((struct hfi1_filedata *)(fp)->private_data)->pq)
-#define user_sdma_comp_fp(fp) \
- (((struct hfi1_filedata *)(fp)->private_data)->cq)
-
static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
{
return ppd->dd;
@@ -1570,8 +1587,8 @@ void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val);
*/
#define DEFAULT_RCVHDR_ENTSIZE 32
-int hfi1_get_user_pages(unsigned long, size_t, struct page **);
-void hfi1_release_user_pages(struct page **, size_t);
+int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
+void hfi1_release_user_pages(struct page **, size_t, bool);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
@@ -1612,7 +1629,6 @@ void hfi1_pcie_flr(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *);
void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *);
void hfi1_enable_intx(struct pci_dev *);
-void hfi1_nomsix(struct hfi1_devdata *);
void restore_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
@@ -1649,7 +1665,7 @@ void update_sge(struct hfi1_sge_state *ss, u32 length);
extern unsigned int hfi1_max_mtu;
extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
-extern uint num_rcv_contexts;
+extern int num_user_contexts;
extern unsigned n_krcvqs;
extern u8 krcvqs[];
extern int krcvqsset;
@@ -1713,7 +1729,7 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
else
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
- if (is_a0(dd))
+ if (is_ax(dd))
/* turn off send-side job key checks - A0 erratum */
return base_sc_integrity &
~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
@@ -1740,7 +1756,7 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
- if (is_a0(dd))
+ if (is_ax(dd))
/* turn off send-side job key checks - A0 erratum */
return base_sdma_integrity &
~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
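
For the new per-bit counters added to hfi1_devdata, the expectation is that an error-interrupt handler walks the set bits of the status value and bumps the matching array slot. A minimal sketch under that assumption; the actual handlers live in chip.c, which this hunk does not show:

    static void count_cce_errors(struct hfi1_devdata *dd, u64 status)
    {
            int i;

            /* one software counter per defined CCE error status bit */
            for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++)
                    if (status & (1ull << i))
                            dd->cce_err_status_cnt[i]++;
    }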
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c
index 8666f3ad24e9..4dd8051aba7e 100644
--- a/drivers/staging/rdma/hfi1/init.c
+++ b/drivers/staging/rdma/hfi1/init.c
@@ -60,6 +60,7 @@
#include "hfi.h"
#include "device.h"
#include "common.h"
+#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
@@ -81,15 +82,15 @@
* Number of user receive contexts we are configured to use (to allow for more
* pio buffers per ctxt, etc.) Zero means use one user context per CPU.
*/
-uint num_rcv_contexts;
-module_param_named(num_rcv_contexts, num_rcv_contexts, uint, S_IRUGO);
+int num_user_contexts = -1;
+module_param_named(num_user_contexts, num_user_contexts, int, S_IRUGO);
MODULE_PARM_DESC(
- num_rcv_contexts, "Set max number of user receive contexts to use");
+ num_user_contexts, "Set max number of user contexts to use");
u8 krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, byte, &krcvqsset, S_IRUGO);
-MODULE_PARM_DESC(krcvqs, "Array of the number of kernel receive queues by VL");
+MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
/* computed based on above array */
unsigned n_krcvqs;
@@ -112,7 +113,7 @@ MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (
unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
-MODULE_PARM_DESC(user_credit_return_theshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
+MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
static inline u64 encode_rcv_header_entry_size(u16);
@@ -129,6 +130,9 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
int ret;
int local_node_id = pcibus_to_node(dd->pcidev->bus);
+ /* Control context has to be always 0 */
+ BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);
+
if (local_node_id < 0)
local_node_id = numa_node_id();
dd->assigned_node_id = local_node_id;
@@ -158,6 +162,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
HFI1_CAP_KGET(NODROP_RHQ_FULL) |
HFI1_CAP_KGET(NODROP_EGR_FULL) |
HFI1_CAP_KGET(DMA_RTAIL);
+
+ /* Control context must use DMA_RTAIL */
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ rcd->flags |= HFI1_CAP_DMA_RTAIL;
rcd->seq_cnt = 1;
rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
@@ -208,7 +216,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
if (rcd) {
u32 rcvtids, max_entries;
- dd_dev_info(dd, "%s: setting up context %u\n", __func__, ctxt);
+ hfi1_cdbg(PROC, "setting up context %u\n", ctxt);
INIT_LIST_HEAD(&rcd->qp_wait_list);
rcd->ppd = ppd;
@@ -279,8 +287,9 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
rcd->ctxt);
rcd->egrbufs.count = MAX_EAGER_ENTRIES;
}
- dd_dev_info(dd, "ctxt%u: max Eager buffer RcvArray entries: %u\n",
- rcd->ctxt, rcd->egrbufs.count);
+ hfi1_cdbg(PROC,
+ "ctxt%u: max Eager buffer RcvArray entries: %u\n",
+ rcd->ctxt, rcd->egrbufs.count);
/*
* Allocate array that will hold the eager buffer accounting
@@ -308,8 +317,8 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
*/
if (rcd->egrbufs.size < hfi1_max_mtu) {
rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
- dd_dev_info(dd,
- "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
+ hfi1_cdbg(PROC,
+ "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
rcd->ctxt, rcd->egrbufs.size);
}
rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
@@ -601,20 +610,19 @@ static int create_workqueues(struct hfi1_devdata *dd)
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
if (!ppd->hfi1_wq) {
- char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
-
- snprintf(wq_name, sizeof(wq_name), "hfi%d_%d",
- dd->unit, pidx);
ppd->hfi1_wq =
- create_singlethread_workqueue(wq_name);
+ alloc_workqueue(
+ "hfi%d_%d",
+ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+ dd->num_sdma,
+ dd->unit, pidx);
if (!ppd->hfi1_wq)
goto wq_error;
}
}
return 0;
wq_error:
- pr_err("create_singlethread_workqueue failed for port %d\n",
- pidx + 1);
+ pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
if (ppd->hfi1_wq) {
@@ -670,7 +678,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
dd->process_dma_send = hfi1_verbs_send_dma;
dd->pio_inline_send = pio_copy;
- if (is_a0(dd)) {
+ if (is_ax(dd)) {
atomic_set(&dd->drop_packet, DROP_PACKET_ON);
dd->do_drop = 1;
} else {
@@ -691,6 +699,18 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
if (ret)
goto done;
+ /* allocate dummy tail memory for all receive contexts */
+ dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
+ &dd->pcidev->dev, sizeof(u64),
+ &dd->rcvhdrtail_dummy_physaddr,
+ GFP_KERNEL);
+
+ if (!dd->rcvhdrtail_dummy_kvaddr) {
+ dd_dev_err(dd, "cannot allocate dummy tail memory\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
/* dd->rcd can be NULL if early initialization failed */
for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
/*
@@ -1266,6 +1286,14 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
tmp = dd->rcd;
dd->rcd = NULL;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ if (dd->rcvhdrtail_dummy_kvaddr) {
+ dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
+ (void *)dd->rcvhdrtail_dummy_kvaddr,
+ dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_kvaddr = NULL;
+ }
+
for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
struct hfi1_ctxtdata *rcd = tmp[ctxt];
@@ -1308,6 +1336,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0, j, pidx, initfail;
struct hfi1_devdata *dd = NULL;
+ struct hfi1_pportdata *ppd;
/* First, lock the non-writable module parameters */
HFI1_CAP_LOCK();
@@ -1322,6 +1351,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
hfi1_hdrq_entsize);
+ ret = -EINVAL;
goto bail;
}
@@ -1403,8 +1433,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (initfail || ret) {
stop_timers(dd);
flush_workqueue(ib_wq);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
hfi1_quiet_serdes(dd->pport + pidx);
+ ppd = dd->pport + pidx;
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ }
if (!j)
hfi1_device_remove(dd);
if (!ret)
@@ -1521,6 +1557,14 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
+
+ /*
+ * Program dummy tail address for every receive context
+ * before enabling any receive context
+ */
+ write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
+ dd->rcvhdrtail_dummy_physaddr);
+
return 0;
bail_free:
@@ -1660,9 +1704,11 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
rcd->egrbufs.numbufs = idx;
rcd->egrbufs.size = alloced_bytes;
- dd_dev_info(dd, "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
- rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
- rcd->egrbufs.size);
+ hfi1_cdbg(PROC,
+ "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
+ rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
+ rcd->egrbufs.size);
+
/*
* Set the contexts rcv array head update threshold to the closest
@@ -1683,13 +1729,14 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
rcd->expected_base = rcd->eager_base + egrtop;
- dd_dev_info(dd, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
- rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
- rcd->eager_base, rcd->expected_base);
+ hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
+ rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
+ rcd->eager_base, rcd->expected_base);
if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
- dd_dev_err(dd, "ctxt%u: current Eager buffer size is invalid %u\n",
- rcd->ctxt, rcd->egrbufs.rcvtid_size);
+ hfi1_cdbg(PROC,
+ "ctxt%u: current Eager buffer size is invalid %u\n",
+ rcd->ctxt, rcd->egrbufs.rcvtid_size);
ret = -EINVAL;
goto bail;
}
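
One detail of the workqueue change above worth calling out: create_singlethread_workqueue() took a pre-formatted name, while alloc_workqueue() takes a printf-style format whose arguments come last, after the flags and max_active. The call from the hunk, annotated:

    ppd->hfi1_wq = alloc_workqueue(
            "hfi%d_%d",                               /* name format */
            WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE, /* flags */
            dd->num_sdma,                             /* max_active */
            dd->unit, pidx);                          /* format args */

Sizing max_active to the number of SDMA engines lets per-engine work items run concurrently instead of serializing on a single thread.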
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/staging/rdma/hfi1/iowait.h
index fa361b405851..e8ba5606d08d 100644
--- a/drivers/staging/rdma/hfi1/iowait.h
+++ b/drivers/staging/rdma/hfi1/iowait.h
@@ -150,12 +150,14 @@ static inline void iowait_init(
 * iowait_schedule() - schedule the wait structure's work
* @wait: wait struct to schedule
* @wq: workqueue for schedule
+ * @cpu: cpu to queue the work on
*/
static inline void iowait_schedule(
struct iowait *wait,
- struct workqueue_struct *wq)
+ struct workqueue_struct *wq,
+ int cpu)
{
- queue_work(wq, &wait->iowork);
+ queue_work_on(cpu, wq, &wait->iowork);
}
/**
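
With the extra argument, callers of iowait_schedule() now choose the CPU explicitly via queue_work_on(). A hedged example of the new call shape; the CPU chosen here is illustrative only, and a real caller would more likely pass a CPU associated with the servicing SDMA engine:

    /* queue this wait's work item on an explicitly chosen CPU */
    iowait_schedule(&wait, ppd->hfi1_wq, raw_smp_processor_id());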
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c
index 32f703736185..4f5dbd14b5de 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/staging/rdma/hfi1/mad.c
@@ -84,7 +84,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
struct ib_mad_send_buf *send_buf;
struct ib_mad_agent *agent;
- struct ib_smp *smp;
+ struct opa_smp *smp;
int ret;
unsigned long flags;
unsigned long timeout;
@@ -117,15 +117,15 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
return;
smp = send_buf->mad;
- smp->base_version = IB_MGMT_BASE_VERSION;
+ smp->base_version = OPA_MGMT_BASE_VERSION;
smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- smp->class_version = 1;
+ smp->class_version = OPA_SMI_CLASS_VERSION;
smp->method = IB_MGMT_METHOD_TRAP;
ibp->tid++;
smp->tid = cpu_to_be64(ibp->tid);
smp->attr_id = IB_SMP_ATTR_NOTICE;
/* o14-1: smp->mkey = 0; */
- memcpy(smp->data, data, len);
+ memcpy(smp->route.lid.data, data, len);
spin_lock_irqsave(&ibp->lock, flags);
if (!ibp->sm_ah) {
@@ -164,11 +164,16 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
* Send a bad [PQ]_Key trap (ch. 14.3.8).
*/
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
+ u32 qp1, u32 qp2, u16 lid1, u16 lid2)
{
- struct ib_mad_notice_attr data;
+ struct opa_mad_notice_attr data;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+ u32 _lid1 = lid1;
+ u32 _lid2 = lid2;
- if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
+ memset(&data, 0, sizeof(data));
+
+ if (trap_num == OPA_TRAP_BAD_P_KEY)
ibp->pkey_violations++;
else
ibp->qkey_violations++;
@@ -176,17 +181,15 @@ void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
/* Send violation trap */
data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_msb = 0;
data.prod_type_lsb = IB_NOTICE_PROD_CA;
data.trap_num = trap_num;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_257_258.lid1 = lid1;
- data.details.ntc_257_258.lid2 = lid2;
- data.details.ntc_257_258.key = cpu_to_be32(key);
- data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
- data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
+ data.issuer_lid = cpu_to_be32(lid);
+ data.ntc_257_258.lid1 = cpu_to_be32(_lid1);
+ data.ntc_257_258.lid2 = cpu_to_be32(_lid2);
+ data.ntc_257_258.key = cpu_to_be32(key);
+ data.ntc_257_258.sl = sl << 3;
+ data.ntc_257_258.qp1 = cpu_to_be32(qp1);
+ data.ntc_257_258.qp2 = cpu_to_be32(qp2);
send_trap(ibp, &data, sizeof(data));
}
@@ -197,32 +200,30 @@ void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
__be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{
- struct ib_mad_notice_attr data;
+ struct opa_mad_notice_attr data;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+ memset(&data, 0, sizeof(data));
/* Send violation trap */
data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_msb = 0;
data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_256.lid = data.issuer_lid;
- data.details.ntc_256.method = mad->method;
- data.details.ntc_256.attr_id = mad->attr_id;
- data.details.ntc_256.attr_mod = mad->attr_mod;
- data.details.ntc_256.mkey = mkey;
+ data.trap_num = OPA_TRAP_BAD_M_KEY;
+ data.issuer_lid = cpu_to_be32(lid);
+ data.ntc_256.lid = data.issuer_lid;
+ data.ntc_256.method = mad->method;
+ data.ntc_256.attr_id = mad->attr_id;
+ data.ntc_256.attr_mod = mad->attr_mod;
+ data.ntc_256.mkey = mkey;
if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-
- data.details.ntc_256.dr_slid = (__force __be16)dr_slid;
- data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
- if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
- data.details.ntc_256.dr_trunc_hop |=
+ data.ntc_256.dr_slid = dr_slid;
+ data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
+ if (hop_cnt > ARRAY_SIZE(data.ntc_256.dr_rtn_path)) {
+ data.ntc_256.dr_trunc_hop |=
IB_NOTICE_TRAP_DR_TRUNC;
- hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
+ hop_cnt = ARRAY_SIZE(data.ntc_256.dr_rtn_path);
}
- data.details.ntc_256.dr_trunc_hop |= hop_cnt;
- memcpy(data.details.ntc_256.dr_rtn_path, return_path,
+ data.ntc_256.dr_trunc_hop |= hop_cnt;
+ memcpy(data.ntc_256.dr_rtn_path, return_path,
hop_cnt);
}
@@ -234,17 +235,17 @@ static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
*/
void hfi1_cap_mask_chg(struct hfi1_ibport *ibp)
{
- struct ib_mad_notice_attr data;
+ struct opa_mad_notice_attr data;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+
+ memset(&data, 0, sizeof(data));
data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+ data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
+ data.issuer_lid = cpu_to_be32(lid);
+ data.ntc_144.lid = data.issuer_lid;
+ data.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
send_trap(ibp, &data, sizeof(data));
}
@@ -254,17 +255,17 @@ void hfi1_cap_mask_chg(struct hfi1_ibport *ibp)
*/
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{
- struct ib_mad_notice_attr data;
+ struct opa_mad_notice_attr data;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+
+ memset(&data, 0, sizeof(data));
data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_145.lid = data.issuer_lid;
- data.details.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
+ data.trap_num = OPA_TRAP_CHANGE_SYSGUID;
+ data.issuer_lid = cpu_to_be32(lid);
+ data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
+ data.ntc_145.lid = data.issuer_lid;
send_trap(ibp, &data, sizeof(data));
}
@@ -274,18 +275,18 @@ void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
*/
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{
- struct ib_mad_notice_attr data;
+ struct opa_mad_notice_attr data;
+ u32 lid = ppd_from_ibp(ibp)->lid;
+
+ memset(&data, 0, sizeof(data));
data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.local_changes = 1;
- data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
+ data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
+ data.issuer_lid = cpu_to_be32(lid);
+ data.ntc_144.lid = data.issuer_lid;
+ data.ntc_144.change_flags =
+ cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);
send_trap(ibp, &data, sizeof(data));
}
@@ -2076,13 +2077,20 @@ struct opa_aggregate {
u8 data[0];
};
-/* Request contains first two fields, response contains those plus the rest */
+#define MSK_LLI 0x000000f0
+#define MSK_LLI_SFT 4
+#define MSK_LER 0x0000000f
+#define MSK_LER_SFT 0
+#define ADD_LLI 8
+#define ADD_LER 2
+
+/* Request contains first three fields, response contains those plus the rest */
struct opa_port_data_counters_msg {
__be64 port_select_mask[4];
__be32 vl_select_mask;
+ __be32 resolution;
/* Response fields follow */
- __be32 reserved1;
struct _port_dctrs {
u8 port_number;
u8 reserved2[3];
@@ -2271,34 +2279,8 @@ static void a0_portstatus(struct hfi1_pportdata *ppd,
{
if (!is_bx(ppd->dd)) {
unsigned long vl;
- int vfi = 0;
u64 max_vl_xmit_wait = 0, tmp;
u32 vl_all_mask = VL_MASK_ALL;
- u64 rcv_data, rcv_bubble;
-
- rcv_data = be64_to_cpu(rsp->port_rcv_data);
- rcv_bubble = be64_to_cpu(rsp->port_rcv_bubble);
- /* In the measured time period, calculate the total number
- * of flits that were received. Subtract out one false
- * rcv_bubble increment for every 32 received flits but
- * don't let the number go negative.
- */
- if (rcv_bubble >= (rcv_data>>5)) {
- rcv_bubble -= (rcv_data>>5);
- rsp->port_rcv_bubble = cpu_to_be64(rcv_bubble);
- }
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(vl_select_mask)) {
- rcv_data = be64_to_cpu(rsp->vls[vfi].port_vl_rcv_data);
- rcv_bubble =
- be64_to_cpu(rsp->vls[vfi].port_vl_rcv_bubble);
- if (rcv_bubble >= (rcv_data>>5)) {
- rcv_bubble -= (rcv_data>>5);
- rsp->vls[vfi].port_vl_rcv_bubble =
- cpu_to_be64(rcv_bubble);
- }
- vfi++;
- }
for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
8 * sizeof(vl_all_mask)) {
@@ -2363,8 +2345,6 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
CNTR_INVALID_VL));
rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
CNTR_INVALID_VL));
- rsp->port_rcv_bubble =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL));
rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
CNTR_INVALID_VL));
rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
@@ -2434,9 +2414,6 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
- rsp->vls[vfi].port_vl_rcv_bubble =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BBL_VL,
- idx_from_vl(vl)));
rsp->vls[vfi].port_vl_rcv_pkts =
cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
@@ -2474,7 +2451,8 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
return reply((struct ib_mad_hdr *)pmp);
}
-static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port)
+static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
+ u8 res_lli, u8 res_ler)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -2490,14 +2468,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port)
CNTR_INVALID_VL);
error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
CNTR_INVALID_VL);
- error_counter_summary += read_dev_cntr(dd, C_DC_TX_REPLAY,
- CNTR_INVALID_VL);
- error_counter_summary += read_dev_cntr(dd, C_DC_RX_REPLAY,
- CNTR_INVALID_VL);
- error_counter_summary += read_dev_cntr(dd, C_DC_SEQ_CRC_CNT,
- CNTR_INVALID_VL);
- error_counter_summary += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
- CNTR_INVALID_VL);
+ /* local link integrity must be right-shifted by the lli resolution */
+ tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
+ tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
+ error_counter_summary += (tmp >> res_lli);
+ /* link error recovery must be right-shifted by the ler resolution */
+ tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
+ tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
+ error_counter_summary += (tmp >> res_ler);
error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
CNTR_INVALID_VL);
error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
@@ -2519,32 +2497,8 @@ static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp,
if (!is_bx(dd)) {
unsigned long vl;
int vfi = 0;
- u64 rcv_data, rcv_bubble, sum_vl_xmit_wait = 0;
-
- rcv_data = be64_to_cpu(rsp->port_rcv_data);
- rcv_bubble = be64_to_cpu(rsp->port_rcv_bubble);
- /* In the measured time period, calculate the total number
- * of flits that were received. Subtract out one false
- * rcv_bubble increment for every 32 received flits but
- * don't let the number go negative.
- */
- if (rcv_bubble >= (rcv_data>>5)) {
- rcv_bubble -= (rcv_data>>5);
- rsp->port_rcv_bubble = cpu_to_be64(rcv_bubble);
- }
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(vl_select_mask)) {
- rcv_data = be64_to_cpu(rsp->vls[vfi].port_vl_rcv_data);
- rcv_bubble =
- be64_to_cpu(rsp->vls[vfi].port_vl_rcv_bubble);
- if (rcv_bubble >= (rcv_data>>5)) {
- rcv_bubble -= (rcv_data>>5);
- rsp->vls[vfi].port_vl_rcv_bubble =
- cpu_to_be64(rcv_bubble);
- }
- vfi++;
- }
- vfi = 0;
+ u64 sum_vl_xmit_wait = 0;
+
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
8 * sizeof(vl_select_mask)) {
u64 tmp = sum_vl_xmit_wait +
@@ -2575,6 +2529,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
u32 num_ports;
u8 num_pslm;
u8 lq, num_vls;
+ u8 res_lli, res_ler;
u64 port_mask;
unsigned long port_num;
unsigned long vl;
@@ -2585,6 +2540,10 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
+ res_lli = res_lli ? res_lli + ADD_LLI : 0;
+ res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
+ res_ler = res_ler ? res_ler + ADD_LER : 0;
if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -2635,8 +2594,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
CNTR_INVALID_VL));
rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
CNTR_INVALID_VL));
- rsp->port_rcv_bubble =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL));
rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
CNTR_INVALID_VL));
rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
@@ -2655,7 +2612,8 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
rsp->port_error_counter_summary =
- cpu_to_be64(get_error_counter_summary(ibdev, port));
+ cpu_to_be64(get_error_counter_summary(ibdev, port,
+ res_lli, res_ler));
vlinfo = &(rsp->vls[0]);
vfi = 0;
@@ -2675,9 +2633,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
rsp->vls[vfi].port_vl_rcv_data =
cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
idx_from_vl(vl)));
- rsp->vls[vfi].port_vl_rcv_bubble =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BBL_VL,
- idx_from_vl(vl)));
rsp->vls[vfi].port_vl_xmit_pkts =
cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
@@ -3458,7 +3413,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
u32 nport = OPA_AM_NPORT(am);
u64 reg;
- if (nport != 1 || OPA_AM_PORTNUM(am)) {
+ if (nport != 1) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -3483,7 +3438,7 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
u32 nport = OPA_AM_NPORT(am);
int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
- if (nport != 1 || OPA_AM_PORTNUM(am)) {
+ if (nport != 1) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
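
The resolution handling above compresses noisy counters before they enter the summary: a non-zero resolution field is biased (ADD_LLI/ADD_LER) and then used as a right-shift on the raw replay and recovery counts. A minimal userspace sketch of that flow, with placeholder values standing in for the driver's MSK_LLI/MSK_LLI_SFT/ADD_LLI constants (which are defined elsewhere in mad.c):

    #include <stdint.h>
    #include <stdio.h>

    #define MSK_LLI     0x0000000f  /* placeholder mask */
    #define MSK_LLI_SFT 0           /* placeholder shift */
    #define ADD_LLI     8           /* placeholder bias */

    static uint64_t lli_summary(uint32_t resolution, uint64_t rx_replay,
                                uint64_t tx_replay)
    {
            uint8_t res_lli = (uint8_t)(resolution & MSK_LLI) >> MSK_LLI_SFT;

            /* a non-zero resolution is biased before being used as a shift */
            res_lli = res_lli ? res_lli + ADD_LLI : 0;
            /* local link integrity is right-shifted by the lli resolution */
            return (rx_replay + tx_replay) >> res_lli;
    }

    int main(void)
    {
            printf("summary contribution: %llu\n",
                   (unsigned long long)lli_summary(0x3, 1 << 20, 1 << 20));
            return 0;
    }
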
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/staging/rdma/hfi1/mad.h
index 47457501c044..f0317750e2fc 100644
--- a/drivers/staging/rdma/hfi1/mad.h
+++ b/drivers/staging/rdma/hfi1/mad.h
@@ -60,7 +60,121 @@
#endif
#include "opa_compat.h"
+/*
+ * OPA Traps
+ */
+#define OPA_TRAP_GID_NOW_IN_SERVICE cpu_to_be16(64)
+#define OPA_TRAP_GID_OUT_OF_SERVICE cpu_to_be16(65)
+#define OPA_TRAP_ADD_MULTICAST_GROUP cpu_to_be16(66)
+#define OPA_TRAP_DEL_MULTICAST_GROUP cpu_to_be16(67)
+#define OPA_TRAP_UNPATH cpu_to_be16(68)
+#define OPA_TRAP_REPATH cpu_to_be16(69)
+#define OPA_TRAP_PORT_CHANGE_STATE cpu_to_be16(128)
+#define OPA_TRAP_LINK_INTEGRITY cpu_to_be16(129)
+#define OPA_TRAP_EXCESSIVE_BUFFER_OVERRUN cpu_to_be16(130)
+#define OPA_TRAP_FLOW_WATCHDOG cpu_to_be16(131)
+#define OPA_TRAP_CHANGE_CAPABILITY cpu_to_be16(144)
+#define OPA_TRAP_CHANGE_SYSGUID cpu_to_be16(145)
+#define OPA_TRAP_BAD_M_KEY cpu_to_be16(256)
+#define OPA_TRAP_BAD_P_KEY cpu_to_be16(257)
+#define OPA_TRAP_BAD_Q_KEY cpu_to_be16(258)
+#define OPA_TRAP_SWITCH_BAD_PKEY cpu_to_be16(259)
+#define OPA_SMA_TRAP_DATA_LINK_WIDTH cpu_to_be16(2048)
+/*
+ * Generic trap/notice flags for other local changes (trap 144).
+ */
+#define OPA_NOTICE_TRAP_LWDE_CHG 0x08 /* Link Width Downgrade Enable
+ * changed
+ */
+#define OPA_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
+#define OPA_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
+#define OPA_NOTICE_TRAP_NODE_DESC_CHG 0x01
+
+struct opa_mad_notice_attr {
+ u8 generic_type;
+ u8 prod_type_msb;
+ __be16 prod_type_lsb;
+ __be16 trap_num;
+ __be16 toggle_count;
+ __be32 issuer_lid;
+ __be32 reserved1;
+ union ib_gid issuer_gid;
+
+ union {
+ struct {
+ u8 details[64];
+ } raw_data;
+
+ struct {
+ union ib_gid gid;
+ } __packed ntc_64_65_66_67;
+
+ struct {
+ __be32 lid;
+ } __packed ntc_128;
+
+ struct {
+ __be32 lid; /* where violation happened */
+ u8 port_num; /* where violation happened */
+ } __packed ntc_129_130_131;
+
+ struct {
+ __be32 lid; /* LID where change occurred */
+ __be32 new_cap_mask; /* new capability mask */
+ __be16 reserved2;
+ __be16 cap_mask;
+ __be16 change_flags; /* low 4 bits only */
+ } __packed ntc_144;
+
+ struct {
+ __be64 new_sys_guid;
+ __be32 lid; /* lid where sys guid changed */
+ } __packed ntc_145;
+
+ struct {
+ __be32 lid;
+ __be32 dr_slid;
+ u8 method;
+ u8 dr_trunc_hop;
+ __be16 attr_id;
+ __be32 attr_mod;
+ __be64 mkey;
+ u8 dr_rtn_path[30];
+ } __packed ntc_256;
+
+ struct {
+ __be32 lid1;
+ __be32 lid2;
+ __be32 key;
+ u8 sl; /* SL: high 5 bits */
+ u8 reserved3[3];
+ union ib_gid gid1;
+ union ib_gid gid2;
+ __be32 qp1; /* high 8 bits reserved */
+ __be32 qp2; /* high 8 bits reserved */
+ } __packed ntc_257_258;
+
+ struct {
+ __be16 flags; /* low 8 bits reserved */
+ __be16 pkey;
+ __be32 lid1;
+ __be32 lid2;
+ u8 sl; /* SL: high 5 bits */
+ u8 reserved4[3];
+ union ib_gid gid1;
+ union ib_gid gid2;
+ __be32 qp1; /* high 8 bits reserved */
+ __be32 qp2; /* high 8 bits reserved */
+ } __packed ntc_259;
+
+ struct {
+ __be32 lid;
+ } __packed ntc_2048;
+
+ };
+ u8 class_data[0];
+};
#define IB_VLARB_LOWPRI_0_31 1
#define IB_VLARB_LOWPRI_32_63 2
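
opa_mad_notice_attr is a wire-format structure, so every multi-byte field is stored big-endian and the per-trap payloads are packed. A small userspace sketch of filling the trap 144 (capability change) payload, with a reduced stand-in struct and htobe*() in place of the kernel's cpu_to_be*():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ntc_144_sketch {
            uint32_t lid;           /* __be32: LID where change occurred */
            uint32_t new_cap_mask;  /* __be32: new capability mask */
            uint16_t reserved;
            uint16_t cap_mask;
            uint16_t change_flags;  /* low 4 bits only */
    } __attribute__((packed));

    int main(void)
    {
            struct ntc_144_sketch n;

            memset(&n, 0, sizeof(n));
            n.lid = htobe32(0x1234);
            n.new_cap_mask = htobe32(0x00400000);
            n.change_flags = htobe16(0x02); /* OPA_NOTICE_TRAP_LWE_CHG */
            printf("payload bytes: %zu\n", sizeof(n)); /* 14, packed */
            return 0;
    }
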
diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c
index 568f185a022d..a3f8b884fdd6 100644
--- a/drivers/staging/rdma/hfi1/mr.c
+++ b/drivers/staging/rdma/hfi1/mr.c
@@ -167,10 +167,7 @@ static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd)
rval = init_mregion(&mr->mr, pd, count);
if (rval)
goto bail;
- /*
- * ib_reg_phys_mr() will initialize mr->ibmr except for
- * lkey and rkey.
- */
+
rval = hfi1_alloc_lkey(&mr->mr, 0);
if (rval)
goto bail_mregion;
@@ -188,52 +185,6 @@ bail:
}
/**
- * hfi1_reg_phys_mr - register a physical memory region
- * @pd: protection domain for this memory region
- * @buffer_list: pointer to the list of physical buffers to register
- * @num_phys_buf: the number of physical buffers to register
- * @iova_start: the starting address passed over IB which maps to this MR
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *hfi1_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start)
-{
- struct hfi1_mr *mr;
- int n, m, i;
- struct ib_mr *ret;
-
- mr = alloc_mr(num_phys_buf, pd);
- if (IS_ERR(mr)) {
- ret = (struct ib_mr *)mr;
- goto bail;
- }
-
- mr->mr.user_base = *iova_start;
- mr->mr.iova = *iova_start;
- mr->mr.access_flags = acc;
-
- m = 0;
- n = 0;
- for (i = 0; i < num_phys_buf; i++) {
- mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
- mr->mr.map[m]->segs[n].length = buffer_list[i].size;
- mr->mr.length += buffer_list[i].size;
- n++;
- if (n == HFI1_SEGSZ) {
- m++;
- n = 0;
- }
- }
-
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-/**
* hfi1_reg_user_mr - register a userspace memory region
* @pd: protection domain for this memory region
* @start: starting userspace address
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c
index a956044459a2..8317b07d722a 100644
--- a/drivers/staging/rdma/hfi1/pcie.c
+++ b/drivers/staging/rdma/hfi1/pcie.c
@@ -426,14 +426,6 @@ void request_msix(struct hfi1_devdata *dd, u32 *nent,
tune_pcie_caps(dd);
}
-/*
- * Disable MSI-X.
- */
-void hfi1_nomsix(struct hfi1_devdata *dd)
-{
- pci_disable_msix(dd->pcidev);
-}
-
void hfi1_enable_intx(struct pci_dev *pdev)
{
/* first, turn on INTx */
@@ -475,8 +467,18 @@ static void tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
- u16 rc_mrrs, ep_mrrs, max_mrrs;
+ u16 rc_mrrs, ep_mrrs, max_mrrs, ectl;
+ /*
+	 * Turn on extended tags in DevCtl in case the BIOS has turned them
+	 * off, in order to improve WFR SDMA bandwidth
+ */
+ pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+ if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
+ dd_dev_info(dd, "Enabling PCIe extended tags\n");
+ ectl |= PCI_EXP_DEVCTL_EXT_TAG;
+ pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
+ }
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
if (!pci_is_root_bus(parent->bus)) {
@@ -915,7 +917,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
/*
* A0 needs an additional SBR
*/
- if (is_a0(dd))
+ if (is_ax(dd))
nsbr++;
/*
@@ -947,17 +949,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
}
retry:
-
- if (therm) {
- /*
- * toggle SPICO_ENABLE to get back to the state
- * just after the firmware load
- */
- sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
- WRITE_SBUS_RECEIVER, 0x00000040);
- sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
- WRITE_SBUS_RECEIVER, 0x00000140);
- }
+ /* the SBus download will reset the spico for thermal */
/* step 3: download SBus Master firmware */
/* step 4: download PCIe Gen3 SerDes firmware */
@@ -1201,7 +1193,7 @@ retry:
write_csr(dd, CCE_DC_CTRL, 0);
/* Set the LED off */
- if (is_a0(dd))
+ if (is_ax(dd))
setextled(dd, 0);
/* check for any per-lane errors */
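
The open-coded read-modify-write above keeps the dd_dev_info() message conditional on the bit having actually been clear. A terser alternative, assuming the generic PCI capability helper is usable here, would be:

    /* one-shot equivalent; loses the conditional dd_dev_info() */
    pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL,
                             PCI_EXP_DEVCTL_EXT_TAG);
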
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c
index e5c32db4bc67..b51a4416312b 100644
--- a/drivers/staging/rdma/hfi1/pio.c
+++ b/drivers/staging/rdma/hfi1/pio.c
@@ -660,6 +660,24 @@ void set_pio_integrity(struct send_context *sc)
write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
}
+static u32 get_buffers_allocated(struct send_context *sc)
+{
+ int cpu;
+ u32 ret = 0;
+
+ for_each_possible_cpu(cpu)
+ ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
+ return ret;
+}
+
+static void reset_buffers_allocated(struct send_context *sc)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
+}
+
/*
* Allocate a NUMA relative send context structure of the given type along
* with a HW context.
@@ -668,7 +686,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
uint hdrqentsize, int numa)
{
struct send_context_info *sci;
- struct send_context *sc;
+ struct send_context *sc = NULL;
dma_addr_t pa;
unsigned long flags;
u64 reg;
@@ -686,10 +704,20 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
if (!sc)
return NULL;
+ sc->buffers_allocated = alloc_percpu(u32);
+ if (!sc->buffers_allocated) {
+ kfree(sc);
+ dd_dev_err(dd,
+ "Cannot allocate buffers_allocated per cpu counters\n"
+ );
+ return NULL;
+ }
+
spin_lock_irqsave(&dd->sc_lock, flags);
ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
if (ret) {
spin_unlock_irqrestore(&dd->sc_lock, flags);
+ free_percpu(sc->buffers_allocated);
kfree(sc);
return NULL;
}
@@ -705,7 +733,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
spin_lock_init(&sc->credit_ctrl_lock);
INIT_LIST_HEAD(&sc->piowait);
INIT_WORK(&sc->halt_work, sc_halted);
- atomic_set(&sc->buffers_allocated, 0);
init_waitqueue_head(&sc->halt_wait);
/* grouping is always single context for now */
@@ -815,15 +842,16 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
}
}
- dd_dev_info(dd,
- "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
- sw_index,
- hw_context,
- sc_type_name(type),
- sc->group,
- sc->credits,
- sc->credit_ctrl,
- thresh);
+ hfi1_cdbg(PIO,
+ "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
+ sw_index,
+ hw_context,
+ sc_type_name(type),
+ sc->group,
+ sc->credits,
+ sc->credit_ctrl,
+ thresh);
+
return sc;
}
@@ -865,6 +893,7 @@ void sc_free(struct send_context *sc)
spin_unlock_irqrestore(&dd->sc_lock, flags);
kfree(sc->sr);
+ free_percpu(sc->buffers_allocated);
kfree(sc);
}
@@ -1028,7 +1057,7 @@ int sc_restart(struct send_context *sc)
/* kernel context */
loop = 0;
while (1) {
- count = atomic_read(&sc->buffers_allocated);
+ count = get_buffers_allocated(sc);
if (count == 0)
break;
if (loop > 100) {
@@ -1196,7 +1225,8 @@ int sc_enable(struct send_context *sc)
sc->sr_head = 0;
sc->sr_tail = 0;
sc->flags = 0;
- atomic_set(&sc->buffers_allocated, 0);
+	/* the alloc lock ensures no fast path allocation */
+ reset_buffers_allocated(sc);
/*
* Clear all per-context errors. Some of these will be set when
@@ -1372,7 +1402,8 @@ retry:
/* there is enough room */
- atomic_inc(&sc->buffers_allocated);
+ preempt_disable();
+ this_cpu_inc(*sc->buffers_allocated);
/* read this once */
head = sc->sr_head;
@@ -1564,6 +1595,7 @@ void sc_release_update(struct send_context *sc)
u64 hw_free;
u32 head, tail;
unsigned long old_free;
+ unsigned long free;
unsigned long extra;
unsigned long flags;
int code;
@@ -1578,7 +1610,7 @@ void sc_release_update(struct send_context *sc)
extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
- (old_free & CR_COUNTER_MASK))
& CR_COUNTER_MASK;
- sc->free = old_free + extra;
+ free = old_free + extra;
trace_hfi1_piofree(sc, extra);
/* call sent buffer callbacks */
@@ -1588,7 +1620,7 @@ void sc_release_update(struct send_context *sc)
while (head != tail) {
pbuf = &sc->sr[tail].pbuf;
- if (sent_before(sc->free, pbuf->sent_at)) {
+ if (sent_before(free, pbuf->sent_at)) {
/* not sent yet */
break;
}
@@ -1602,8 +1634,10 @@ void sc_release_update(struct send_context *sc)
if (tail >= sc->sr_size)
tail = 0;
}
- /* update tail, in case we moved it */
sc->sr_tail = tail;
+ /* make sure tail is updated before free */
+ smp_wmb();
+ sc->free = free;
spin_unlock_irqrestore(&sc->release_lock, flags);
sc_piobufavail(sc);
}
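
The buffers_allocated rework trades a contended atomic_t on the buffer allocate/free fast path for per-CPU counters that only the rare slow path (sc_restart()) has to sum; a buffer may be freed on a different CPU than it was allocated on, so individual slots are meaningless and only the total counts. Relatedly, sc_release_update() now publishes sc->sr_tail before sc->free (the smp_wmb()) so a reader that observes the new free counter also observes the matching tail. A userspace model of the counting scheme, with a plain array standing in for alloc_percpu()/this_cpu_inc():

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS_SKETCH 4

    static uint32_t buffers_allocated[NR_CPUS_SKETCH];

    /* fast path: bump this CPU's slot (this_cpu_inc() in the driver) */
    static void buf_get(int cpu) { buffers_allocated[cpu]++; }

    /* release may run on a different CPU than the allocation, so a
     * single slot can wrap "negative"; only the sum is meaningful */
    static void buf_put(int cpu) { buffers_allocated[cpu]--; }

    /* slow path (sc_restart() in the driver): sum across all CPUs */
    static uint32_t buf_count(void)
    {
            uint32_t sum = 0;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
                    sum += buffers_allocated[cpu];
            return sum;
    }

    int main(void)
    {
            buf_get(0);
            buf_get(2);
            buf_put(0);
            printf("buffers in flight: %u\n", buf_count()); /* 1 */
            return 0;
    }
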
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h
index 0bb885ca3cfb..53d3e0a79375 100644
--- a/drivers/staging/rdma/hfi1/pio.h
+++ b/drivers/staging/rdma/hfi1/pio.h
@@ -130,7 +130,7 @@ struct send_context {
spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
u64 credit_ctrl; /* cache for credit control */
u32 credit_intr_count; /* count of credit intr users */
- atomic_t buffers_allocated; /* count of buffers allocated */
+ u32 __percpu *buffers_allocated;/* count of buffers allocated */
wait_queue_head_t halt_wait; /* wait until kernel sees interrupt */
};
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/staging/rdma/hfi1/pio_copy.c
index 8972bbc02038..ebb0bafc68cb 100644
--- a/drivers/staging/rdma/hfi1/pio_copy.c
+++ b/drivers/staging/rdma/hfi1/pio_copy.c
@@ -160,7 +160,8 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
}
/* finished with this buffer */
- atomic_dec(&pbuf->sc->buffers_allocated);
+ this_cpu_dec(*pbuf->sc->buffers_allocated);
+ preempt_enable();
}
/* USE_SHIFTS is faster in user-space tests on a Xeon X5570 @ 2.93GHz */
@@ -854,5 +855,6 @@ void seg_pio_copy_end(struct pio_buf *pbuf)
}
/* finished with this buffer */
- atomic_dec(&pbuf->sc->buffers_allocated);
+ this_cpu_dec(*pbuf->sc->buffers_allocated);
+ preempt_enable();
}
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index f8c36166962f..ce036810d576 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -378,6 +378,7 @@ static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type)
}
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
qp->r_nak_state = 0;
+ qp->r_adefered = 0;
qp->r_aflags = 0;
qp->r_flags = 0;
qp->s_head = 0;
@@ -617,7 +618,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int mig = 0;
int ret;
u32 pmtu = 0; /* for gcc warning only */
- struct hfi1_devdata *dd;
+ struct hfi1_devdata *dd = dd_from_dev(dev);
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_lock);
@@ -631,23 +632,35 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto inval;
if (attr_mask & IB_QP_AV) {
+ u8 sc;
+
if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
goto inval;
if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr))
goto inval;
+ sc = ah_to_sc(ibqp->device, &attr->ah_attr);
+ if (!qp_to_sdma_engine(qp, sc) &&
+ dd->flags & HFI1_HAS_SEND_DMA)
+ goto inval;
}
if (attr_mask & IB_QP_ALT_PATH) {
+ u8 sc;
+
if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
goto inval;
if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
goto inval;
- if (attr->alt_pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
+ if (attr->alt_pkey_index >= hfi1_get_npkeys(dd))
+ goto inval;
+ sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
+ if (!qp_to_sdma_engine(qp, sc) &&
+ dd->flags & HFI1_HAS_SEND_DMA)
goto inval;
}
if (attr_mask & IB_QP_PKEY_INDEX)
- if (attr->pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
+ if (attr->pkey_index >= hfi1_get_npkeys(dd))
goto inval;
if (attr_mask & IB_QP_MIN_RNR_TIMER)
@@ -792,6 +805,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->remote_ah_attr = attr->ah_attr;
qp->s_srate = attr->ah_attr.static_rate;
qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
+ qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+ qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
}
if (attr_mask & IB_QP_ALT_PATH) {
@@ -806,6 +821,8 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->port_num = qp->alt_ah_attr.port_num;
qp->s_pkey_index = qp->s_alt_pkey_index;
qp->s_flags |= HFI1_S_AHG_CLEAR;
+ qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+ qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
}
}
@@ -1528,9 +1545,6 @@ struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5)
if (!(dd->flags & HFI1_HAS_SEND_DMA))
return NULL;
switch (qp->ibqp.qp_type) {
- case IB_QPT_UC:
- case IB_QPT_RC:
- break;
case IB_QPT_SMI:
return NULL;
default:
@@ -1685,3 +1699,25 @@ void qp_comm_est(struct hfi1_qp *qp)
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
}
+
+/*
+ * Switch to alternate path.
+ * The QP s_lock should be held and interrupts disabled.
+ */
+void hfi1_migrate_qp(struct hfi1_qp *qp)
+{
+ struct ib_event ev;
+
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ qp->s_flags |= HFI1_S_AHG_CLEAR;
+ qp->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
+ qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+}
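
The modify-QP checks above enforce that, when send DMA is active, any new primary or alternate path maps to an SDMA engine before the path is accepted, and the resulting s_sc/s_sde are cached on the QP instead of being recomputed per packet. A sketch of the shared validation rule against the driver's own types (check_path_sc is a hypothetical helper, not in the driver):

    static int check_path_sc(struct hfi1_qp *qp, struct ib_ah_attr *ah,
                             struct hfi1_devdata *dd)
    {
            u8 sc = ah_to_sc(qp->ibqp.device, ah);

            /* with send DMA active, a usable SC must map to an engine */
            if ((dd->flags & HFI1_HAS_SEND_DMA) && !qp_to_sdma_engine(qp, sc))
                    return -EINVAL;
            return 0;
    }
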
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h
index b9c1575990aa..62a94c5d7dca 100644
--- a/drivers/staging/rdma/hfi1/qp.h
+++ b/drivers/staging/rdma/hfi1/qp.h
@@ -128,7 +128,6 @@ static inline void clear_ahg(struct hfi1_qp *qp)
if (qp->s_sde && qp->s_ahgidx >= 0)
sdma_ahg_free(qp->s_sde, qp->s_ahgidx);
qp->s_ahgidx = -1;
- qp->s_sde = NULL;
}
/**
@@ -212,7 +211,7 @@ int hfi1_qp_init(struct hfi1_ibdev *dev);
void hfi1_qp_exit(struct hfi1_ibdev *dev);
/**
- * hfi1_qp_waitup - wake up on the indicated event
+ * hfi1_qp_wakeup - wake up on the indicated event
* @qp: the QP
 * @flag: the flag on which the qp is stalled
*/
@@ -223,19 +222,19 @@ struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5);
struct qp_iter;
/**
- * qp_iter_init - wake up on the indicated event
+ * qp_iter_init - initialize the iterator for the qp hash list
* @dev: the hfi1_ibdev
*/
struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev);
/**
- * qp_iter_next - wakeup on the indicated event
+ * qp_iter_next - find the next qp in the hash list
* @iter: the iterator for the qp hash list
*/
int qp_iter_next(struct qp_iter *iter);
/**
- * qp_iter_next - wake up on the indicated event
+ * qp_iter_print - print the qp information to seq_file
* @s: the seq_file to emit the qp information on
* @iter: the iterator for the qp hash list
*/
@@ -247,4 +246,41 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
*/
void qp_comm_est(struct hfi1_qp *qp);
+/**
+ * _hfi1_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules qp progress w/o regard to the s_flags.
+ *
+ * It is only used in the post-send path, which doesn't
+ * hold the s_lock.
+ */
+static inline void _hfi1_schedule_send(struct hfi1_qp *qp)
+{
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+
+ iowait_schedule(&qp->s_iowait, ppd->hfi1_wq,
+ qp->s_sde ?
+ qp->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->assigned_node_id)));
+}
+
+/**
+ * hfi1_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules qp progress; the caller should hold
+ * the s_lock.
+ */
+static inline void hfi1_schedule_send(struct hfi1_qp *qp)
+{
+ if (hfi1_send_ok(qp))
+ _hfi1_schedule_send(qp);
+}
+
+void hfi1_migrate_qp(struct hfi1_qp *qp);
+
#endif /* _QP_H */
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c
index ffdb1d787a80..6326a915d7fd 100644
--- a/drivers/staging/rdma/hfi1/qsfp.c
+++ b/drivers/staging/rdma/hfi1/qsfp.c
@@ -475,7 +475,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
u8 *cache = &ppd->qsfp_info.cache[0];
u8 bin_buff[QSFP_DUMP_CHUNK];
char lenstr[6];
- int sofar, ret;
+ int sofar;
int bidx = 0;
u8 *atten = &cache[QSFP_ATTEN_OFFS];
u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];
@@ -536,6 +536,5 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
bidx += QSFP_DUMP_CHUNK;
}
}
- ret = sofar;
- return ret;
+ return sofar;
}
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c
index 5fc93bb312f1..6f4a155f7931 100644
--- a/drivers/staging/rdma/hfi1/rc.c
+++ b/drivers/staging/rdma/hfi1/rc.c
@@ -1608,6 +1608,27 @@ bail:
return;
}
+static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
+ struct hfi1_qp *qp)
+{
+ if (list_empty(&qp->rspwait)) {
+ qp->r_flags |= HFI1_R_RSP_DEFERED_ACK;
+ atomic_inc(&qp->refcount);
+ list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
+ }
+}
+
+static inline void rc_cancel_ack(struct hfi1_qp *qp)
+{
+ qp->r_adefered = 0;
+ if (list_empty(&qp->rspwait))
+ return;
+ list_del_init(&qp->rspwait);
+ qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+}
+
/**
* rc_rcv_error - process an incoming duplicate or error RC packet
* @ohdr: the other headers for this packet
@@ -1650,11 +1671,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
* in the receive queue have been processed.
* Otherwise, we end up propagating congestion.
*/
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
+ rc_defered_ack(rcd, qp);
}
goto done;
}
@@ -1978,7 +1995,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
}
psn = be32_to_cpu(ohdr->bth[2]);
- opcode = bth0 >> 24;
+ opcode = (bth0 >> 24) & 0xff;
/*
* Process responses (ACKs) before anything else. Note that the
@@ -2329,19 +2346,29 @@ send_last:
qp->r_ack_psn = psn;
qp->r_nak_state = 0;
/* Send an ACK if requested or required. */
- if (psn & (1 << 31))
- goto send_ack;
+ if (psn & IB_BTH_REQ_ACK) {
+ if (packet->numpkt == 0) {
+ rc_cancel_ack(qp);
+ goto send_ack;
+ }
+ if (qp->r_adefered >= HFI1_PSN_CREDIT) {
+ rc_cancel_ack(qp);
+ goto send_ack;
+ }
+ if (unlikely(is_fecn)) {
+ rc_cancel_ack(qp);
+ goto send_ack;
+ }
+ qp->r_adefered++;
+ rc_defered_ack(rcd, qp);
+ }
return;
rnr_nak:
qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
qp->r_ack_psn = qp->r_psn;
/* Queue RNR NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
+ rc_defered_ack(rcd, qp);
return;
nack_op_err:
@@ -2349,11 +2376,7 @@ nack_op_err:
qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
+ rc_defered_ack(rcd, qp);
return;
nack_inv_unlck:
@@ -2363,11 +2386,7 @@ nack_inv:
qp->r_nak_state = IB_NAK_INVALID_REQUEST;
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
+ rc_defered_ack(rcd, qp);
return;
nack_acc_unlck:
@@ -2391,19 +2410,19 @@ void hfi1_rc_hdrerr(
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
int diff;
u32 opcode;
- u32 psn;
+ u32 psn, bth0;
/* Check for GRH */
ohdr = &hdr->u.oth;
if (has_grh)
ohdr = &hdr->u.l.oth;
- opcode = be32_to_cpu(ohdr->bth[0]);
- if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
return;
psn = be32_to_cpu(ohdr->bth[2]);
- opcode >>= 24;
+ opcode = (bth0 >> 24) & 0xff;
/* Only deal with RDMA Writes for now */
if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
@@ -2421,13 +2440,7 @@ void hfi1_rc_hdrerr(
* Otherwise, we end up
* propagating congestion.
*/
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(
- &qp->rspwait,
- &rcd->qp_wait_list);
- }
+ rc_defered_ack(rcd, qp);
} /* Out of sequence NAK */
} /* QP Request NAKs */
}
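
The rework above coalesces RC ACKs: each ack-requesting packet is deferred via rc_defered_ack() until a budget of HFI1_PSN_CREDIT is exhausted, the packet is the last of the interrupt batch (numpkt == 0), or FECN forces an immediate response, at which point rc_cancel_ack() drops the deferred state and an explicit ACK goes out. A userspace model of that decision (the credit value is a placeholder):

    #include <stdbool.h>
    #include <stdio.h>

    #define HFI1_PSN_CREDIT 16      /* placeholder budget */

    struct rc_state { unsigned r_adefered; };

    static bool must_ack_now(struct rc_state *qp, int numpkt, bool is_fecn)
    {
            if (numpkt == 0 || qp->r_adefered >= HFI1_PSN_CREDIT || is_fecn) {
                    qp->r_adefered = 0;     /* rc_cancel_ack() */
                    return true;            /* goto send_ack */
            }
            qp->r_adefered++;               /* rc_defered_ack() */
            return false;
    }

    int main(void)
    {
            struct rc_state qp = { 0 };
            int i, acks = 0;

            for (i = 0; i < 40; i++)
                    acks += must_ack_now(&qp, 1, false);
            printf("explicit acks for 40 packets: %d\n", acks);
            return 0;
    }
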
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c
index 49bc9fd7a51a..4a91975b68d7 100644
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ b/drivers/staging/rdma/hfi1/ruc.c
@@ -241,26 +241,6 @@ bail:
return ret;
}
-/*
- * Switch to alternate path.
- * The QP s_lock should be held and interrupts disabled.
- */
-void hfi1_migrate_qp(struct hfi1_qp *qp)
-{
- struct ib_event ev;
-
- qp->s_mig_state = IB_MIG_MIGRATED;
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
- qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= HFI1_S_AHG_CLEAR;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-}
-
static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
{
if (!index) {
@@ -308,11 +288,12 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
}
if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
sc5, be16_to_cpu(hdr->lrh[3])))) {
- hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
(u16)bth0,
(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
+ be16_to_cpu(hdr->lrh[3]),
+ be16_to_cpu(hdr->lrh[1]));
goto err;
}
/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
@@ -340,11 +321,12 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
}
if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
sc5, be16_to_cpu(hdr->lrh[3])))) {
- hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
(u16)bth0,
(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
+ be16_to_cpu(hdr->lrh[3]),
+ be16_to_cpu(hdr->lrh[1]));
goto err;
}
/* Validate the SLID. See Ch. 9.6.1.5 */
@@ -714,11 +696,8 @@ static inline void build_ahg(struct hfi1_qp *qp, u32 npsn)
clear_ahg(qp);
if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
/* first middle that needs copy */
- if (qp->s_ahgidx < 0) {
- if (!qp->s_sde)
- qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
+ if (qp->s_ahgidx < 0)
qp->s_ahgidx = sdma_ahg_alloc(qp->s_sde);
- }
if (qp->s_ahgidx >= 0) {
qp->s_ahgpsn = npsn;
qp->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
@@ -761,7 +740,6 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
u16 lrh0;
u32 nwords;
u32 extra_bytes;
- u8 sc5;
u32 bth1;
/* Construct the header. */
@@ -775,9 +753,7 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
lrh0 = HFI1_LRH_GRH;
middle = 0;
}
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
- qp->s_sc = sc5;
+ lrh0 |= (qp->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
/*
* reset s_hdr/AHG fields
*
@@ -835,16 +811,20 @@ void hfi1_do_send(struct work_struct *work)
{
struct iowait *wait = container_of(work, struct iowait, iowork);
struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_pkt_state ps;
int (*make_req)(struct hfi1_qp *qp);
unsigned long flags;
unsigned long timeout;
+ ps.dev = to_idev(qp->ibqp.device);
+ ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ps.ppd = ppd_from_ibp(ps.ibp);
+
if ((qp->ibqp.qp_type == IB_QPT_RC ||
qp->ibqp.qp_type == IB_QPT_UC) &&
!loopback &&
- (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
+ (qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc) - 1)) ==
+ ps.ppd->lid) {
ruc_loopback(qp);
return;
}
@@ -876,8 +856,7 @@ void hfi1_do_send(struct work_struct *work)
* If the packet cannot be sent now, return and
* the send tasklet will be woken up later.
*/
- if (hfi1_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
- qp->s_cur_sge, qp->s_cur_size))
+ if (hfi1_verbs_send(qp, &ps))
break;
/* Record that s_hdr is empty. */
qp->s_hdrwords = 0;
@@ -886,7 +865,7 @@ void hfi1_do_send(struct work_struct *work)
/* allow other tasks to run */
if (unlikely(time_after(jiffies, timeout))) {
cond_resched();
- ppd->dd->verbs_dev.n_send_schedule++;
+ ps.ppd->dd->verbs_dev.n_send_schedule++;
timeout = jiffies + SEND_RESCHED_TIMEOUT;
}
} while (make_req(qp));
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c
index 2a1da2189900..9a15f1f32b45 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/staging/rdma/hfi1/sdma.c
@@ -236,7 +236,6 @@ static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
-static void sdma_start_sw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
@@ -470,12 +469,6 @@ static void sdma_err_halt_wait(struct work_struct *work)
sdma_process_event(sde, sdma_event_e15_hw_halt_done);
}
-static void sdma_start_err_halt_wait(struct sdma_engine *sde)
-{
- schedule_work(&sde->err_halt_worker);
-}
-
-
static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
@@ -682,11 +675,6 @@ static void sdma_start_hw_clean_up(struct sdma_engine *sde)
tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}
-static void sdma_start_sw_clean_up(struct sdma_engine *sde)
-{
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
-}
-
static void sdma_set_state(struct sdma_engine *sde,
enum sdma_states next_state)
{
@@ -777,19 +765,27 @@ struct sdma_engine *sdma_select_engine_vl(
struct sdma_map_elem *e;
struct sdma_engine *rval;
- if (WARN_ON(vl > 8))
- return NULL;
+	/* NOTE: this should only happen if the SC->VL mapping changed
+	 * after the initial checks on the QP/AH.
+	 * The default below returns engine 0.
+ */
+ if (vl >= num_vls) {
+ rval = NULL;
+ goto done;
+ }
rcu_read_lock();
m = rcu_dereference(dd->sdma_map);
if (unlikely(!m)) {
rcu_read_unlock();
- return NULL;
+ return &dd->per_sdma[0];
}
e = m->map[vl & m->mask];
rval = e->sde[selector & e->mask];
rcu_read_unlock();
+done:
+ rval = !rval ? &dd->per_sdma[0] : rval;
trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
return rval;
}
@@ -1874,7 +1870,7 @@ static void dump_sdma_state(struct sdma_engine *sde)
}
#define SDE_FMT \
- "SDE %u STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
+ "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
/**
* sdma_seqfile_dump_sde() - debugfs dump of sde
* @s: seq file
@@ -1894,6 +1890,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
head = sde->descq_head & sde->sdma_mask;
tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
seq_printf(s, SDE_FMT, sde->this_idx,
+ sde->cpu,
sdma_state_name(sde->state.current_state),
(unsigned long long)read_sde_csr(sde, SD(CTRL)),
(unsigned long long)read_sde_csr(sde, SD(STATUS)),
@@ -2308,7 +2305,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
case sdma_event_e50_hw_cleaned:
break;
case sdma_event_e60_hw_halted:
- sdma_start_err_halt_wait(sde);
+ schedule_work(&sde->err_halt_worker);
break;
case sdma_event_e70_go_idle:
ss->go_s99_running = 0;
@@ -2389,7 +2386,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
break;
case sdma_event_e60_hw_halted:
sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
- sdma_start_err_halt_wait(sde);
+ schedule_work(&sde->err_halt_worker);
break;
case sdma_event_e70_go_idle:
break;
@@ -2452,7 +2449,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
switch (event) {
case sdma_event_e00_go_hw_down:
sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e10_go_hw_start:
break;
@@ -2494,13 +2491,13 @@ static void __sdma_process_event(struct sdma_engine *sde,
switch (event) {
case sdma_event_e00_go_hw_down:
sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e10_go_hw_start:
break;
case sdma_event_e15_hw_halt_done:
sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e25_hw_clean_up_done:
break;
@@ -2512,7 +2509,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
case sdma_event_e50_hw_cleaned:
break;
case sdma_event_e60_hw_halted:
- sdma_start_err_halt_wait(sde);
+ schedule_work(&sde->err_halt_worker);
break;
case sdma_event_e70_go_idle:
ss->go_s99_running = 0;
@@ -2535,13 +2532,13 @@ static void __sdma_process_event(struct sdma_engine *sde,
switch (event) {
case sdma_event_e00_go_hw_down:
sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e10_go_hw_start:
break;
case sdma_event_e15_hw_halt_done:
sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e25_hw_clean_up_done:
break;
@@ -2553,7 +2550,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
case sdma_event_e50_hw_cleaned:
break;
case sdma_event_e60_hw_halted:
- sdma_start_err_halt_wait(sde);
+ schedule_work(&sde->err_halt_worker);
break;
case sdma_event_e70_go_idle:
ss->go_s99_running = 0;
@@ -2575,7 +2572,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
switch (event) {
case sdma_event_e00_go_hw_down:
sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e10_go_hw_start:
break;
@@ -2599,7 +2596,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
break;
case sdma_event_e81_hw_frozen:
sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e82_hw_unfreeze:
break;
@@ -2614,7 +2611,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
switch (event) {
case sdma_event_e00_go_hw_down:
sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e10_go_hw_start:
break;
@@ -2658,7 +2655,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
switch (event) {
case sdma_event_e00_go_hw_down:
sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(sde);
+ tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
break;
case sdma_event_e10_go_hw_start:
break;
@@ -2681,7 +2678,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
* progress check
*/
sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
- sdma_start_err_halt_wait(sde);
+ schedule_work(&sde->err_halt_worker);
break;
case sdma_event_e70_go_idle:
sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
@@ -2734,22 +2731,21 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
GFP_ATOMIC);
if (!tx->coalesce_buf)
- return -ENOMEM;
-
+ goto enomem;
tx->coalesce_idx = 0;
}
return 0;
}
if (unlikely(tx->num_desc == MAX_DESC))
- return -ENOMEM;
+ goto enomem;
tx->descp = kmalloc_array(
MAX_DESC,
sizeof(struct sdma_desc),
GFP_ATOMIC);
if (!tx->descp)
- return -ENOMEM;
+ goto enomem;
/* reserve last descriptor for coalescing */
tx->desc_limit = MAX_DESC - 1;
@@ -2757,6 +2753,9 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
for (i = 0; i < tx->num_desc; i++)
tx->descp[i] = tx->descs[i];
return 0;
+enomem:
+ sdma_txclean(dd, tx);
+ return -ENOMEM;
}
/*
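
sdma_select_engine_vl() no longer returns NULL on an out-of-range VL or a missing map; it falls back to engine 0 so callers always receive a usable engine, since an SC->VL remap can race with the earlier QP/AH checks. A userspace model of the fallback policy:

    #include <stdio.h>

    #define NUM_VLS_SKETCH 8
    #define NUM_ENGINES    4

    static int vl_map[NUM_VLS_SKETCH] = { 0, 1, 2, 3, 0, 1, 2, 3 };

    static int select_engine(unsigned vl)
    {
            if (vl >= NUM_VLS_SKETCH)   /* SC->VL changed after QP checks */
                    return 0;           /* default engine */
            return vl_map[vl];
    }

    int main(void)
    {
            printf("vl 2 -> engine %d, vl 15 -> engine %d\n",
                   select_engine(2), select_engine(15));
            return 0;
    }
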
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h
index cc22d2ee2054..da89e6458162 100644
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ b/drivers/staging/rdma/hfi1/sdma.h
@@ -410,8 +410,6 @@ struct sdma_engine {
u64 idle_mask;
u64 progress_mask;
/* private: */
- struct workqueue_struct *wq;
- /* private: */
volatile __le64 *head_dma; /* DMA'ed by chip */
/* private: */
dma_addr_t head_phys;
@@ -426,6 +424,8 @@ struct sdma_engine {
u32 sdma_mask;
/* private */
struct sdma_state state;
+ /* private */
+ int cpu;
/* private: */
u8 sdma_shift;
/* private: */
@@ -774,10 +774,13 @@ static inline int _sdma_txadd_daddr(
tx->tlen -= len;
/* special cases for last */
if (!tx->tlen) {
- if (tx->packet_len & (sizeof(u32) - 1))
+ if (tx->packet_len & (sizeof(u32) - 1)) {
rval = _pad_sdma_tx_descs(dd, tx);
- else
+ if (rval)
+ return rval;
+ } else {
_sdma_close_tx(dd, tx);
+ }
}
tx->num_desc++;
return rval;
@@ -990,7 +993,9 @@ static inline void sdma_iowait_schedule(
struct sdma_engine *sde,
struct iowait *wait)
{
- iowait_schedule(wait, sde->wq);
+ struct hfi1_pportdata *ppd = sde->dd->pport;
+
+ iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
}
/* for use by interrupt handling */
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c
index f55b75194847..10122e84cb2f 100644
--- a/drivers/staging/rdma/hfi1/trace.c
+++ b/drivers/staging/rdma/hfi1/trace.c
@@ -67,7 +67,7 @@ u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
#define IMM_PRN "imm %d"
#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
-#define AETH_PRN "aeth syn 0x%.2x msn 0x%.8x"
+#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
#define ATOMICACKETH_PRN "origdata %lld"
#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
@@ -79,6 +79,19 @@ static u64 ib_u64_get(__be32 *p)
return ((u64)be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]);
}
+static const char *parse_syndrome(u8 syndrome)
+{
+ switch (syndrome >> 5) {
+ case 0:
+ return "ACK";
+ case 1:
+ return "RNRNAK";
+ case 3:
+ return "NAK";
+ }
+ return "";
+}
+
const char *parse_everbs_hdrs(
struct trace_seq *p,
u8 opcode,
@@ -124,16 +137,18 @@ const char *parse_everbs_hdrs(
case OP(RC, RDMA_READ_RESPONSE_LAST):
case OP(RC, RDMA_READ_RESPONSE_ONLY):
case OP(RC, ACKNOWLEDGE):
- trace_seq_printf(p, AETH_PRN,
- be32_to_cpu(eh->aeth) >> 24,
- be32_to_cpu(eh->aeth) & HFI1_MSN_MASK);
+ trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24,
+ parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
+ be32_to_cpu(eh->aeth) & HFI1_MSN_MASK);
break;
/* aeth + atomicacketh */
case OP(RC, ATOMIC_ACKNOWLEDGE):
trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
- (be32_to_cpu(eh->at.aeth) >> 24) & 0xff,
- be32_to_cpu(eh->at.aeth) & HFI1_MSN_MASK,
- (unsigned long long)ib_u64_get(eh->at.atomic_ack_eth));
+ be32_to_cpu(eh->at.aeth) >> 24,
+ parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24),
+ be32_to_cpu(eh->at.aeth) & HFI1_MSN_MASK,
+ (unsigned long long)
+ ib_u64_get(eh->at.atomic_ack_eth));
break;
/* atomiceth */
case OP(RC, COMPARE_SWAP):
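
parse_syndrome() decodes the top three bits of the AETH syndrome byte so traces show ACK/RNRNAK/NAK alongside the raw value. A standalone version (the MSN mask below mirrors HFI1_MSN_MASK):

    #include <stdint.h>
    #include <stdio.h>

    static const char *parse_syndrome(uint8_t syndrome)
    {
            switch (syndrome >> 5) {
            case 0: return "ACK";
            case 1: return "RNRNAK";
            case 3: return "NAK";
            }
            return "";
    }

    int main(void)
    {
            uint32_t aeth = 0x60000001;     /* example AETH dword */

            printf("aeth syn 0x%.2x %s msn 0x%.8x\n", aeth >> 24,
                   parse_syndrome(aeth >> 24), aeth & 0xFFFFFF);
            return 0;
    }
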
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h
index 57430295c404..86c12ebfd4f0 100644
--- a/drivers/staging/rdma/hfi1/trace.h
+++ b/drivers/staging/rdma/hfi1/trace.h
@@ -417,7 +417,8 @@ __print_symbolic(opcode, \
ib_opcode_name(UC_RDMA_WRITE_ONLY), \
ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
ib_opcode_name(UD_SEND_ONLY), \
- ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE))
+ ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(CNP))
#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c
index 6095039c4485..4f2a7889a852 100644
--- a/drivers/staging/rdma/hfi1/uc.c
+++ b/drivers/staging/rdma/hfi1/uc.c
@@ -268,7 +268,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
u32 tlen = packet->tlen;
struct hfi1_qp *qp = packet->qp;
struct hfi1_other_headers *ohdr = packet->ohdr;
- u32 opcode;
+ u32 bth0, opcode;
u32 hdrsize = packet->hlen;
u32 psn;
u32 pad;
@@ -278,10 +278,9 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
int has_grh = rcv_flags & HFI1_HAS_GRH;
int ret;
u32 bth1;
- struct ib_grh *grh = NULL;
- opcode = be32_to_cpu(ohdr->bth[0]);
- if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
+ bth0 = be32_to_cpu(ohdr->bth[0]);
+ if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
return;
bth1 = be32_to_cpu(ohdr->bth[1]);
@@ -303,6 +302,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
}
if (bth1 & HFI1_FECN_SMASK) {
+ struct ib_grh *grh = NULL;
u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
u16 slid = be16_to_cpu(hdr->lrh[3]);
u16 dlid = be16_to_cpu(hdr->lrh[1]);
@@ -310,13 +310,16 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
u8 sc5;
sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ if (has_grh)
+ grh = &hdr->u.l.grh;
- return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
+ return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5,
+ grh);
}
}
psn = be32_to_cpu(ohdr->bth[2]);
- opcode >>= 24;
+ opcode = (bth0 >> 24) & 0xff;
/* Compare the PSN verses the expected PSN. */
if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
index 5a9c784bec04..bd1b402c1e14 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/staging/rdma/hfi1/ud.c
@@ -111,11 +111,10 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
((1 << ppd->lmc) - 1));
if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
qp->s_pkey_index, slid))) {
- hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey,
+ hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey,
ah_attr->sl,
sqp->ibqp.qp_num, qp->ibqp.qp_num,
- cpu_to_be16(slid),
- cpu_to_be16(ah_attr->dlid));
+ slid, ah_attr->dlid);
goto drop;
}
}
@@ -135,11 +134,11 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
lid = ppd->lid | (ah_attr->src_path_bits &
((1 << ppd->lmc) - 1));
- hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
ah_attr->sl,
sqp->ibqp.qp_num, qp->ibqp.qp_num,
- cpu_to_be16(lid),
- cpu_to_be16(ah_attr->dlid));
+ lid,
+ ah_attr->dlid);
goto drop;
}
}
@@ -383,6 +382,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
lrh0 |= (sc5 & 0xf) << 12;
qp->s_sc = sc5;
}
+ qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
qp->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
qp->s_hdr->ibh.lrh[2] =
@@ -736,12 +736,13 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
* for invalid pkeys is optional according to
* IB spec (release 1.3, section 10.9.4)
*/
- hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
+ hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
pkey,
(be16_to_cpu(hdr->lrh[0]) >> 4) &
0xF,
src_qp, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
+ be16_to_cpu(hdr->lrh[3]),
+ be16_to_cpu(hdr->lrh[1]));
return;
}
} else {
@@ -752,10 +753,11 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
}
if (unlikely(qkey != qp->qkey)) {
- hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
+ hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
src_qp, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
+ be16_to_cpu(hdr->lrh[3]),
+ be16_to_cpu(hdr->lrh[1]));
return;
}
/* Drop invalid MAD packets (see 13.5.3.1). */
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/staging/rdma/hfi1/user_exp_rcv.h
new file mode 100644
index 000000000000..4f4876e1d353
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.h
@@ -0,0 +1,74 @@
+#ifndef _HFI1_USER_EXP_RCV_H
+#define _HFI1_USER_EXP_RCV_H
+/*
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define EXP_TID_TIDLEN_MASK 0x7FFULL
+#define EXP_TID_TIDLEN_SHIFT 0
+#define EXP_TID_TIDCTRL_MASK 0x3ULL
+#define EXP_TID_TIDCTRL_SHIFT 20
+#define EXP_TID_TIDIDX_MASK 0x3FFULL
+#define EXP_TID_TIDIDX_SHIFT 22
+#define EXP_TID_GET(tid, field) \
+ (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
+
+#define EXP_TID_SET(field, value) \
+ (((value) & EXP_TID_TID##field##_MASK) << \
+ EXP_TID_TID##field##_SHIFT)
+#define EXP_TID_CLEAR(tid, field) ({ \
+ (tid) &= ~(EXP_TID_TID##field##_MASK << \
+ EXP_TID_TID##field##_SHIFT); \
+ })
+#define EXP_TID_RESET(tid, field, value) do { \
+ EXP_TID_CLEAR(tid, field); \
+ (tid) |= EXP_TID_SET(field, (value)); \
+ } while (0)
+
+#endif /* _HFI1_USER_EXP_RCV_H */
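
The EXP_TID macros pack a TID's index, control and length fields into one word via token pasting on the field name. A quick standalone check that a packed value round-trips (the defines are copied from the header above):

    #include <stdio.h>

    #define EXP_TID_TIDLEN_MASK 0x7FFULL
    #define EXP_TID_TIDLEN_SHIFT 0
    #define EXP_TID_TIDCTRL_MASK 0x3ULL
    #define EXP_TID_TIDCTRL_SHIFT 20
    #define EXP_TID_TIDIDX_MASK 0x3FFULL
    #define EXP_TID_TIDIDX_SHIFT 22
    #define EXP_TID_GET(tid, field) \
            (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
    #define EXP_TID_SET(field, value) \
            (((value) & EXP_TID_TID##field##_MASK) << \
             EXP_TID_TID##field##_SHIFT)

    int main(void)
    {
            unsigned long long tid = EXP_TID_SET(IDX, 5) |
                                     EXP_TID_SET(CTRL, 2) |
                                     EXP_TID_SET(LEN, 128);

            printf("idx %llu ctrl %llu len %llu\n",
                   EXP_TID_GET(tid, IDX), EXP_TID_GET(tid, CTRL),
                   EXP_TID_GET(tid, LEN));
            return 0;
    }
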
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c
index 9071afbd7bf4..692de658f0dc 100644
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ b/drivers/staging/rdma/hfi1/user_pages.c
@@ -49,59 +49,11 @@
*/
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/device.h>
#include "hfi.h"
-static void __hfi1_release_user_pages(struct page **p, size_t num_pages,
- int dirty)
-{
- size_t i;
-
- for (i = 0; i < num_pages; i++) {
- if (dirty)
- set_page_dirty_lock(p[i]);
- put_page(p[i]);
- }
-}
-
-/*
- * Call with current->mm->mmap_sem held.
- */
-static int __hfi1_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
-{
- unsigned long lock_limit;
- size_t got;
- int ret;
-
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
- ret = -ENOMEM;
- goto bail;
- }
-
- for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages(current, current->mm,
- start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
- p + got, NULL);
- if (ret < 0)
- goto bail_release;
- }
-
- current->mm->pinned_vm += num_pages;
-
- ret = 0;
- goto bail;
-
-bail_release:
- __hfi1_release_user_pages(p, got, 0);
-bail:
- return ret;
-}
-
/**
* hfi1_map_page - a safety wrapper around pci_map_page()
*
@@ -116,41 +68,44 @@ dma_addr_t hfi1_map_page(struct pci_dev *hwdev, struct page *page,
return phys;
}
-/**
- * hfi1_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (page aligned user virtual
- * address) and pins it and the following specified number of pages. For
- * now, num_pages is always 1, but that will probably change at some point
- * (because caller is doing expected sends on a single virtually contiguous
- * buffer, so we can do all pages at once).
- */
-int hfi1_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
+int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
+ struct page **pages)
{
+ unsigned long pinned, lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ bool can_lock = capable(CAP_IPC_LOCK);
int ret;
- down_write(&current->mm->mmap_sem);
+ down_read(&current->mm->mmap_sem);
+ pinned = current->mm->pinned_vm;
+ up_read(&current->mm->mmap_sem);
- ret = __hfi1_get_user_pages(start_page, num_pages, p);
+ if (pinned + npages > lock_limit && !can_lock)
+ return -ENOMEM;
+ ret = get_user_pages_fast(vaddr, npages, writable, pages);
+ if (ret < 0)
+ return ret;
+
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm += ret;
up_write(&current->mm->mmap_sem);
return ret;
}
-void hfi1_release_user_pages(struct page **p, size_t num_pages)
+void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
{
- if (current->mm) /* during close after signal, mm can be NULL */
- down_write(&current->mm->mmap_sem);
+ size_t i;
- __hfi1_release_user_pages(p, num_pages, 1);
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty_lock(p[i]);
+ put_page(p[i]);
+ }
- if (current->mm) {
- current->mm->pinned_vm -= num_pages;
+ if (current->mm) { /* during close after signal, mm can be NULL */
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm -= npages;
up_write(&current->mm->mmap_sem);
}
}
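
hfi1_acquire_user_pages() now takes mmap_sem only for reading while sampling pinned_vm, delegates the actual pinning to get_user_pages_fast(), and charges pinned_vm afterwards under the write lock; the admission rule is unchanged: stay under RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK. A userspace model of the admission check (the capability is modeled by a boolean):

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/resource.h>

    static int may_pin(size_t pinned, size_t npages, bool can_lock)
    {
            struct rlimit rl;
            unsigned long lock_limit;

            if (getrlimit(RLIMIT_MEMLOCK, &rl))
                    return -1;
            lock_limit = rl.rlim_cur / sysconf(_SC_PAGESIZE);
            if (pinned + npages > lock_limit && !can_lock)
                    return -1;      /* -ENOMEM in the driver */
            return 0;
    }

    int main(void)
    {
            printf("pin 16 pages: %s\n",
                   may_pin(0, 16, false) ? "refused" : "ok");
            return 0;
    }
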
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index 36c838dcf023..d3de771a0770 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -146,8 +146,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
/* Last packet in the request */
-#define TXREQ_FLAGS_REQ_LAST_PKT (1 << 0)
-#define TXREQ_FLAGS_IOVEC_LAST_PKT (1 << 0)
+#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
+#define TXREQ_FLAGS_IOVEC_LAST_PKT BIT(0)
#define SDMA_REQ_IN_USE 0
#define SDMA_REQ_FOR_THREAD 1
@@ -156,9 +156,9 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
#define SDMA_REQ_HAS_ERROR 4
#define SDMA_REQ_DONE_ERROR 5
-#define SDMA_PKT_Q_INACTIVE (1 << 0)
-#define SDMA_PKT_Q_ACTIVE (1 << 1)
-#define SDMA_PKT_Q_DEFERRED (1 << 2)
+#define SDMA_PKT_Q_INACTIVE BIT(0)
+#define SDMA_PKT_Q_ACTIVE BIT(1)
+#define SDMA_PKT_Q_DEFERRED BIT(2)
/*
* Maximum retry attempts to submit a TX request
@@ -214,12 +214,6 @@ struct user_sdma_request {
*/
u8 omfactor;
/*
- * pointer to the user's task_struct. We are going to
- * get a reference to it so we can process io vectors
- * at a later time.
- */
- struct task_struct *user_proc;
- /*
* pointer to the user's mm_struct. We are going to
* get a reference to it so it doesn't get freed
* since we might not be in process context when we
@@ -245,9 +239,13 @@ struct user_sdma_request {
u16 tididx;
u32 sent;
u64 seqnum;
- spinlock_t list_lock;
struct list_head txps;
+ spinlock_t txcmp_lock; /* protect txcmp list */
+ struct list_head txcmp;
unsigned long flags;
+ /* status of the last txreq completed */
+ int status;
+ struct work_struct worker;
};
/*
@@ -260,6 +258,7 @@ struct user_sdma_txreq {
/* Packet header for the txreq */
struct hfi1_pkt_header hdr;
struct sdma_txreq txreq;
+ struct list_head list;
struct user_sdma_request *req;
struct {
struct user_sdma_iovec *vec;
@@ -282,10 +281,12 @@ struct user_sdma_txreq {
static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
static int num_user_pages(const struct iovec *);
static void user_sdma_txreq_cb(struct sdma_txreq *, int, int);
+static void user_sdma_delayed_completion(struct work_struct *);
static void user_sdma_free_request(struct user_sdma_request *);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
-static void unpin_vector_pages(struct user_sdma_iovec *);
+static void unpin_vector_pages(struct user_sdma_request *,
+ struct user_sdma_iovec *);
static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
@@ -352,6 +353,7 @@ static void sdma_kmem_cache_ctor(void *obj)
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
{
+ struct hfi1_filedata *fd;
int ret = 0;
unsigned memsize;
char buf[64];
@@ -365,6 +367,8 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
goto done;
}
+ fd = fp->private_data;
+
if (!hfi1_sdma_comp_ring_size) {
ret = -EINVAL;
goto done;
@@ -384,16 +388,17 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
INIT_LIST_HEAD(&pq->list);
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
- pq->subctxt = subctxt_fp(fp);
+ pq->subctxt = fd->subctxt;
pq->n_max_reqs = hfi1_sdma_comp_ring_size;
pq->state = SDMA_PKT_Q_INACTIVE;
atomic_set(&pq->n_reqs, 0);
+ init_waitqueue_head(&pq->wait);
iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
activate_packet_queue);
pq->reqidx = 0;
snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
- subctxt_fp(fp));
+ fd->subctxt);
pq->txreq_cache = kmem_cache_create(buf,
sizeof(struct user_sdma_txreq),
L1_CACHE_BYTES,
@@ -404,7 +409,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
uctxt->ctxt);
goto pq_txreq_nomem;
}
- user_sdma_pkt_fp(fp) = pq;
+ fd->pq = pq;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq)
goto cq_nomem;
@@ -416,7 +421,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
goto cq_comps_nomem;
cq->nentries = hfi1_sdma_comp_ring_size;
- user_sdma_comp_fp(fp) = cq;
+ fd->cq = cq;
spin_lock_irqsave(&uctxt->sdma_qlock, flags);
list_add(&pq->list, &uctxt->sdma_queues);
@@ -431,7 +436,7 @@ pq_txreq_nomem:
kfree(pq->reqs);
pq_reqs_nomem:
kfree(pq);
- user_sdma_pkt_fp(fp) = NULL;
+ fd->pq = NULL;
pq_nomem:
ret = -ENOMEM;
done:
@@ -448,26 +453,16 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
uctxt->ctxt, fd->subctxt);
pq = fd->pq;
if (pq) {
- u16 i, j;
-
spin_lock_irqsave(&uctxt->sdma_qlock, flags);
if (!list_empty(&pq->list))
list_del_init(&pq->list);
spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
iowait_sdma_drain(&pq->busy);
- if (pq->reqs) {
- for (i = 0, j = 0; i < atomic_read(&pq->n_reqs) &&
- j < pq->n_max_reqs; j++) {
- struct user_sdma_request *req = &pq->reqs[j];
-
- if (test_bit(SDMA_REQ_IN_USE, &req->flags)) {
- set_comp_state(req, ERROR, -ECOMM);
- user_sdma_free_request(req);
- i++;
- }
- }
- kfree(pq->reqs);
- }
+ /* Wait until all requests have been freed. */
+ wait_event_interruptible(
+ pq->wait,
+ (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ kfree(pq->reqs);
kmem_cache_destroy(pq->txreq_cache);
kfree(pq);
fd->pq = NULL;
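
The teardown path above no longer walks the request ring forcing completions; it simply waits for the queue to drain. A minimal sketch of that handshake, with hypothetical names standing in for the driver's pq fields:

#include <linux/atomic.h>
#include <linux/wait.h>

struct pkt_queue {
	atomic_t n_reqs;		/* requests still in flight */
	unsigned state;			/* nonzero while active */
	wait_queue_head_t wait;		/* teardown sleeps here */
};

/* completion side: the last in-flight request wakes the teardown path */
static void pkt_queue_req_done(struct pkt_queue *q)
{
	if (atomic_dec_and_test(&q->n_reqs)) {
		xchg(&q->state, 0);	/* mark the queue inactive */
		wake_up(&q->wait);
	}
}

/* teardown side: sleep until every request has completed */
static void pkt_queue_drain(struct pkt_queue *q)
{
	wait_event_interruptible(q->wait, ACCESS_ONCE(q->state) == 0);
}

The driver keeps the decrement and the state flip as separate steps; atomic_dec_and_test() is used here only to keep the sketch compact.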
@@ -485,9 +480,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
unsigned long dim, unsigned long *count)
{
int ret = 0, i = 0, sent;
- struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
- struct hfi1_user_sdma_pkt_q *pq = user_sdma_pkt_fp(fp);
- struct hfi1_user_sdma_comp_q *cq = user_sdma_comp_fp(fp);
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_user_sdma_pkt_q *pq = fd->pq;
+ struct hfi1_user_sdma_comp_q *cq = fd->cq;
struct hfi1_devdata *dd = pq->dd;
unsigned long idx = 0;
u8 pcount = initial_pkt_count;
@@ -499,40 +495,36 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
hfi1_cdbg(
SDMA,
"[%u:%u:%u] First vector not big enough for header %lu/%lu",
- dd->unit, uctxt->ctxt, subctxt_fp(fp),
+ dd->unit, uctxt->ctxt, fd->subctxt,
iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
if (ret) {
hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
- dd->unit, uctxt->ctxt, subctxt_fp(fp), ret);
- ret = -EFAULT;
- goto done;
+ dd->unit, uctxt->ctxt, fd->subctxt, ret);
+ return -EFAULT;
}
- trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, subctxt_fp(fp),
+ trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
(u16 *)&info);
if (cq->comps[info.comp_idx].status == QUEUED) {
hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
- dd->unit, uctxt->ctxt, subctxt_fp(fp),
+ dd->unit, uctxt->ctxt, fd->subctxt,
info.comp_idx);
- ret = -EBADSLT;
- goto done;
+ return -EBADSLT;
}
if (!info.fragsize) {
hfi1_cdbg(SDMA,
"[%u:%u:%u:%u] Request does not specify fragsize",
- dd->unit, uctxt->ctxt, subctxt_fp(fp), info.comp_idx);
- ret = -EINVAL;
- goto done;
+ dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
+ return -EINVAL;
}
/*
* We've done all the safety checks that we can up to this point,
* "allocate" the request entry.
*/
hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
- uctxt->ctxt, subctxt_fp(fp), info.comp_idx);
+ uctxt->ctxt, fd->subctxt, info.comp_idx);
req = pq->reqs + info.comp_idx;
memset(req, 0, sizeof(*req));
/* Mark the request as IN_USE before we start filling it in. */
@@ -540,8 +532,12 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
req->data_iovs = req_iovcnt(info.ctrl) - 1;
req->pq = pq;
req->cq = cq;
+ req->status = -1;
INIT_LIST_HEAD(&req->txps);
- spin_lock_init(&req->list_lock);
+ INIT_LIST_HEAD(&req->txcmp);
+ INIT_WORK(&req->worker, user_sdma_delayed_completion);
+
+ spin_lock_init(&req->txcmp_lock);
memcpy(&req->info, &info, sizeof(info));
if (req_opcode(info.ctrl) == EXPECTED)
@@ -550,8 +546,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
MAX_VECTORS_PER_REQ);
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
/* Copy the header from the user buffer */
ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
@@ -659,7 +654,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
/* Have to select the engine */
req->sde = sdma_select_engine_vl(dd,
- (u32)(uctxt->ctxt + subctxt_fp(fp)),
+ (u32)(uctxt->ctxt + fd->subctxt),
vl);
if (!req->sde || !sdma_running(req->sde)) {
ret = -ECOMM;
@@ -681,18 +676,16 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
sent = user_sdma_send_pkts(req, pcount);
if (unlikely(sent < 0)) {
if (sent != -EBUSY) {
- ret = sent;
- goto send_err;
+ req->status = sent;
+ set_comp_state(req, ERROR, req->status);
+ return sent;
} else
sent = 0;
}
atomic_inc(&pq->n_reqs);
+ xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
if (sent < req->info.npkts) {
- /* Take the references to the user's task and mm_struct */
- get_task_struct(current);
- req->user_proc = current;
-
/*
* This is a somewhat blocking send implementation.
* The driver will block the caller until all packets of the
@@ -702,8 +695,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
ret = user_sdma_send_pkts(req, pcount);
if (ret < 0) {
- if (ret != -EBUSY)
- goto send_err;
+ if (ret != -EBUSY) {
+ req->status = ret;
+ return ret;
+ }
wait_event_interruptible_timeout(
pq->busy.wait_dma,
(pq->state == SDMA_PKT_Q_ACTIVE),
@@ -713,14 +708,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
}
}
- ret = 0;
*count += idx;
- goto done;
-send_err:
- set_comp_state(req, ERROR, ret);
+ return 0;
free_req:
user_sdma_free_request(req);
-done:
return ret;
}
@@ -778,20 +769,24 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
struct hfi1_user_sdma_pkt_q *pq = NULL;
struct user_sdma_iovec *iovec = NULL;
- if (!req->pq) {
- ret = -EINVAL;
- goto done;
- }
+ if (!req->pq)
+ return -EINVAL;
pq = req->pq;
+ /* If tx completion has reported an error, we are done. */
+ if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
+ set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ return -EFAULT;
+ }
+
/*
* Check if we might have sent the entire request already
*/
if (unlikely(req->seqnum == req->info.npkts)) {
if (!list_empty(&req->txps))
goto dosend;
- goto done;
+ return ret;
}
if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
@@ -808,19 +803,18 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
*/
if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
- ret = -EFAULT;
- goto done;
+ return -EFAULT;
}
tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
- if (!tx) {
- ret = -ENOMEM;
- goto done;
- }
+ if (!tx)
+ return -ENOMEM;
+
tx->flags = 0;
tx->req = req;
tx->busycount = 0;
tx->idx = -1;
+ INIT_LIST_HEAD(&tx->list);
memset(tx->iovecs, 0, sizeof(tx->iovecs));
if (req->seqnum == req->info.npkts - 1)
@@ -945,9 +939,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (ret) {
int i;
- dd_dev_err(pq->dd,
- "SDMA txreq add page failed %d\n",
- ret);
+ SDMA_DBG(req, "SDMA txreq add page failed %d\n",
+ ret);
/* Mark all assigned vectors as complete so they
* are unpinned in the callback. */
for (i = tx->idx; i >= 0; i--) {
@@ -1017,12 +1010,12 @@ dosend:
if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
sdma_ahg_free(req->sde, req->ahg_idx);
}
- goto done;
+ return ret;
+
free_txreq:
sdma_txclean(pq->dd, &tx->txreq);
free_tx:
kmem_cache_free(pq->txreq_cache, tx);
-done:
return ret;
}
@@ -1041,52 +1034,58 @@ static inline int num_user_pages(const struct iovec *iov)
static int pin_vector_pages(struct user_sdma_request *req,
struct user_sdma_iovec *iovec) {
- int ret = 0;
- unsigned pinned;
+ int pinned, npages;
- iovec->npages = num_user_pages(&iovec->iov);
- iovec->pages = kcalloc(iovec->npages, sizeof(*iovec->pages),
- GFP_KERNEL);
+ npages = num_user_pages(&iovec->iov);
+ iovec->pages = kcalloc(npages, sizeof(*iovec->pages), GFP_KERNEL);
if (!iovec->pages) {
SDMA_DBG(req, "Failed page array alloc");
- ret = -ENOMEM;
- goto done;
+ return -ENOMEM;
}
- /* If called by the kernel thread, use the user's mm */
- if (current->flags & PF_KTHREAD)
- use_mm(req->user_proc->mm);
- pinned = get_user_pages_fast(
- (unsigned long)iovec->iov.iov_base,
- iovec->npages, 0, iovec->pages);
- /* If called by the kernel thread, unuse the user's mm */
- if (current->flags & PF_KTHREAD)
- unuse_mm(req->user_proc->mm);
- if (pinned != iovec->npages) {
- SDMA_DBG(req, "Failed to pin pages (%u/%u)", pinned,
- iovec->npages);
- ret = -EFAULT;
- goto pfree;
+
+ /*
+ * Get a reference to the process's mm so we can use it when
+ * unpinning the io vectors.
+ */
+ req->pq->user_mm = get_task_mm(current);
+
+ pinned = hfi1_acquire_user_pages((unsigned long)iovec->iov.iov_base,
+ npages, 0, iovec->pages);
+
+ if (pinned < 0)
+ return pinned;
+
+ iovec->npages = pinned;
+ if (pinned != npages) {
+ SDMA_DBG(req, "Failed to pin pages (%d/%u)", pinned, npages);
+ unpin_vector_pages(req, iovec);
+ return -EFAULT;
}
- goto done;
-pfree:
- unpin_vector_pages(iovec);
-done:
- return ret;
+ return 0;
}
-static void unpin_vector_pages(struct user_sdma_iovec *iovec)
+static void unpin_vector_pages(struct user_sdma_request *req,
+ struct user_sdma_iovec *iovec)
{
- unsigned i;
+ /*
+ * Unpinning is done through the workqueue so use the
+ * process's mm if we have a reference to it.
+ */
+ if ((current->flags & PF_KTHREAD) && req->pq->user_mm)
+ use_mm(req->pq->user_mm);
- if (ACCESS_ONCE(iovec->offset) != iovec->iov.iov_len) {
- hfi1_cdbg(SDMA,
- "the complete vector has not been sent yet %llu %zu",
- iovec->offset, iovec->iov.iov_len);
- return;
+ hfi1_release_user_pages(iovec->pages, iovec->npages, 0);
+
+ /*
+ * Unuse the user's mm (see above) and release the
+ * reference to it.
+ */
+ if (req->pq->user_mm) {
+ if (current->flags & PF_KTHREAD)
+ unuse_mm(req->pq->user_mm);
+ mmput(req->pq->user_mm);
}
- for (i = 0; i < iovec->npages; i++)
- if (iovec->pages[i])
- put_page(iovec->pages[i]);
+
kfree(iovec->pages);
iovec->pages = NULL;
iovec->npages = 0;
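
Pinning still happens in process context, but unpinning may now run from a workqueue, i.e. from a kernel thread with no user address space of its own. get_task_mm()/use_mm()/unuse_mm()/mmput() are the stock kernel APIs for that; the wrapper below is a hedged sketch (names hypothetical), borrowing the mm presumably so pinned-page accounting is charged to the right process:

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>

/* submit path (process context): take a reference on the caller's mm */
static struct mm_struct *grab_caller_mm(void)
{
	return get_task_mm(current);	/* NULL if current is a kthread */
}

/* work item (possibly a kthread): adopt the user's mm while unpinning */
static void unpin_pages_deferred(struct mm_struct *mm,
				 struct page **pages, unsigned npages)
{
	unsigned i;

	if ((current->flags & PF_KTHREAD) && mm)
		use_mm(mm);			/* borrow the address space */

	for (i = 0; i < npages; i++)
		put_page(pages[i]);		/* drop the earlier pin */

	if (mm) {
		if (current->flags & PF_KTHREAD)
			unuse_mm(mm);		/* stop borrowing it */
		mmput(mm);			/* release get_task_mm() ref */
	}
}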
@@ -1354,54 +1353,116 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
return diff;
}
+/*
+ * SDMA tx request completion callback. Called when the SDMA progress
+ * state machine gets notification that the SDMA descriptors for this
+ * tx request have been processed by the DMA engine. Called in
+ * interrupt context.
+ */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status,
int drain)
{
struct user_sdma_txreq *tx =
container_of(txreq, struct user_sdma_txreq, txreq);
- struct user_sdma_request *req = tx->req;
- struct hfi1_user_sdma_pkt_q *pq = req ? req->pq : NULL;
- u64 tx_seqnum;
+ struct user_sdma_request *req;
+ bool defer;
+ int i;
- if (unlikely(!req || !pq))
+ if (!tx->req)
return;
- /* If we have any io vectors associated with this txreq,
- * check whether they need to be 'freed'. */
- if (tx->idx != -1) {
- int i;
+ req = tx->req;
+ /*
+ * If this is the callback for the last packet of the request,
+ * queue up the request for cleanup.
+ */
+ defer = (tx->seqnum == req->info.npkts - 1);
- for (i = tx->idx; i >= 0; i--) {
- if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT)
- unpin_vector_pages(tx->iovecs[i].vec);
+ /*
+ * If we have any io vectors associated with this txreq,
+ * check whether they need to be 'freed'. We can't free them
+ * here because the unpin function needs to be able to sleep.
+ */
+ for (i = tx->idx; i >= 0; i--) {
+ if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT) {
+ defer = true;
+ break;
}
}
- tx_seqnum = tx->seqnum;
- kmem_cache_free(pq->txreq_cache, tx);
-
+ req->status = status;
if (status != SDMA_TXREQ_S_OK) {
- dd_dev_err(pq->dd, "SDMA completion with error %d", status);
- set_comp_state(req, ERROR, status);
+ SDMA_DBG(req, "SDMA completion with error %d",
+ status);
set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
- /* Do not free the request until the sender loop has ack'ed
- * the error and we've seen all txreqs. */
- if (tx_seqnum == ACCESS_ONCE(req->seqnum) &&
- test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) {
- atomic_dec(&pq->n_reqs);
- user_sdma_free_request(req);
- }
+ defer = true;
+ }
+
+ /*
+ * Defer the cleanup of the io vectors and the request until later
+ * so it can be done outside of interrupt context.
+ */
+ if (defer) {
+ spin_lock(&req->txcmp_lock);
+ list_add_tail(&tx->list, &req->txcmp);
+ spin_unlock(&req->txcmp_lock);
+ schedule_work(&req->worker);
} else {
- if (tx_seqnum == req->info.npkts - 1) {
- /* We've sent and completed all packets in this
- * request. Signal completion to the user */
- atomic_dec(&pq->n_reqs);
- set_comp_state(req, COMPLETE, 0);
- user_sdma_free_request(req);
+ kmem_cache_free(req->pq->txreq_cache, tx);
+ }
+}
+
+static void user_sdma_delayed_completion(struct work_struct *work)
+{
+ struct user_sdma_request *req =
+ container_of(work, struct user_sdma_request, worker);
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct user_sdma_txreq *tx = NULL;
+ unsigned long flags;
+ u64 seqnum;
+ int i;
+
+ while (1) {
+ spin_lock_irqsave(&req->txcmp_lock, flags);
+ if (!list_empty(&req->txcmp)) {
+ tx = list_first_entry(&req->txcmp,
+ struct user_sdma_txreq, list);
+ list_del(&tx->list);
+ }
+ spin_unlock_irqrestore(&req->txcmp_lock, flags);
+ if (!tx)
+ break;
+
+ for (i = tx->idx; i >= 0; i--)
+ if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT)
+ unpin_vector_pages(req, tx->iovecs[i].vec);
+
+ seqnum = tx->seqnum;
+ kmem_cache_free(pq->txreq_cache, tx);
+ tx = NULL;
+
+ if (req->status != SDMA_TXREQ_S_OK) {
+ if (seqnum == ACCESS_ONCE(req->seqnum) &&
+ test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) {
+ atomic_dec(&pq->n_reqs);
+ set_comp_state(req, ERROR, req->status);
+ user_sdma_free_request(req);
+ break;
+ }
+ } else {
+ if (seqnum == req->info.npkts - 1) {
+ atomic_dec(&pq->n_reqs);
+ set_comp_state(req, COMPLETE, 0);
+ user_sdma_free_request(req);
+ break;
+ }
}
}
- if (!atomic_read(&pq->n_reqs))
+
+ if (!atomic_read(&pq->n_reqs)) {
xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+ wake_up(&pq->wait);
+ }
}
static void user_sdma_free_request(struct user_sdma_request *req)
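
Taken together, the callback and the worker form a classic split: the interrupt-context half only moves the finished txreq onto a spinlock-protected list and kicks a work item, while the sleepable half drains the list. The same shape in isolation (self-contained, hypothetical names):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct defer_ctx {
	spinlock_t lock;		/* protects the done list */
	struct list_head done;		/* completed items awaiting cleanup */
	struct work_struct worker;	/* runs cleanup_worker() below */
};

struct done_item {
	struct list_head list;
};

/* interrupt context: queue the item and schedule the worker */
static void complete_from_irq(struct defer_ctx *ctx, struct done_item *it)
{
	spin_lock(&ctx->lock);		/* plain lock: already in hardirq */
	list_add_tail(&it->list, &ctx->done);
	spin_unlock(&ctx->lock);
	schedule_work(&ctx->worker);
}

/* process context: drain the list; sleeping is allowed here */
static void cleanup_worker(struct work_struct *work)
{
	struct defer_ctx *ctx = container_of(work, struct defer_ctx, worker);
	unsigned long flags;

	for (;;) {
		struct done_item *it;

		spin_lock_irqsave(&ctx->lock, flags);
		it = list_first_entry_or_null(&ctx->done,
					      struct done_item, list);
		if (it)
			list_del(&it->list);
		spin_unlock_irqrestore(&ctx->lock, flags);

		if (!it)
			break;
		kfree(it);	/* stand-in for unpin + completion update */
	}
}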
@@ -1422,10 +1483,8 @@ static void user_sdma_free_request(struct user_sdma_request *req)
for (i = 0; i < req->data_iovs; i++)
if (req->iovs[i].npages && req->iovs[i].pages)
- unpin_vector_pages(&req->iovs[i]);
+ unpin_vector_pages(req, &req->iovs[i]);
}
- if (req->user_proc)
- put_task_struct(req->user_proc);
kfree(req->tids);
clear_bit(SDMA_REQ_IN_USE, &req->flags);
}
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h
index fa4422553e23..0afa28508a8a 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.h
+++ b/drivers/staging/rdma/hfi1/user_sdma.h
@@ -52,15 +52,7 @@
#include "common.h"
#include "iowait.h"
-
-#define EXP_TID_TIDLEN_MASK 0x7FFULL
-#define EXP_TID_TIDLEN_SHIFT 0
-#define EXP_TID_TIDCTRL_MASK 0x3ULL
-#define EXP_TID_TIDCTRL_SHIFT 20
-#define EXP_TID_TIDIDX_MASK 0x7FFULL
-#define EXP_TID_TIDIDX_SHIFT 22
-#define EXP_TID_GET(tid, field) \
- (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
+#include "user_exp_rcv.h"
extern uint extended_psn;
@@ -76,6 +68,8 @@ struct hfi1_user_sdma_pkt_q {
struct user_sdma_request *reqs;
struct iowait busy;
unsigned state;
+ wait_queue_head_t wait;
+ struct mm_struct *user_mm;
};
struct hfi1_user_sdma_comp_q {
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 9beb0aa876f0..09b8d412ee90 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -162,6 +162,8 @@ static inline struct hfi1_ucontext *to_iucontext(struct ib_ucontext
return container_of(ibucontext, struct hfi1_ucontext, ibucontext);
}
+static inline void _hfi1_schedule_send(struct hfi1_qp *qp);
+
/*
* Translate ib_wr_opcode into ib_wc_opcode.
*/
@@ -509,9 +511,9 @@ static int post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
nreq++;
}
bail:
- if (nreq && !call_send)
- hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
+ if (nreq && !call_send)
+ _hfi1_schedule_send(qp);
if (nreq && call_send)
hfi1_do_send(&qp->s_iowait.iowork);
return err;
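
The post_send() hunk is a lock-ordering fix as much as a refactor: the send is now scheduled only after s_lock is dropped, so the workqueue machinery never runs under the QP's spinlock. Reduced to its essentials (hypothetical names):

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static void post_then_kick(spinlock_t *lock, struct work_struct *work,
			   bool need_kick)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* ... append work requests to the send queue ... */
	spin_unlock_irqrestore(lock, flags);

	/* kick the send engine only once the lock is released */
	if (need_kick)
		schedule_work(work);
}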
@@ -999,17 +1001,19 @@ bail_txadd:
return ret;
}
-int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
- u32 plen, u32 dwords, u64 pbc)
+int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc)
{
- struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ahg_ib_header *ahdr = qp->s_hdr;
+ u32 hdrwords = qp->s_hdrwords;
+ struct hfi1_sge_state *ss = qp->s_cur_sge;
+ u32 len = qp->s_cur_size;
+ u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
+ struct hfi1_ibdev *dev = ps->dev;
+ struct hfi1_pportdata *ppd = ps->ppd;
struct verbs_txreq *tx;
struct sdma_txreq *stx;
u64 pbc_flags = 0;
- struct sdma_engine *sde;
u8 sc5 = qp->s_sc;
int ret;
@@ -1030,12 +1034,7 @@ int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
if (IS_ERR(tx))
goto bail_tx;
- if (!qp->s_hdr->sde) {
- tx->sde = sde = qp_to_sdma_engine(qp, sc5);
- if (!sde)
- goto bail_no_sde;
- } else
- tx->sde = sde = qp->s_hdr->sde;
+ tx->sde = qp->s_sde;
if (likely(pbc == 0)) {
u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
@@ -1050,17 +1049,15 @@ int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
if (qp->s_rdma_mr)
qp->s_rdma_mr = NULL;
tx->hdr_dwords = hdrwords + 2;
- ret = build_verbs_tx_desc(sde, ss, len, tx, ahdr, pbc);
+ ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
if (unlikely(ret))
goto bail_build;
trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
- ret = sdma_send_txreq(sde, &qp->s_iowait, &tx->txreq);
+ ret = sdma_send_txreq(tx->sde, &qp->s_iowait, &tx->txreq);
if (unlikely(ret == -ECOMM))
goto bail_ecomm;
return ret;
-bail_no_sde:
- hfi1_put_txreq(tx);
bail_ecomm:
/* The current one got "sent" */
return 0;
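
With the length parameters gone from the prototype, hfi1_verbs_send_dma() rebuilds them from QP state. The arithmetic: the payload is rounded up to 32-bit dwords, and plen adds two more dwords for the 64-bit PBC that precedes the packet. In miniature:

#include <linux/types.h>

/* total send length in dwords: header + rounded payload + 64-bit PBC */
static u32 send_plen(u32 hdrwords, u32 payload_bytes)
{
	u32 dwords = (payload_bytes + 3) >> 2;	/* round up to dwords */

	return hdrwords + dwords + 2;		/* +2 dwords for the PBC */
}

/* e.g. hdrwords = 7, payload_bytes = 9: dwords = 3, so plen = 12 */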
@@ -1126,12 +1123,16 @@ struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5)
return dd->vld[vl].sc;
}
-int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
- u32 plen, u32 dwords, u64 pbc)
+int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc)
{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ahg_ib_header *ahdr = qp->s_hdr;
+ u32 hdrwords = qp->s_hdrwords;
+ struct hfi1_sge_state *ss = qp->s_cur_sge;
+ u32 len = qp->s_cur_size;
+ u32 dwords = (len + 3) >> 2;
+ u32 plen = hdrwords + dwords + 2; /* includes pbc */
+ struct hfi1_pportdata *ppd = ps->ppd;
u32 *hdr = (u32 *)&ahdr->ibh;
u64 pbc_flags = 0;
u32 sc5;
@@ -1303,23 +1304,18 @@ bad:
/**
* hfi1_verbs_send - send a packet
* @qp: the QP to send on
- * @ahdr: the packet header
- * @hdrwords: the number of 32-bit words in the header
- * @ss: the SGE to send
- * @len: the length of the packet in bytes
+ * @ps: the state of the packet to send
*
* Return zero if packet is sent or queued OK.
* Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise.
*/
-int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len)
+int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- u32 plen;
+ struct ahg_ib_header *ahdr = qp->s_hdr;
int ret;
int pio = 0;
unsigned long flags = 0;
- u32 dwords = (len + 3) >> 2;
/*
* VL15 packets (IB_QPT_SMI) will always use PIO, so we
@@ -1350,23 +1346,16 @@ int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
return -EINVAL;
}
- /*
- * Calculate the send buffer trigger address.
- * The +2 counts for the pbc control qword
- */
- plen = hdrwords + dwords + 2;
-
if (pio) {
- ret = dd->process_pio_send(
- qp, ahdr, hdrwords, ss, len, plen, dwords, 0);
+ ret = dd->process_pio_send(qp, ps, 0);
} else {
#ifdef CONFIG_SDMA_VERBOSITY
dd_dev_err(dd, "CONFIG SDMA %s:%d %s()\n",
slashstrip(__FILE__), __LINE__, __func__);
- dd_dev_err(dd, "SDMA hdrwords = %u, len = %u\n", hdrwords, len);
+ dd_dev_err(dd, "SDMA hdrwords = %u, len = %u\n", qp->s_hdrwords,
+ qp->s_cur_size);
#endif
- ret = dd->process_dma_send(
- qp, ahdr, hdrwords, ss, len, plen, dwords, 0);
+ ret = dd->process_dma_send(qp, ps, 0);
}
return ret;
@@ -2063,7 +2052,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
ibdev->poll_cq = hfi1_poll_cq;
ibdev->req_notify_cq = hfi1_req_notify_cq;
ibdev->get_dma_mr = hfi1_get_dma_mr;
- ibdev->reg_phys_mr = hfi1_reg_phys_mr;
ibdev->reg_user_mr = hfi1_reg_user_mr;
ibdev->dereg_mr = hfi1_dereg_mr;
ibdev->alloc_mr = hfi1_alloc_mr;
@@ -2135,28 +2123,43 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
vfree(dev->lk_table.table);
}
-/*
- * This must be called with s_lock held.
- */
-void hfi1_schedule_send(struct hfi1_qp *qp)
-{
- if (hfi1_send_ok(qp)) {
- struct hfi1_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- iowait_schedule(&qp->s_iowait, ppd->hfi1_wq);
- }
-}
-
void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
-
- if (packet->qp->ibqp.qp_type == IB_QPT_UC)
- hfi1_uc_rcv(packet);
- else if (packet->qp->ibqp.qp_type == IB_QPT_UD)
- hfi1_ud_rcv(packet);
- else
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_ib_header *hdr = packet->hdr;
+ struct hfi1_qp *qp = packet->qp;
+ u32 lqpn, rqpn = 0;
+ u16 rlid = 0;
+ u8 sl, sc5, sc4_bit, svc_type;
+ bool sc4_set = has_sc4_bit(packet);
+
+ switch (packet->qp->ibqp.qp_type) {
+ case IB_QPT_UC:
+ rlid = qp->remote_ah_attr.dlid;
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_UC;
+ break;
+ case IB_QPT_RC:
+ rlid = qp->remote_ah_attr.dlid;
+ rqpn = qp->remote_qpn;
+ svc_type = IB_CC_SVCTYPE_RC;
+ break;
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ svc_type = IB_CC_SVCTYPE_UD;
+ break;
+ default:
ibp->n_pkt_drops++;
+ return;
+ }
+
+ sc4_bit = sc4_set << 4;
+ sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
+ sc5 |= sc4_bit;
+ sl = ibp->sc_to_sl[sc5];
+ lqpn = qp->ibqp.qp_num;
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
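
The rewritten hfi1_cnp_rcv() recovers the 5-bit service class by taking SC[3:0] from bits 15:12 of LRH word 0 and OR-ing in the separately signalled SC[4] bit. The bit manipulation in isolation (field layout as used in the code above):

#include <linux/types.h>
#include <asm/byteorder.h>

/* lrh0: first LRH word as received (big endian); sc4_set: extra SC bit */
static u8 lrh_to_sc5(__be16 lrh0, bool sc4_set)
{
	u8 sc5 = (be16_to_cpu(lrh0) >> 12) & 0xf;	/* SC[3:0] */

	sc5 |= (u8)sc4_set << 4;			/* SC[4] */
	return sc5;
}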
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
index 041ad07ee699..286e468b0479 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/staging/rdma/hfi1/verbs.h
@@ -120,9 +120,9 @@ struct hfi1_packet;
#define HFI1_VENDOR_IPG cpu_to_be16(0xFFA0)
-#define IB_BTH_REQ_ACK (1 << 31)
-#define IB_BTH_SOLICITED (1 << 23)
-#define IB_BTH_MIG_REQ (1 << 22)
+#define IB_BTH_REQ_ACK BIT(31)
+#define IB_BTH_SOLICITED BIT(23)
+#define IB_BTH_MIG_REQ BIT(22)
#define IB_GRH_VERSION 6
#define IB_GRH_VERSION_MASK 0xF
@@ -441,7 +441,8 @@ struct hfi1_qp {
struct hfi1_swqe *s_wq; /* send work queue */
struct hfi1_mmap_info *ip;
struct ahg_ib_header *s_hdr; /* next packet header to send */
- u8 s_sc; /* SC[0..4] for next packet */
+ /* SC for UC/RC QPs - derived from the AH for UD QPs */
+ u8 s_sc;
unsigned long timeout_jiffies; /* computed from timeout */
enum ib_mtu path_mtu;
@@ -489,6 +490,7 @@ struct hfi1_qp {
u32 r_psn; /* expected rcv packet sequence number */
u32 r_msn; /* message sequence number */
+ u8 r_adefered; /* number of acks deferred */
u8 r_state; /* opcode of last packet received */
u8 r_flags;
u8 r_head_ack_queue; /* index into s_ack_queue[] */
@@ -544,6 +546,16 @@ struct hfi1_qp {
};
/*
+ * This structure is used to hold commonly looked-up and computed values
+ * during send engine progress.
+ */
+struct hfi1_pkt_state {
+ struct hfi1_ibdev *dev;
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+};
+
+/*
* Atomic bit definitions for r_aflags.
*/
#define HFI1_R_WRID_VALID 0
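
hfi1_pkt_state caches pointers that each send would otherwise re-derive from the QP. How the send engine fills it is not shown in this patch; a plausible initializer, using the driver's existing to_idev()/to_iport()/ppd_from_ibp() helpers, might look like:

/* hypothetical; assumes the driver's verbs.h is in scope */
static void hfi1_pkt_state_init(struct hfi1_pkt_state *ps, struct hfi1_qp *qp)
{
	ps->dev = to_idev(qp->ibqp.device);
	ps->ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps->ppd = ppd_from_ibp(ps->ibp);
}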
@@ -552,11 +564,13 @@ struct hfi1_qp {
/*
* Bit definitions for r_flags.
*/
-#define HFI1_R_REUSE_SGE 0x01
-#define HFI1_R_RDMAR_SEQ 0x02
-#define HFI1_R_RSP_NAK 0x04
-#define HFI1_R_RSP_SEND 0x08
-#define HFI1_R_COMM_EST 0x10
+#define HFI1_R_REUSE_SGE 0x01
+#define HFI1_R_RDMAR_SEQ 0x02
+/* defer ack until end of interrupt session */
+#define HFI1_R_RSP_DEFERED_ACK 0x04
+/* relay ack to send engine */
+#define HFI1_R_RSP_SEND 0x08
+#define HFI1_R_COMM_EST 0x10
/*
* Bit definitions for s_flags.
@@ -846,9 +860,8 @@ static inline int hfi1_send_ok(struct hfi1_qp *qp)
/*
* This must be called with s_lock held.
*/
-void hfi1_schedule_send(struct hfi1_qp *qp);
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
+ u32 qp1, u32 qp2, u16 lid1, u16 lid2);
void hfi1_cap_mask_chg(struct hfi1_ibport *ibp);
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
@@ -927,8 +940,7 @@ int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp);
struct verbs_txreq;
void hfi1_put_txreq(struct verbs_txreq *tx);
-int hfi1_verbs_send(struct hfi1_qp *qp, struct ahg_ib_header *ahdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len);
+int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps);
void hfi1_copy_sge(struct hfi1_sge_state *ss, void *data, u32 length,
int release);
@@ -1012,10 +1024,6 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc);
-struct ib_mr *hfi1_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start);
-
struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
@@ -1069,8 +1077,6 @@ int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only);
-void hfi1_migrate_qp(struct hfi1_qp *qp);
-
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
int has_grh, struct hfi1_qp *qp, u32 bth0);
@@ -1101,13 +1107,11 @@ void hfi1_ib_rcv(struct hfi1_packet *packet);
unsigned hfi1_get_npkeys(struct hfi1_devdata *);
-int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct ahg_ib_header *hdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
- u32 plen, u32 dwords, u64 pbc);
+int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc);
-int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct ahg_ib_header *hdr,
- u32 hdrwords, struct hfi1_sge_state *ss, u32 len,
- u32 plen, u32 dwords, u64 pbc);
+int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+ u64 pbc);
struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5);
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
index 13c3cd11ab92..6187b848b3ca 100644
--- a/drivers/staging/rdma/ipath/ipath_file_ops.c
+++ b/drivers/staging/rdma/ipath/ipath_file_ops.c
@@ -917,15 +917,15 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
chunk = pd->port_rcvegrbuf_chunks;
egrperchunk = pd->port_rcvegrbufs_perchunk;
size = pd->port_rcvegrbuf_size;
- pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
- GFP_KERNEL);
+ pd->port_rcvegrbuf = kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf[0]),
+ GFP_KERNEL);
if (!pd->port_rcvegrbuf) {
ret = -ENOMEM;
goto bail;
}
pd->port_rcvegrbuf_phys =
- kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]),
- GFP_KERNEL);
+ kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf_phys[0]),
+ GFP_KERNEL);
if (!pd->port_rcvegrbuf_phys) {
ret = -ENOMEM;
goto bail_rcvegrbuf;
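
kmalloc_array() is the overflow-checked replacement for open-coded kmalloc(n * size): if the multiplication would overflow, it returns NULL instead of quietly allocating a short buffer. In miniature:

#include <linux/slab.h>
#include <linux/types.h>

static dma_addr_t *alloc_phys_table(size_t chunks)
{
	/* returns NULL if chunks * sizeof(dma_addr_t) would overflow */
	return kmalloc_array(chunks, sizeof(dma_addr_t), GFP_KERNEL);
}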
diff --git a/drivers/staging/rdma/ipath/ipath_fs.c b/drivers/staging/rdma/ipath/ipath_fs.c
index 796af6867007..476fcdf05acb 100644
--- a/drivers/staging/rdma/ipath/ipath_fs.c
+++ b/drivers/staging/rdma/ipath/ipath_fs.c
@@ -82,14 +82,14 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- mutex_lock(&d_inode(parent)->i_mutex);
+ inode_lock(d_inode(parent));
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
error = ipathfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
else
error = PTR_ERR(*dentry);
- mutex_unlock(&d_inode(parent)->i_mutex);
+ inode_unlock(d_inode(parent));
return error;
}
@@ -295,7 +295,7 @@ static int remove_device_files(struct super_block *sb,
int ret;
root = dget(sb->s_root);
- mutex_lock(&d_inode(root)->i_mutex);
+ inode_lock(d_inode(root));
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
dir = lookup_one_len(unit, root, strlen(unit));
@@ -311,7 +311,7 @@ static int remove_device_files(struct super_block *sb,
ret = simple_rmdir(d_inode(root), dir);
bail:
- mutex_unlock(&d_inode(root)->i_mutex);
+ inode_unlock(d_inode(root));
dput(root);
return ret;
}
diff --git a/drivers/staging/rdma/ipath/ipath_mr.c b/drivers/staging/rdma/ipath/ipath_mr.c
index c7278f6a8217..b76b0ce66709 100644
--- a/drivers/staging/rdma/ipath/ipath_mr.c
+++ b/drivers/staging/rdma/ipath/ipath_mr.c
@@ -98,10 +98,6 @@ static struct ipath_mr *alloc_mr(int count,
}
mr->mr.mapsz = m;
- /*
- * ib_reg_phys_mr() will initialize mr->ibmr except for
- * lkey and rkey.
- */
if (!ipath_alloc_lkey(lk_table, &mr->mr))
goto bail;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
@@ -121,57 +117,6 @@ done:
}
/**
- * ipath_reg_phys_mr - register a physical memory region
- * @pd: protection domain for this memory region
- * @buffer_list: pointer to the list of physical buffers to register
- * @num_phys_buf: the number of physical buffers to register
- * @iova_start: the starting address passed over IB which maps to this MR
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start)
-{
- struct ipath_mr *mr;
- int n, m, i;
- struct ib_mr *ret;
-
- mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
- if (mr == NULL) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- mr->mr.pd = pd;
- mr->mr.user_base = *iova_start;
- mr->mr.iova = *iova_start;
- mr->mr.length = 0;
- mr->mr.offset = 0;
- mr->mr.access_flags = acc;
- mr->mr.max_segs = num_phys_buf;
- mr->umem = NULL;
-
- m = 0;
- n = 0;
- for (i = 0; i < num_phys_buf; i++) {
- mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
- mr->mr.map[m]->segs[n].length = buffer_list[i].size;
- mr->mr.length += buffer_list[i].size;
- n++;
- if (n == IPATH_SEGSZ) {
- m++;
- n = 0;
- }
- }
-
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-/**
* ipath_reg_user_mr - register a userspace memory region
* @pd: protection domain for this memory region
* @start: starting userspace address
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c
index 1778dee13f99..53f9dcab180d 100644
--- a/drivers/staging/rdma/ipath/ipath_verbs.c
+++ b/drivers/staging/rdma/ipath/ipath_verbs.c
@@ -2201,7 +2201,6 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
dev->poll_cq = ipath_poll_cq;
dev->req_notify_cq = ipath_req_notify_cq;
dev->get_dma_mr = ipath_get_dma_mr;
- dev->reg_phys_mr = ipath_reg_phys_mr;
dev->reg_user_mr = ipath_reg_user_mr;
dev->dereg_mr = ipath_dereg_mr;
dev->alloc_fmr = ipath_alloc_fmr;
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h
index 0a90a56870ab..6c70a89667a9 100644
--- a/drivers/staging/rdma/ipath/ipath_verbs.h
+++ b/drivers/staging/rdma/ipath/ipath_verbs.h
@@ -828,10 +828,6 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);
-struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
- struct ib_phys_buf *buffer_list,
- int num_phys_buf, int acc, u64 *iova_start);
-
struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);