Diffstat (limited to 'drivers/infiniband/hw/mthca')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c         33
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c       323
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h        14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c        161
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h        33
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c          6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c        17
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c       23
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c         2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c    29
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.h    10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c         42
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_pd.c          3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c    10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  170
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h   53
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c        505
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c        49
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h        7
19 files changed, 993 insertions, 497 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index a19e0ed03d7c..f023d3936518 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -147,7 +147,7 @@ int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
switch (ah->type) {
case MTHCA_AH_ON_HCA:
mthca_free(&dev->av_table.alloc,
- (ah->avdma - dev->av_table.ddr_av_base) /
+ (ah->avdma - dev->av_table.ddr_av_base) /
MTHCA_AV_SIZE);
break;
@@ -193,6 +193,37 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
return 0;
}
+int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr)
+{
+ struct mthca_ah *ah = to_mah(ibah);
+ struct mthca_dev *dev = to_mdev(ibah->device);
+
+ /* Only implement for MAD and memfree ah for now. */
+ if (ah->type == MTHCA_AH_ON_HCA)
+ return -ENOSYS;
+
+ memset(attr, 0, sizeof *attr);
+ attr->dlid = be16_to_cpu(ah->av->dlid);
+ attr->sl = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
+ attr->static_rate = ah->av->msg_sr & 0x7;
+ attr->src_path_bits = ah->av->g_slid & 0x7F;
+ attr->port_num = be32_to_cpu(ah->av->port_pd) >> 24;
+ attr->ah_flags = mthca_ah_grh_present(ah) ? IB_AH_GRH : 0;
+
+ if (attr->ah_flags) {
+ attr->grh.traffic_class =
+ be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20;
+ attr->grh.flow_label =
+ be32_to_cpu(ah->av->sl_tclass_flowlabel) & 0xfffff;
+ attr->grh.hop_limit = ah->av->hop_limit;
+ attr->grh.sgid_index = ah->av->gid_index &
+ (dev->limits.gid_table_len - 1);
+ memcpy(attr->grh.dgid.raw, ah->av->dgid, 16);
+ }
+
+ return 0;
+}
+
int __devinit mthca_init_av_table(struct mthca_dev *dev)
{
int err;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 2825615ce81c..343eca507870 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -182,25 +182,58 @@ struct mthca_cmd_context {
u8 status;
};
+static int fw_cmd_doorbell = 1;
+module_param(fw_cmd_doorbell, int, 0644);
+MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero "
+ "(and supported by FW)");
+
static inline int go_bit(struct mthca_dev *dev)
{
return readl(dev->hcr + HCR_STATUS_OFFSET) &
swab32(1 << HCR_GO_BIT);
}
-static int mthca_cmd_post(struct mthca_dev *dev,
- u64 in_param,
- u64 out_param,
- u32 in_modifier,
- u8 op_modifier,
- u16 op,
- u16 token,
- int event)
+static void mthca_cmd_post_dbell(struct mthca_dev *dev,
+ u64 in_param,
+ u64 out_param,
+ u32 in_modifier,
+ u8 op_modifier,
+ u16 op,
+ u16 token)
{
- int err = 0;
+ void __iomem *ptr = dev->cmd.dbell_map;
+ u16 *offs = dev->cmd.dbell_offsets;
- mutex_lock(&dev->cmd.hcr_mutex);
+ __raw_writel((__force u32) cpu_to_be32(in_param >> 32), ptr + offs[0]);
+ wmb();
+ __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), ptr + offs[1]);
+ wmb();
+ __raw_writel((__force u32) cpu_to_be32(in_modifier), ptr + offs[2]);
+ wmb();
+ __raw_writel((__force u32) cpu_to_be32(out_param >> 32), ptr + offs[3]);
+ wmb();
+ __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), ptr + offs[4]);
+ wmb();
+ __raw_writel((__force u32) cpu_to_be32(token << 16), ptr + offs[5]);
+ wmb();
+ __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
+ (1 << HCA_E_BIT) |
+ (op_modifier << HCR_OPMOD_SHIFT) |
+ op), ptr + offs[6]);
+ wmb();
+ __raw_writel((__force u32) 0, ptr + offs[7]);
+ wmb();
+}
+static int mthca_cmd_post_hcr(struct mthca_dev *dev,
+ u64 in_param,
+ u64 out_param,
+ u32 in_modifier,
+ u8 op_modifier,
+ u16 op,
+ u16 token,
+ int event)
+{
if (event) {
unsigned long end = jiffies + GO_BIT_TIMEOUT;
@@ -210,10 +243,8 @@ static int mthca_cmd_post(struct mthca_dev *dev,
}
}
- if (go_bit(dev)) {
- err = -EAGAIN;
- goto out;
- }
+ if (go_bit(dev))
+ return -EAGAIN;
/*
* We use writel (instead of something like memcpy_toio)
@@ -236,7 +267,29 @@ static int mthca_cmd_post(struct mthca_dev *dev,
(op_modifier << HCR_OPMOD_SHIFT) |
op), dev->hcr + 6 * 4);
-out:
+ return 0;
+}
+
+static int mthca_cmd_post(struct mthca_dev *dev,
+ u64 in_param,
+ u64 out_param,
+ u32 in_modifier,
+ u8 op_modifier,
+ u16 op,
+ u16 token,
+ int event)
+{
+ int err = 0;
+
+ mutex_lock(&dev->cmd.hcr_mutex);
+
+ if (event && dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS && fw_cmd_doorbell)
+ mthca_cmd_post_dbell(dev, in_param, out_param, in_modifier,
+ op_modifier, op, token);
+ else
+ err = mthca_cmd_post_hcr(dev, in_param, out_param, in_modifier,
+ op_modifier, op, token, event);
+
mutex_unlock(&dev->cmd.hcr_mutex);
return err;
}
@@ -275,7 +328,7 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
}
if (out_is_imm)
- *out_param =
+ *out_param =
(u64) be32_to_cpu((__force __be32)
__raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
@@ -386,7 +439,7 @@ static int mthca_cmd_box(struct mthca_dev *dev,
unsigned long timeout,
u8 *status)
{
- if (dev->cmd.use_events)
+ if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
return mthca_cmd_wait(dev, in_param, &out_param, 0,
in_modifier, op_modifier, op,
timeout, status);
@@ -423,7 +476,7 @@ static int mthca_cmd_imm(struct mthca_dev *dev,
unsigned long timeout,
u8 *status)
{
- if (dev->cmd.use_events)
+ if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
return mthca_cmd_wait(dev, in_param, out_param, 1,
in_modifier, op_modifier, op,
timeout, status);
@@ -437,7 +490,7 @@ int mthca_cmd_init(struct mthca_dev *dev)
{
mutex_init(&dev->cmd.hcr_mutex);
sema_init(&dev->cmd.poll_sem, 1);
- dev->cmd.use_events = 0;
+ dev->cmd.flags = 0;
dev->hcr = ioremap(pci_resource_start(dev->pdev, 0) + MTHCA_HCR_BASE,
MTHCA_HCR_SIZE);
@@ -461,6 +514,8 @@ void mthca_cmd_cleanup(struct mthca_dev *dev)
{
pci_pool_destroy(dev->cmd.pool);
iounmap(dev->hcr);
+ if (dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS)
+ iounmap(dev->cmd.dbell_map);
}
/*
@@ -498,7 +553,8 @@ int mthca_cmd_use_events(struct mthca_dev *dev)
; /* nothing */
--dev->cmd.token_mask;
- dev->cmd.use_events = 1;
+ dev->cmd.flags |= MTHCA_CMD_USE_EVENTS;
+
down(&dev->cmd.poll_sem);
return 0;
@@ -511,7 +567,7 @@ void mthca_cmd_use_polling(struct mthca_dev *dev)
{
int i;
- dev->cmd.use_events = 0;
+ dev->cmd.flags &= ~MTHCA_CMD_USE_EVENTS;
for (i = 0; i < dev->cmd.max_cmds; ++i)
down(&dev->cmd.event_sem);
@@ -596,8 +652,9 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
* address or size and use that as our log2 size.
*/
lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
- if (lg < 12) {
- mthca_warn(dev, "Got FW area not aligned to 4K (%llx/%lx).\n",
+ if (lg < MTHCA_ICM_PAGE_SHIFT) {
+ mthca_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
+ MTHCA_ICM_PAGE_SIZE,
(unsigned long long) mthca_icm_addr(&iter),
mthca_icm_size(&iter));
err = -EINVAL;
@@ -609,8 +666,9 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
virt += 1 << lg;
}
- pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) +
- (i << lg)) | (lg - 12));
+ pages[nent * 2 + 1] =
+ cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
+ (lg - MTHCA_ICM_PAGE_SHIFT));
ts += 1 << (lg - 10);
++tc;
@@ -661,12 +719,41 @@ int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
}
+static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
+{
+ unsigned long addr;
+ u16 max_off = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i)
+ max_off = max(max_off, dev->cmd.dbell_offsets[i]);
+
+ if ((base & PAGE_MASK) != ((base + max_off) & PAGE_MASK)) {
+ mthca_warn(dev, "Firmware doorbell region at 0x%016llx, "
+ "length 0x%x crosses a page boundary\n",
+ (unsigned long long) base, max_off);
+ return;
+ }
+
+ addr = pci_resource_start(dev->pdev, 2) +
+ ((pci_resource_len(dev->pdev, 2) - 1) & base);
+ dev->cmd.dbell_map = ioremap(addr, max_off + sizeof(u32));
+ if (!dev->cmd.dbell_map)
+ return;
+
+ dev->cmd.flags |= MTHCA_CMD_POST_DOORBELLS;
+ mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
+}
+
int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
+ u64 base;
+ u32 tmp;
int err = 0;
u8 lg;
+ int i;
#define QUERY_FW_OUT_SIZE 0x100
#define QUERY_FW_VER_OFFSET 0x00
@@ -674,6 +761,10 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
#define QUERY_FW_ERR_START_OFFSET 0x30
#define QUERY_FW_ERR_SIZE_OFFSET 0x38
+#define QUERY_FW_CMD_DB_EN_OFFSET 0x10
+#define QUERY_FW_CMD_DB_OFFSET 0x50
+#define QUERY_FW_CMD_DB_BASE 0x60
+
#define QUERY_FW_START_OFFSET 0x20
#define QUERY_FW_END_OFFSET 0x28
@@ -702,16 +793,29 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
((dev->fw_ver & 0xffff0000ull) >> 16) |
((dev->fw_ver & 0x0000ffffull) << 16);
+ mthca_dbg(dev, "FW version %012llx, max commands %d\n",
+ (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
+
MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
dev->cmd.max_cmds = 1 << lg;
MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
- mthca_dbg(dev, "FW version %012llx, max commands %d\n",
- (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n",
(unsigned long long) dev->catas_err.addr, dev->catas_err.size);
+ MTHCA_GET(tmp, outbox, QUERY_FW_CMD_DB_EN_OFFSET);
+ if (tmp & 0x1) {
+ mthca_dbg(dev, "FW supports commands through doorbells\n");
+
+ MTHCA_GET(base, outbox, QUERY_FW_CMD_DB_BASE);
+ for (i = 0; i < MTHCA_CMD_NUM_DBELL_DWORDS; ++i)
+ MTHCA_GET(dev->cmd.dbell_offsets[i], outbox,
+ QUERY_FW_CMD_DB_OFFSET + (i << 1));
+
+ mthca_setup_cmd_doorbells(dev, base);
+ }
+
if (mthca_is_memfree(dev)) {
MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
MTHCA_GET(dev->fw.arbel.clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
@@ -720,12 +824,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2);
/*
- * Arbel page size is always 4 KB; round up number of
- * system pages needed.
+ * Round up number of system pages needed in case
+ * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
*/
dev->fw.arbel.fw_pages =
- ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
- (PAGE_SHIFT - 12);
+ ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
+ (PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
(unsigned long long) dev->fw.arbel.clr_int_base,
@@ -1173,7 +1277,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
int err;
#define INIT_HCA_IN_SIZE 0x200
-#define INIT_HCA_FLAGS_OFFSET 0x014
+#define INIT_HCA_FLAGS1_OFFSET 0x00c
+#define INIT_HCA_FLAGS2_OFFSET 0x014
#define INIT_HCA_QPC_OFFSET 0x020
#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
@@ -1216,15 +1321,18 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
memset(inbox, 0, INIT_HCA_IN_SIZE);
+ if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+ MTHCA_PUT(inbox, 0x1, INIT_HCA_FLAGS1_OFFSET);
+
#if defined(__LITTLE_ENDIAN)
- *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
+ *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
- *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
+ *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
/* Check port for UD address vector: */
- *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
+ *(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1);
/* We leave wqe_quota, responder_exu, etc as 0 (default) */
@@ -1438,11 +1546,11 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
return ret;
/*
- * Arbel page size is always 4 KB; round up number of system
- * pages needed.
+ * Round up number of system pages needed in case
+ * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
*/
- *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
- *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
+ *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
+ (PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
return 0;
}
@@ -1514,6 +1622,37 @@ int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
CMD_TIME_CLASS_A, status);
}
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
+ u8 *status)
+{
+ struct mthca_mailbox *mailbox;
+ __be32 *inbox;
+ int err;
+
+#define RESIZE_CQ_IN_SIZE 0x40
+#define RESIZE_CQ_LOG_SIZE_OFFSET 0x0c
+#define RESIZE_CQ_LKEY_OFFSET 0x1c
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ memset(inbox, 0, RESIZE_CQ_IN_SIZE);
+ /*
+ * Leave start address fields zeroed out -- mthca assumes that
+ * MRs for CQs always start at virtual address 0.
+ */
+ MTHCA_PUT(inbox, log_size, RESIZE_CQ_LOG_SIZE_OFFSET);
+ MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET);
+
+ err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ,
+ CMD_TIME_CLASS_B, status);
+
+ mthca_free_mailbox(dev, mailbox);
+ return err;
+}
+
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int srq_num, u8 *status)
{
@@ -1529,37 +1668,69 @@ int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
CMD_TIME_CLASS_A, status);
}
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+ struct mthca_mailbox *mailbox, u8 *status)
+{
+ return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
+ CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+}
+
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
{
return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
CMD_TIME_CLASS_B, status);
}
-int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
- int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
+int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
+ enum ib_qp_state next, u32 num, int is_ee,
+ struct mthca_mailbox *mailbox, u32 optmask,
u8 *status)
{
- static const u16 op[] = {
- [MTHCA_TRANS_RST2INIT] = CMD_RST2INIT_QPEE,
- [MTHCA_TRANS_INIT2INIT] = CMD_INIT2INIT_QPEE,
- [MTHCA_TRANS_INIT2RTR] = CMD_INIT2RTR_QPEE,
- [MTHCA_TRANS_RTR2RTS] = CMD_RTR2RTS_QPEE,
- [MTHCA_TRANS_RTS2RTS] = CMD_RTS2RTS_QPEE,
- [MTHCA_TRANS_SQERR2RTS] = CMD_SQERR2RTS_QPEE,
- [MTHCA_TRANS_ANY2ERR] = CMD_2ERR_QPEE,
- [MTHCA_TRANS_RTS2SQD] = CMD_RTS2SQD_QPEE,
- [MTHCA_TRANS_SQD2SQD] = CMD_SQD2SQD_QPEE,
- [MTHCA_TRANS_SQD2RTS] = CMD_SQD2RTS_QPEE,
- [MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE
+ static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = {
+ [IB_QPS_RESET] = CMD_ERR2RST_QPEE,
+ [IB_QPS_ERR] = CMD_2ERR_QPEE,
+ [IB_QPS_INIT] = CMD_RST2INIT_QPEE,
+ },
+ [IB_QPS_INIT] = {
+ [IB_QPS_RESET] = CMD_ERR2RST_QPEE,
+ [IB_QPS_ERR] = CMD_2ERR_QPEE,
+ [IB_QPS_INIT] = CMD_INIT2INIT_QPEE,
+ [IB_QPS_RTR] = CMD_INIT2RTR_QPEE,
+ },
+ [IB_QPS_RTR] = {
+ [IB_QPS_RESET] = CMD_ERR2RST_QPEE,
+ [IB_QPS_ERR] = CMD_2ERR_QPEE,
+ [IB_QPS_RTS] = CMD_RTR2RTS_QPEE,
+ },
+ [IB_QPS_RTS] = {
+ [IB_QPS_RESET] = CMD_ERR2RST_QPEE,
+ [IB_QPS_ERR] = CMD_2ERR_QPEE,
+ [IB_QPS_RTS] = CMD_RTS2RTS_QPEE,
+ [IB_QPS_SQD] = CMD_RTS2SQD_QPEE,
+ },
+ [IB_QPS_SQD] = {
+ [IB_QPS_RESET] = CMD_ERR2RST_QPEE,
+ [IB_QPS_ERR] = CMD_2ERR_QPEE,
+ [IB_QPS_RTS] = CMD_SQD2RTS_QPEE,
+ [IB_QPS_SQD] = CMD_SQD2SQD_QPEE,
+ },
+ [IB_QPS_SQE] = {
+ [IB_QPS_RESET] = CMD_ERR2RST_QPEE,
+ [IB_QPS_ERR] = CMD_2ERR_QPEE,
+ [IB_QPS_RTS] = CMD_SQERR2RTS_QPEE,
+ },
+ [IB_QPS_ERR] = {
+ [IB_QPS_RESET] = CMD_ERR2RST_QPEE,
+ [IB_QPS_ERR] = CMD_2ERR_QPEE,
+ }
};
+
u8 op_mod = 0;
int my_mailbox = 0;
int err;
- if (trans < 0 || trans >= ARRAY_SIZE(op))
- return -EINVAL;
-
- if (trans == MTHCA_TRANS_ANY2RST) {
+ if (op[cur][next] == CMD_ERR2RST_QPEE) {
op_mod = 3; /* don't write outbox, any->reset */
/* For debugging */
@@ -1571,34 +1742,35 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
} else
mailbox = NULL;
}
- } else {
- if (0) {
+
+ err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
+ (!!is_ee << 24) | num, op_mod,
+ op[cur][next], CMD_TIME_CLASS_C, status);
+
+ if (0 && mailbox) {
int i;
mthca_dbg(dev, "Dumping QP context:\n");
- printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
+ printk(" %08x\n", be32_to_cpup(mailbox->buf));
for (i = 0; i < 0x100 / 4; ++i) {
if (i % 8 == 0)
- printk(" [%02x] ", i * 4);
+ printk("[%02x] ", i * 4);
printk(" %08x",
be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
if ((i + 1) % 8 == 0)
printk("\n");
}
}
- }
-
- if (trans == MTHCA_TRANS_ANY2RST) {
- err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
- (!!is_ee << 24) | num, op_mod,
- op[trans], CMD_TIME_CLASS_C, status);
- if (0 && mailbox) {
+ if (my_mailbox)
+ mthca_free_mailbox(dev, mailbox);
+ } else {
+ if (0) {
int i;
mthca_dbg(dev, "Dumping QP context:\n");
- printk(" %08x\n", be32_to_cpup(mailbox->buf));
+ printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf));
for (i = 0; i < 0x100 / 4; ++i) {
if (i % 8 == 0)
- printk("[%02x] ", i * 4);
+ printk(" [%02x] ", i * 4);
printk(" %08x",
be32_to_cpu(((__be32 *) mailbox->buf)[i + 2]));
if ((i + 1) % 8 == 0)
@@ -1606,12 +1778,9 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
}
}
- } else
- err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num,
- op_mod, op[trans], CMD_TIME_CLASS_C, status);
-
- if (my_mailbox)
- mthca_free_mailbox(dev, mailbox);
+ err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
+ op_mod, op[cur][next], CMD_TIME_CLASS_C, status);
+ }
return err;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 18175bec84c2..e4ec35c40dd3 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -73,9 +74,9 @@ enum {
MTHCA_CMD_STAT_REG_BOUND = 0x21,
/* HCA local attached memory not present: */
MTHCA_CMD_STAT_LAM_NOT_PRE = 0x22,
- /* Bad management packet (silently discarded): */
+ /* Bad management packet (silently discarded): */
MTHCA_CMD_STAT_BAD_PKT = 0x30,
- /* More outstanding CQEs in CQ than new CQ size: */
+ /* More outstanding CQEs in CQ than new CQ size: */
MTHCA_CMD_STAT_BAD_SIZE = 0x40
};
@@ -298,13 +299,18 @@ int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
+ u8 *status);
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int srq_num, u8 *status);
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int srq_num, u8 *status);
+int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
+ struct mthca_mailbox *mailbox, u8 *status);
int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
-int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
- int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
+int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
+ enum ib_qp_state next, u32 num, int is_ee,
+ struct mthca_mailbox *mailbox, u32 optmask,
u8 *status);
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
struct mthca_mailbox *mailbox, u8 *status);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 96f1a86bf049..76aabc5bf371 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
@@ -150,24 +150,29 @@ struct mthca_err_cqe {
#define MTHCA_ARBEL_CQ_DB_REQ_NOT (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
-static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
+static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
+ int entry)
{
- if (cq->is_direct)
- return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
+ if (buf->is_direct)
+ return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
else
- return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
+ return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}
-static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
+static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
+{
+ return get_cqe_from_buf(&cq->buf, entry);
+}
+
+static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
{
- struct mthca_cqe *cqe = get_cqe(cq, i);
return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}
static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
- return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
+ return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
}
static inline void set_cqe_hw(struct mthca_cqe *cqe)
@@ -289,7 +294,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
* from our QP and therefore don't need to be checked.
*/
for (prod_index = cq->cons_index;
- cqe_sw(cq, prod_index & cq->ibcq.cqe);
+ cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
++prod_index)
if (prod_index == cq->cons_index + cq->ibcq.cqe)
break;
@@ -324,12 +329,58 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
wake_up(&cq->wait);
}
-static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
- struct mthca_qp *qp, int wqe_index, int is_send,
- struct mthca_err_cqe *cqe,
- struct ib_wc *entry, int *free_cqe)
+void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
+{
+ int i;
+
+ /*
+ * In Tavor mode, the hardware keeps the consumer and producer
+ * indices mod the CQ size. Since we might be making the CQ
+ * bigger, we need to deal with the case where the producer
+ * index wrapped around before the CQ was resized.
+ */
+ if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
+ cq->ibcq.cqe < cq->resize_buf->cqe) {
+ cq->cons_index &= cq->ibcq.cqe;
+ if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
+ cq->cons_index -= cq->ibcq.cqe + 1;
+ }
+
+ for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
+ memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
+ i & cq->resize_buf->cqe),
+ get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
+}
+
+int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
+{
+ int ret;
+ int i;
+
+ ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
+ MTHCA_MAX_DIRECT_CQ_SIZE,
+ &buf->queue, &buf->is_direct,
+ &dev->driver_pd, 1, &buf->mr);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nent; ++i)
+ set_cqe_hw(get_cqe_from_buf(buf, i));
+
+ return 0;
+}
+
+void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
+{
+ mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
+ buf->is_direct, &buf->mr);
+}
+
+static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
+ struct mthca_qp *qp, int wqe_index, int is_send,
+ struct mthca_err_cqe *cqe,
+ struct ib_wc *entry, int *free_cqe)
{
- int err;
int dbd;
__be32 new_wqe;
@@ -412,11 +463,9 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
* error case, so we don't have to check the doorbell count, etc.
*/
if (mthca_is_memfree(dev))
- return 0;
+ return;
- err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
- if (err)
- return err;
+ mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
/*
* If we're at the end of the WQE chain, or we've used up our
@@ -424,15 +473,13 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
* the next poll operation.
*/
if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
- return 0;
+ return;
cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
cqe->wqe = new_wqe;
cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
*free_cqe = 0;
-
- return 0;
}
static inline int mthca_poll_one(struct mthca_dev *dev,
@@ -518,9 +565,9 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
}
if (is_error) {
- err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
- (struct mthca_err_cqe *) cqe,
- entry, &free_cqe);
+ handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
+ (struct mthca_err_cqe *) cqe,
+ entry, &free_cqe);
goto out;
}
@@ -614,11 +661,14 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
spin_lock_irqsave(&cq->lock, flags);
- for (npolled = 0; npolled < num_entries; ++npolled) {
+ npolled = 0;
+repoll:
+ while (npolled < num_entries) {
err = mthca_poll_one(dev, cq, &qp,
&freed, entry + npolled);
if (err)
break;
+ ++npolled;
}
if (freed) {
@@ -626,6 +676,42 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
update_cons_index(dev, cq, freed);
}
+ /*
+ * If a CQ resize is in progress and we discovered that the
+ * old buffer is empty, then peek in the new buffer, and if
+ * it's not empty, switch to the new buffer and continue
+ * polling there.
+ */
+ if (unlikely(err == -EAGAIN && cq->resize_buf &&
+ cq->resize_buf->state == CQ_RESIZE_READY)) {
+ /*
+ * In Tavor mode, the hardware keeps the producer
+ * index modulo the CQ size. Since we might be making
+ * the CQ bigger, we need to mask our consumer index
+ * using the size of the old CQ buffer before looking
+ * in the new CQ buffer.
+ */
+ if (!mthca_is_memfree(dev))
+ cq->cons_index &= cq->ibcq.cqe;
+
+ if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
+ cq->cons_index & cq->resize_buf->cqe))) {
+ struct mthca_cq_buf tbuf;
+ int tcqe;
+
+ tbuf = cq->buf;
+ tcqe = cq->ibcq.cqe;
+ cq->buf = cq->resize_buf->buf;
+ cq->ibcq.cqe = cq->resize_buf->cqe;
+
+ cq->resize_buf->buf = tbuf;
+ cq->resize_buf->cqe = tcqe;
+ cq->resize_buf->state = CQ_RESIZE_SWAPPED;
+
+ goto repoll;
+ }
+ }
+
spin_unlock_irqrestore(&cq->lock, flags);
return err == 0 || err == -EAGAIN ? npolled : err;
@@ -684,24 +770,14 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
return 0;
}
-static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
-{
- mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
- &cq->queue, cq->is_direct, &cq->mr);
-}
-
int mthca_init_cq(struct mthca_dev *dev, int nent,
struct mthca_ucontext *ctx, u32 pdn,
struct mthca_cq *cq)
{
- int size = nent * MTHCA_CQ_ENTRY_SIZE;
struct mthca_mailbox *mailbox;
struct mthca_cq_context *cq_context;
int err = -ENOMEM;
u8 status;
- int i;
-
- might_sleep();
cq->ibcq.cqe = nent - 1;
cq->is_kernel = !ctx;
@@ -739,14 +815,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context = mailbox->buf;
if (cq->is_kernel) {
- err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
- &cq->queue, &cq->is_direct,
- &dev->driver_pd, 1, &cq->mr);
+ err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
if (err)
goto err_out_mailbox;
-
- for (i = 0; i < nent; ++i)
- set_cqe_hw(get_cqe(cq, i));
}
spin_lock_init(&cq->lock);
@@ -765,7 +836,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context->error_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
cq_context->comp_eqn = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
cq_context->pd = cpu_to_be32(pdn);
- cq_context->lkey = cpu_to_be32(cq->mr.ibmr.lkey);
+ cq_context->lkey = cpu_to_be32(cq->buf.mr.ibmr.lkey);
cq_context->cqn = cpu_to_be32(cq->cqn);
if (mthca_is_memfree(dev)) {
@@ -803,7 +874,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
err_out_free_mr:
if (cq->is_kernel)
- mthca_free_cq_buf(dev, cq);
+ mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
err_out_mailbox:
mthca_free_mailbox(dev, mailbox);
@@ -832,8 +903,6 @@ void mthca_free_cq(struct mthca_dev *dev,
int err;
u8 status;
- might_sleep();
-
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
mthca_warn(dev, "No memory for mailbox to free CQ.\n");
@@ -871,7 +940,7 @@ void mthca_free_cq(struct mthca_dev *dev,
wait_event(cq->wait, !atomic_read(&cq->refcount));
if (cq->is_kernel) {
- mthca_free_cq_buf(dev, cq);
+ mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index e481037288d6..ad52edbefe98 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
@@ -53,8 +53,8 @@
#define DRV_NAME "ib_mthca"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "0.07"
-#define DRV_RELDATE "February 13, 2006"
+#define DRV_VERSION "0.08"
+#define DRV_RELDATE "February 14, 2006"
enum {
MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
@@ -64,7 +64,8 @@ enum {
MTHCA_FLAG_NO_LAM = 1 << 5,
MTHCA_FLAG_FMR = 1 << 6,
MTHCA_FLAG_MEMFREE = 1 << 7,
- MTHCA_FLAG_PCIE = 1 << 8
+ MTHCA_FLAG_PCIE = 1 << 8,
+ MTHCA_FLAG_SINAI_OPT = 1 << 9
};
enum {
@@ -110,9 +111,17 @@ enum {
MTHCA_OPCODE_INVALID = 0xff
};
+enum {
+ MTHCA_CMD_USE_EVENTS = 1 << 0,
+ MTHCA_CMD_POST_DOORBELLS = 1 << 1
+};
+
+enum {
+ MTHCA_CMD_NUM_DBELL_DWORDS = 8
+};
+
struct mthca_cmd {
struct pci_pool *pool;
- int use_events;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
@@ -121,6 +130,9 @@ struct mthca_cmd {
int free_head;
struct mthca_cmd_context *context;
u16 token_mask;
+ u32 flags;
+ void __iomem *dbell_map;
+ u16 dbell_offsets[MTHCA_CMD_NUM_DBELL_DWORDS];
};
struct mthca_limits {
@@ -470,12 +482,16 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
enum ib_event_type event_type);
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
struct mthca_srq *srq);
+void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
+int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
+void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
struct ib_srq_attr *attr, struct mthca_srq *srq);
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask);
+int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
enum ib_event_type event_type);
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
@@ -486,6 +502,8 @@ int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type);
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask);
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
@@ -495,8 +513,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct ib_recv_wr **bad_wr);
-int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
- int index, int *dbd, __be32 *new_wqe);
+void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+ int index, int *dbd, __be32 *new_wqe);
int mthca_alloc_qp(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_cq *send_cq,
@@ -522,6 +540,7 @@ int mthca_create_ah(struct mthca_dev *dev,
int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah);
int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
struct ib_ud_header *header);
+int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr);
int mthca_ah_grh_present(struct mthca_ah *ah);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 2eabb27804cd..cbdc348fb689 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -497,7 +497,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
- npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
+ npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
@@ -825,7 +825,7 @@ void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
{
u8 status;
- mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
+ mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
@@ -928,7 +928,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
- for (i = 0; i < MTHCA_EQ_CMD; ++i)
+ for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (mthca_is_memfree(dev))
arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
else
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 1229c604c6e0..4ace6a392f41 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -109,6 +109,19 @@ static void smp_snoop(struct ib_device *ibdev,
}
}
+static void node_desc_override(struct ib_device *dev,
+ struct ib_mad *mad)
+{
+ if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+ mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
+ mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
+ mutex_lock(&to_mdev(dev)->cap_mask_mutex);
+ memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
+ mutex_unlock(&to_mdev(dev)->cap_mask_mutex);
+ }
+}
+
static void forward_trap(struct mthca_dev *dev,
u8 port_num,
struct ib_mad *mad)
@@ -207,8 +220,10 @@ int mthca_process_mad(struct ib_device *ibdev,
return IB_MAD_RESULT_FAILURE;
}
- if (!out_mad->mad_hdr.status)
+ if (!out_mad->mad_hdr.status) {
smp_snoop(ibdev, port_num, in_mad);
+ node_desc_override(ibdev, out_mad);
+ }
/* set return bit in status of directed route responses */
if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 9c849d27b06e..266f347c6707 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -935,13 +935,19 @@ enum {
static struct {
u64 latest_fw;
- int is_memfree;
- int is_pcie;
+ u32 flags;
} mthca_hca_table[] = {
- [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 3, 3), .is_memfree = 0, .is_pcie = 0 },
- [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 0), .is_memfree = 0, .is_pcie = 1 },
- [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), .is_memfree = 1, .is_pcie = 1 },
- [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 1), .is_memfree = 1, .is_pcie = 1 }
+ [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
+ .flags = 0 },
+ [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400),
+ .flags = MTHCA_FLAG_PCIE },
+ [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0),
+ .flags = MTHCA_FLAG_MEMFREE |
+ MTHCA_FLAG_PCIE },
+ [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800),
+ .flags = MTHCA_FLAG_MEMFREE |
+ MTHCA_FLAG_PCIE |
+ MTHCA_FLAG_SINAI_OPT }
};
static int __devinit mthca_init_one(struct pci_dev *pdev,
@@ -1031,12 +1037,9 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
mdev->pdev = pdev;
+ mdev->mthca_flags = mthca_hca_table[id->driver_data].flags;
if (ddr_hidden)
mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
- if (mthca_hca_table[id->driver_data].is_memfree)
- mdev->mthca_flags |= MTHCA_FLAG_MEMFREE;
- if (mthca_hca_table[id->driver_data].is_pcie)
- mdev->mthca_flags |= MTHCA_FLAG_PCIE;
/*
* Now reset the HCA before we touch the PCI capabilities or
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 321f11e707f2..9965bda9afed 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -187,7 +187,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
for (i = 0; i < MTHCA_QP_PER_MGM; ++i)
if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) {
- mthca_dbg(dev, "QP %06x already a member of MGM\n",
+ mthca_dbg(dev, "QP %06x already a member of MGM\n",
ibqp->qp_num);
err = 0;
goto out;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index d709cb162a72..15cc2f6eb475 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -202,7 +202,8 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
if (--table->icm[i]->refcount == 0) {
mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+ &status);
mthca_free_icm(dev, table->icm[i]);
table->icm[i] = NULL;
}
@@ -336,7 +337,8 @@ err:
for (i = 0; i < num_icm; ++i)
if (table->icm[i]) {
mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+ &status);
mthca_free_icm(dev, table->icm[i]);
}
@@ -353,7 +355,8 @@ void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
for (i = 0; i < table->num_icm; ++i)
if (table->icm[i]) {
mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+ &status);
mthca_free_icm(dev, table->icm[i]);
}
@@ -364,7 +367,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
{
return dev->uar_table.uarc_base +
uar->index * dev->uar_table.uarc_size +
- page * 4096;
+ page * MTHCA_ICM_PAGE_SIZE;
}
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
@@ -401,7 +404,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
if (ret < 0)
goto out;
- db_tab->page[i].mem.length = 4096;
+ db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
@@ -455,7 +458,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
if (!mthca_is_memfree(dev))
return NULL;
- npages = dev->uar_table.uarc_size / 4096;
+ npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
if (!db_tab)
return ERR_PTR(-ENOMEM);
@@ -478,7 +481,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
if (!mthca_is_memfree(dev))
return;
- for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) {
+ for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
if (db_tab->page[i].uvirt) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
@@ -551,20 +554,20 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
+ page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
&page->mapping, GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
}
- memset(page->db_rec, 0, 4096);
+ memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
ret = mthca_MAP_ICM_page(dev, page->mapping,
mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
if (!ret && status)
ret = -EINVAL;
if (ret) {
- dma_free_coherent(&dev->pdev->dev, 4096,
+ dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
page->db_rec, page->mapping);
goto out;
}
@@ -612,7 +615,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
i >= dev->db_tab->max_group1 - 1) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
- dma_free_coherent(&dev->pdev->dev, 4096,
+ dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
page->db_rec, page->mapping);
page->db_rec = NULL;
@@ -640,7 +643,7 @@ int mthca_init_db_tab(struct mthca_dev *dev)
mutex_init(&dev->db_tab->mutex);
- dev->db_tab->npages = dev->uar_table.uarc_size / 4096;
+ dev->db_tab->npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
dev->db_tab->max_group1 = 0;
dev->db_tab->min_group2 = dev->db_tab->npages - 1;
@@ -681,7 +684,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
- dma_free_coherent(&dev->pdev->dev, 4096,
+ dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
dev->db_tab->page[i].db_rec,
dev->db_tab->page[i].mapping);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index 36f1141a08aa..6d42947e1dc4 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -45,6 +45,12 @@
((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
(sizeof (struct scatterlist)))
+enum {
+ MTHCA_ICM_PAGE_SHIFT = 12,
+ MTHCA_ICM_PAGE_SIZE = 1 << MTHCA_ICM_PAGE_SHIFT,
+ MTHCA_DB_REC_PER_PAGE = MTHCA_ICM_PAGE_SIZE / 8
+};
+
struct mthca_icm_chunk {
struct list_head list;
int npages;
@@ -131,10 +137,6 @@ static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter)
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
-enum {
- MTHCA_DB_REC_PER_PAGE = 4096 / 8
-};
-
struct mthca_db_page {
DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
__be64 *db_rec;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index e995e2aa016d..698b62125765 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -76,6 +76,8 @@ struct mthca_mpt_entry {
#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00
+#define SINAI_FMR_KEY_INC 0x1000000
+
/*
* Buddy allocator for MTT segments (currently not very efficient
* since it doesn't keep a free list and just searches linearly
@@ -330,6 +332,14 @@ static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
return tavor_key_to_hw_index(key);
}
+static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
+{
+ if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+ return ((key << 20) & 0x800000) | (key & 0x7fffff);
+ else
+ return key;
+}
+
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
@@ -340,13 +350,12 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
int err;
u8 status;
- might_sleep();
-
WARN_ON(buffer_size_shift >= 32);
key = mthca_alloc(&dev->mr_table.mpt_alloc);
if (key == -1)
return -ENOMEM;
+ key = adjust_key(dev, key);
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
if (mthca_is_memfree(dev)) {
@@ -467,8 +476,6 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
int err;
u8 status;
- might_sleep();
-
err = mthca_HW2SW_MPT(dev, NULL,
key_to_hw_index(dev, mr->ibmr.lkey) &
(dev->limits.num_mpts - 1),
@@ -495,9 +502,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
int err = -ENOMEM;
int i;
- might_sleep();
-
- if (mr->attr.page_size < 12 || mr->attr.page_size >= 32)
+ if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
return -EINVAL;
/* For Arbel, all MTTs must fit in the same page. */
@@ -510,6 +515,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
key = mthca_alloc(&dev->mr_table.mpt_alloc);
if (key == -1)
return -ENOMEM;
+ key = adjust_key(dev, key);
idx = key & (dev->limits.num_mpts - 1);
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
@@ -523,7 +529,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
BUG_ON(!mr->mem.arbel.mpt);
} else
mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
- sizeof *(mr->mem.tavor.mpt) * idx;
+ sizeof *(mr->mem.tavor.mpt) * idx;
mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
if (IS_ERR(mr->mtt))
@@ -549,7 +555,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
MTHCA_MPT_FLAG_REGION |
access);
- mpt_entry->page_size = cpu_to_be32(mr->attr.page_size - 12);
+ mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
mpt_entry->key = cpu_to_be32(key);
mpt_entry->pd = cpu_to_be32(pd);
memset(&mpt_entry->start, 0,
@@ -617,7 +623,7 @@ static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
if (list_len > fmr->attr.max_pages)
return -EINVAL;
- page_mask = (1 << fmr->attr.page_size) - 1;
+ page_mask = (1 << fmr->attr.page_shift) - 1;
/* We are getting page lists, so va must be page aligned. */
if (iova & page_mask)
@@ -665,7 +671,7 @@ int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
}
mpt_entry.lkey = cpu_to_be32(key);
- mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
+ mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
mpt_entry.start = cpu_to_be64(iova);
__raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
@@ -693,7 +699,10 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
++fmr->maps;
key = arbel_key_to_hw_index(fmr->ibmr.lkey);
- key += dev->limits.num_mpts;
+ if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+ key += SINAI_FMR_KEY_INC;
+ else
+ key += dev->limits.num_mpts;
fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
@@ -706,7 +715,7 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
fmr->mem.arbel.mpt->key = cpu_to_be32(key);
fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
- fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
+ fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
fmr->mem.arbel.mpt->start = cpu_to_be64(iova);
wmb();
@@ -766,6 +775,9 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
else
dev->mthca_flags |= MTHCA_FLAG_FMR;
+ if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
+ mthca_dbg(dev, "Memory key throughput optimization activated.\n");
+
err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
fls(dev->limits.num_mtt_segs - 1));
@@ -785,7 +797,7 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
}
dev->mr_table.tavor_fmr.mpt_base =
- ioremap(dev->mr_table.mpt_base,
+ ioremap(dev->mr_table.mpt_base,
(1 << i) * sizeof (struct mthca_mpt_entry));
if (!dev->mr_table.tavor_fmr.mpt_base) {
@@ -813,7 +825,7 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
goto err_reserve_fmr;
dev->mr_table.fmr_mtt_buddy =
- &dev->mr_table.tavor_fmr.mtt_buddy;
+ &dev->mr_table.tavor_fmr.mtt_buddy;
} else
dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index 3dbf06a6e6f4..105fc5faaddf 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -43,8 +43,6 @@ int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd)
{
int err = 0;
- might_sleep();
-
pd->privileged = privileged;
atomic_set(&pd->sqp_count, 0);
@@ -66,7 +64,6 @@ int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd)
void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd)
{
- might_sleep();
if (pd->privileged)
mthca_free_mr(dev, &pd->ntmr);
mthca_free(&dev->pd_table.alloc, pd->pd_num);
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index 08a909371b0a..58d44aa3c302 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -152,7 +152,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
}
if (total_size > mem_avail) {
mthca_err(dev, "Profile requires 0x%llx bytes; "
- "won't in 0x%llx bytes of context memory.\n",
+ "won't fit in 0x%llx bytes of context memory.\n",
(unsigned long long) total_size,
(unsigned long long) mem_avail);
kfree(profile);
@@ -262,6 +262,14 @@ u64 mthca_make_profile(struct mthca_dev *dev,
*/
dev->limits.num_pds = MTHCA_NUM_PDS;
+ if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT &&
+ init_hca->log_mpt_sz > 23) {
+ mthca_warn(dev, "MPT table too large (requested size 2^%d >= 2^24)\n",
+ init_hca->log_mpt_sz);
+ mthca_warn(dev, "Disabling memory key throughput optimization.\n");
+ dev->mthca_flags &= ~MTHCA_FLAG_SINAI_OPT;
+ }
+
/*
* For Tavor, FMRs use ioremapped PCI memory. For 32 bit
* systems it may use too much vmalloc space to map all MTT
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index e88e39aef85a..2c250bc11c33 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1,7 +1,7 @@
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
@@ -108,12 +108,12 @@ static int mthca_query_device(struct ib_device *ibdev,
props->max_srq_wr = mdev->limits.max_srq_wqes;
props->max_srq_sge = mdev->limits.max_sg;
props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
- props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
+ props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = mdev->limits.pkey_table_len;
props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms;
props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
- props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
err = 0;
@@ -176,6 +176,23 @@ static int mthca_query_port(struct ib_device *ibdev,
return err;
}
+static int mthca_modify_device(struct ib_device *ibdev,
+ int mask,
+ struct ib_device_modify *props)
+{
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
+ return -ERESTARTSYS;
+ memcpy(ibdev->node_desc, props->node_desc, 64);
+ mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
+ }
+
+ return 0;
+}
+
static int mthca_modify_port(struct ib_device *ibdev,
u8 port, int port_modify_mask,
struct ib_port_modify *props)
@@ -669,9 +686,9 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
}
if (context) {
- cq->mr.ibmr.lkey = ucmd.lkey;
- cq->set_ci_db_index = ucmd.set_db_index;
- cq->arm_db_index = ucmd.arm_db_index;
+ cq->buf.mr.ibmr.lkey = ucmd.lkey;
+ cq->set_ci_db_index = ucmd.set_db_index;
+ cq->arm_db_index = ucmd.arm_db_index;
}
for (nent = 1; nent <= entries; nent <<= 1)
@@ -689,6 +706,8 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
goto err_free;
}
+ cq->resize_buf = NULL;
+
return &cq->ibcq;
err_free:
@@ -707,6 +726,121 @@ err_unmap_set:
return ERR_PTR(err);
}
+static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
+ int entries)
+{
+ int ret;
+
+ spin_lock_irq(&cq->lock);
+ if (cq->resize_buf) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ if (!cq->resize_buf) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ cq->resize_buf->state = CQ_RESIZE_ALLOC;
+
+ ret = 0;
+
+unlock:
+ spin_unlock_irq(&cq->lock);
+
+ if (ret)
+ return ret;
+
+ ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
+ if (ret) {
+ spin_lock_irq(&cq->lock);
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ spin_unlock_irq(&cq->lock);
+ return ret;
+ }
+
+ cq->resize_buf->cqe = entries - 1;
+
+ spin_lock_irq(&cq->lock);
+ cq->resize_buf->state = CQ_RESIZE_READY;
+ spin_unlock_irq(&cq->lock);
+
+ return 0;
+}
+
+static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+ struct mthca_dev *dev = to_mdev(ibcq->device);
+ struct mthca_cq *cq = to_mcq(ibcq);
+ struct mthca_resize_cq ucmd;
+ u32 lkey;
+ u8 status;
+ int ret;
+
+ if (entries < 1 || entries > dev->limits.max_cqes)
+ return -EINVAL;
+
+ entries = roundup_pow_of_two(entries + 1);
+ if (entries == ibcq->cqe + 1)
+ return 0;
+
+ if (cq->is_kernel) {
+ ret = mthca_alloc_resize_buf(dev, cq, entries);
+ if (ret)
+ return ret;
+ lkey = cq->resize_buf->buf.mr.ibmr.lkey;
+ } else {
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+ return -EFAULT;
+ lkey = ucmd.lkey;
+ }
+
+ ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, long_log2(entries), &status);
+ if (status)
+ ret = -EINVAL;
+
+ if (ret) {
+ if (cq->resize_buf) {
+ mthca_free_cq_buf(dev, &cq->resize_buf->buf,
+ cq->resize_buf->cqe);
+ kfree(cq->resize_buf);
+ spin_lock_irq(&cq->lock);
+ cq->resize_buf = NULL;
+ spin_unlock_irq(&cq->lock);
+ }
+ return ret;
+ }
+
+ if (cq->is_kernel) {
+ struct mthca_cq_buf tbuf;
+ int tcqe;
+
+ spin_lock_irq(&cq->lock);
+ if (cq->resize_buf->state == CQ_RESIZE_READY) {
+ mthca_cq_resize_copy_cqes(cq);
+ tbuf = cq->buf;
+ tcqe = cq->ibcq.cqe;
+ cq->buf = cq->resize_buf->buf;
+ cq->ibcq.cqe = cq->resize_buf->cqe;
+ } else {
+ tbuf = cq->resize_buf->buf;
+ tcqe = cq->resize_buf->cqe;
+ }
+
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ spin_unlock_irq(&cq->lock);
+
+ mthca_free_cq_buf(dev, &tbuf, tcqe);
+ } else
+ ibcq->cqe = entries - 1;
+
+ return 0;
+}
+
static int mthca_destroy_cq(struct ib_cq *cq)
{
if (cq->uobject) {
@@ -1070,6 +1204,20 @@ static int mthca_init_node_data(struct mthca_dev *dev)
goto out;
init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+
+ err = mthca_MAD_IFC(dev, 1, 1,
+ 1, NULL, NULL, in_mad, out_mad,
+ &status);
+ if (err)
+ goto out;
+ if (status) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
+
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(dev, 1, 1,
@@ -1113,14 +1261,17 @@ int mthca_register_device(struct mthca_dev *dev)
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
dev->ib_dev.node_type = IB_NODE_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
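
The uverbs_cmd_mask bits above advertise which userspace verbs commands the
device supports; ib_uverbs consults this mask before dispatching, so adding
IB_USER_VERBS_CMD_RESIZE_CQ, QUERY_QP and QUERY_SRQ here is what makes the
new methods reachable from userspace. Roughly (the real check lives in
ib_uverbs_write() and may differ in detail):

	if (!(ib_dev->uverbs_cmd_mask & (1ull << command)))
		return -EINVAL;
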
@@ -1128,6 +1279,7 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.class_dev.dev = &dev->pdev->dev;
dev->ib_dev.query_device = mthca_query_device;
dev->ib_dev.query_port = mthca_query_port;
+ dev->ib_dev.modify_device = mthca_modify_device;
dev->ib_dev.modify_port = mthca_modify_port;
dev->ib_dev.query_pkey = mthca_query_pkey;
dev->ib_dev.query_gid = mthca_query_gid;
@@ -1137,11 +1289,13 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.alloc_pd = mthca_alloc_pd;
dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
dev->ib_dev.create_ah = mthca_ah_create;
+ dev->ib_dev.query_ah = mthca_ah_query;
dev->ib_dev.destroy_ah = mthca_ah_destroy;
if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
dev->ib_dev.create_srq = mthca_create_srq;
- dev->ib_dev.modify_srq = mthca_modify_srq;
+ dev->ib_dev.modify_srq = mthca_modify_srq;
+ dev->ib_dev.query_srq = mthca_query_srq;
dev->ib_dev.destroy_srq = mthca_destroy_srq;
if (mthca_is_memfree(dev))
@@ -1152,8 +1306,10 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.create_qp = mthca_create_qp;
dev->ib_dev.modify_qp = mthca_modify_qp;
+ dev->ib_dev.query_qp = mthca_query_qp;
dev->ib_dev.destroy_qp = mthca_destroy_qp;
dev->ib_dev.create_cq = mthca_create_cq;
+ dev->ib_dev.resize_cq = mthca_resize_cq;
dev->ib_dev.destroy_cq = mthca_destroy_cq;
dev->ib_dev.poll_cq = mthca_poll_cq;
dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 1e73947b4702..2e7f52136965 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -164,9 +164,11 @@ struct mthca_ah {
* - wait_event until ref count is zero
*
* It is the consumer's responsibility to make sure that no QP
- * operations (WQE posting or state modification) are pending when the
+ * operations (WQE posting or state modification) are pending when a
* QP is destroyed. Also, the consumer must make sure that calls to
- * qp_modify are serialized.
+ * qp_modify are serialized. Similarly, the consumer is responsible
+ * for ensuring that no CQ resize operations are pending when a CQ
+ * is destroyed.
*
* Possible optimizations (wait for profile data to see if/where we
* have locks bouncing between CPUs):
@@ -176,25 +178,40 @@ struct mthca_ah {
* send queue and one for the receive queue)
*/
+struct mthca_cq_buf {
+ union mthca_buf queue;
+ struct mthca_mr mr;
+ int is_direct;
+};
+
+struct mthca_cq_resize {
+ struct mthca_cq_buf buf;
+ int cqe;
+ enum {
+ CQ_RESIZE_ALLOC,
+ CQ_RESIZE_READY,
+ CQ_RESIZE_SWAPPED
+ } state;
+};
+
struct mthca_cq {
- struct ib_cq ibcq;
- spinlock_t lock;
- atomic_t refcount;
- int cqn;
- u32 cons_index;
- int is_direct;
- int is_kernel;
+ struct ib_cq ibcq;
+ spinlock_t lock;
+ atomic_t refcount;
+ int cqn;
+ u32 cons_index;
+ struct mthca_cq_buf buf;
+ struct mthca_cq_resize *resize_buf;
+ int is_kernel;
/* Next fields are Arbel only */
- int set_ci_db_index;
- __be32 *set_ci_db;
- int arm_db_index;
- __be32 *arm_db;
- int arm_sn;
+ int set_ci_db_index;
+ __be32 *set_ci_db;
+ int arm_db_index;
+ __be32 *arm_db;
+ int arm_sn;
- union mthca_buf queue;
- struct mthca_mr mr;
- wait_queue_head_t wait;
+ wait_queue_head_t wait;
};
struct mthca_srq {
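
The resize_buf state field is a small handshake between the verbs path and
the CQ completion code. As the hunks above suggest (this is a reading of the
patch, not text from it):

	/*
	 * CQ_RESIZE_ALLOC   - resize_buf allocated under cq->lock, new CQE
	 *                     buffer not yet usable.
	 * CQ_RESIZE_READY   - new buffer allocated and its lkey handed to the
	 *                     RESIZE_CQ firmware command; mthca_resize_cq()
	 *                     copies CQEs and swaps cq->buf under cq->lock on
	 *                     success.
	 * CQ_RESIZE_SWAPPED - another path (the CQ code in mthca_cq.c) already
	 *                     swapped the buffers, so mthca_resize_cq() only
	 *                     frees the leftover one.
	 */
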
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index fba608ed7df2..1bc2678c2fae 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -2,7 +2,7 @@
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -286,207 +286,6 @@ static int to_mthca_st(int transport)
}
}
-static const struct {
- int trans;
- u32 req_param[NUM_TRANS];
- u32 opt_param[NUM_TRANS];
-} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = {
- [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
- [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
- [IB_QPS_INIT] = {
- .trans = MTHCA_TRANS_RST2INIT,
- .req_param = {
- [UD] = (IB_QP_PKEY_INDEX |
- IB_QP_PORT |
- IB_QP_QKEY),
- [UC] = (IB_QP_PKEY_INDEX |
- IB_QP_PORT |
- IB_QP_ACCESS_FLAGS),
- [RC] = (IB_QP_PKEY_INDEX |
- IB_QP_PORT |
- IB_QP_ACCESS_FLAGS),
- [MLX] = (IB_QP_PKEY_INDEX |
- IB_QP_QKEY),
- },
- /* bug-for-bug compatibility with VAPI: */
- .opt_param = {
- [MLX] = IB_QP_PORT
- }
- },
- },
- [IB_QPS_INIT] = {
- [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
- [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
- [IB_QPS_INIT] = {
- .trans = MTHCA_TRANS_INIT2INIT,
- .opt_param = {
- [UD] = (IB_QP_PKEY_INDEX |
- IB_QP_PORT |
- IB_QP_QKEY),
- [UC] = (IB_QP_PKEY_INDEX |
- IB_QP_PORT |
- IB_QP_ACCESS_FLAGS),
- [RC] = (IB_QP_PKEY_INDEX |
- IB_QP_PORT |
- IB_QP_ACCESS_FLAGS),
- [MLX] = (IB_QP_PKEY_INDEX |
- IB_QP_QKEY),
- }
- },
- [IB_QPS_RTR] = {
- .trans = MTHCA_TRANS_INIT2RTR,
- .req_param = {
- [UC] = (IB_QP_AV |
- IB_QP_PATH_MTU |
- IB_QP_DEST_QPN |
- IB_QP_RQ_PSN),
- [RC] = (IB_QP_AV |
- IB_QP_PATH_MTU |
- IB_QP_DEST_QPN |
- IB_QP_RQ_PSN |
- IB_QP_MAX_DEST_RD_ATOMIC |
- IB_QP_MIN_RNR_TIMER),
- },
- .opt_param = {
- [UD] = (IB_QP_PKEY_INDEX |
- IB_QP_QKEY),
- [UC] = (IB_QP_ALT_PATH |
- IB_QP_ACCESS_FLAGS |
- IB_QP_PKEY_INDEX),
- [RC] = (IB_QP_ALT_PATH |
- IB_QP_ACCESS_FLAGS |
- IB_QP_PKEY_INDEX),
- [MLX] = (IB_QP_PKEY_INDEX |
- IB_QP_QKEY),
- }
- }
- },
- [IB_QPS_RTR] = {
- [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
- [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
- [IB_QPS_RTS] = {
- .trans = MTHCA_TRANS_RTR2RTS,
- .req_param = {
- [UD] = IB_QP_SQ_PSN,
- [UC] = IB_QP_SQ_PSN,
- [RC] = (IB_QP_TIMEOUT |
- IB_QP_RETRY_CNT |
- IB_QP_RNR_RETRY |
- IB_QP_SQ_PSN |
- IB_QP_MAX_QP_RD_ATOMIC),
- [MLX] = IB_QP_SQ_PSN,
- },
- .opt_param = {
- [UD] = (IB_QP_CUR_STATE |
- IB_QP_QKEY),
- [UC] = (IB_QP_CUR_STATE |
- IB_QP_ALT_PATH |
- IB_QP_ACCESS_FLAGS |
- IB_QP_PATH_MIG_STATE),
- [RC] = (IB_QP_CUR_STATE |
- IB_QP_ALT_PATH |
- IB_QP_ACCESS_FLAGS |
- IB_QP_MIN_RNR_TIMER |
- IB_QP_PATH_MIG_STATE),
- [MLX] = (IB_QP_CUR_STATE |
- IB_QP_QKEY),
- }
- }
- },
- [IB_QPS_RTS] = {
- [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
- [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
- [IB_QPS_RTS] = {
- .trans = MTHCA_TRANS_RTS2RTS,
- .opt_param = {
- [UD] = (IB_QP_CUR_STATE |
- IB_QP_QKEY),
- [UC] = (IB_QP_ACCESS_FLAGS |
- IB_QP_ALT_PATH |
- IB_QP_PATH_MIG_STATE),
- [RC] = (IB_QP_ACCESS_FLAGS |
- IB_QP_ALT_PATH |
- IB_QP_PATH_MIG_STATE |
- IB_QP_MIN_RNR_TIMER),
- [MLX] = (IB_QP_CUR_STATE |
- IB_QP_QKEY),
- }
- },
- [IB_QPS_SQD] = {
- .trans = MTHCA_TRANS_RTS2SQD,
- },
- },
- [IB_QPS_SQD] = {
- [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
- [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
- [IB_QPS_RTS] = {
- .trans = MTHCA_TRANS_SQD2RTS,
- .opt_param = {
- [UD] = (IB_QP_CUR_STATE |
- IB_QP_QKEY),
- [UC] = (IB_QP_CUR_STATE |
- IB_QP_ALT_PATH |
- IB_QP_ACCESS_FLAGS |
- IB_QP_PATH_MIG_STATE),
- [RC] = (IB_QP_CUR_STATE |
- IB_QP_ALT_PATH |
- IB_QP_ACCESS_FLAGS |
- IB_QP_MIN_RNR_TIMER |
- IB_QP_PATH_MIG_STATE),
- [MLX] = (IB_QP_CUR_STATE |
- IB_QP_QKEY),
- }
- },
- [IB_QPS_SQD] = {
- .trans = MTHCA_TRANS_SQD2SQD,
- .opt_param = {
- [UD] = (IB_QP_PKEY_INDEX |
- IB_QP_QKEY),
- [UC] = (IB_QP_AV |
- IB_QP_CUR_STATE |
- IB_QP_ALT_PATH |
- IB_QP_ACCESS_FLAGS |
- IB_QP_PKEY_INDEX |
- IB_QP_PATH_MIG_STATE),
- [RC] = (IB_QP_AV |
- IB_QP_TIMEOUT |
- IB_QP_RETRY_CNT |
- IB_QP_RNR_RETRY |
- IB_QP_MAX_QP_RD_ATOMIC |
- IB_QP_MAX_DEST_RD_ATOMIC |
- IB_QP_CUR_STATE |
- IB_QP_ALT_PATH |
- IB_QP_ACCESS_FLAGS |
- IB_QP_PKEY_INDEX |
- IB_QP_MIN_RNR_TIMER |
- IB_QP_PATH_MIG_STATE),
- [MLX] = (IB_QP_PKEY_INDEX |
- IB_QP_QKEY),
- }
- }
- },
- [IB_QPS_SQE] = {
- [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
- [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
- [IB_QPS_RTS] = {
- .trans = MTHCA_TRANS_SQERR2RTS,
- .opt_param = {
- [UD] = (IB_QP_CUR_STATE |
- IB_QP_QKEY),
- [UC] = (IB_QP_CUR_STATE |
- IB_QP_ACCESS_FLAGS),
- [MLX] = (IB_QP_CUR_STATE |
- IB_QP_QKEY),
- }
- }
- },
- [IB_QPS_ERR] = {
- [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
- [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }
- }
-};
-
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
int attr_mask)
{
@@ -549,23 +348,167 @@ static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
return cpu_to_be32(hw_access_flags);
}
-static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
+static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
+{
+ switch (mthca_state) {
+ case MTHCA_QP_STATE_RST: return IB_QPS_RESET;
+ case MTHCA_QP_STATE_INIT: return IB_QPS_INIT;
+ case MTHCA_QP_STATE_RTR: return IB_QPS_RTR;
+ case MTHCA_QP_STATE_RTS: return IB_QPS_RTS;
+ case MTHCA_QP_STATE_DRAINING:
+ case MTHCA_QP_STATE_SQD: return IB_QPS_SQD;
+ case MTHCA_QP_STATE_SQE: return IB_QPS_SQE;
+ case MTHCA_QP_STATE_ERR: return IB_QPS_ERR;
+ default: return -1;
+ }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
+{
+ switch (mthca_mig_state) {
+ case 0: return IB_MIG_ARMED;
+ case 1: return IB_MIG_REARM;
+ case 3: return IB_MIG_MIGRATED;
+ default: return -1;
+ }
+}
+
+static int to_ib_qp_access_flags(int mthca_flags)
+{
+ int ib_flags = 0;
+
+ if (mthca_flags & MTHCA_QP_BIT_RRE)
+ ib_flags |= IB_ACCESS_REMOTE_READ;
+ if (mthca_flags & MTHCA_QP_BIT_RWE)
+ ib_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (mthca_flags & MTHCA_QP_BIT_RAE)
+ ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
+ struct mthca_qp_path *path)
+{
+ memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
+ ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+ ib_ah_attr->dlid = be16_to_cpu(path->rlid);
+ ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
+ ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
+ ib_ah_attr->static_rate = path->static_rate & 0x7;
+ ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+ if (ib_ah_attr->ah_flags) {
+ ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
+ ib_ah_attr->grh.hop_limit = path->hop_limit;
+ ib_ah_attr->grh.traffic_class =
+ (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
+ ib_ah_attr->grh.flow_label =
+ be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
+ memcpy(ib_ah_attr->grh.dgid.raw,
+ path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+ }
+}
+
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mthca_dev *dev = to_mdev(ibqp->device);
+ struct mthca_qp *qp = to_mqp(ibqp);
+ int err;
+ struct mthca_mailbox *mailbox;
+ struct mthca_qp_param *qp_param;
+ struct mthca_qp_context *context;
+ int mthca_state;
+ u8 status;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
+ if (err)
+ goto out;
+ if (status) {
+ mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
+ err = -EINVAL;
+ goto out;
+ }
+
+ qp_param = mailbox->buf;
+ context = &qp_param->context;
+ mthca_state = be32_to_cpu(context->flags) >> 28;
+
+ qp_attr->qp_state = to_ib_qp_state(mthca_state);
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->path_mtu = context->mtu_msgmax >> 5;
+ qp_attr->path_mig_state =
+ to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
+ qp_attr->qkey = be32_to_cpu(context->qkey);
+ qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
+ qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
+ qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
+ qp_attr->qp_access_flags =
+ to_ib_qp_access_flags(be32_to_cpu(context->params2));
+ qp_attr->cap.max_send_wr = qp->sq.max;
+ qp_attr->cap.max_recv_wr = qp->rq.max;
+ qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+ to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+ to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+
+ qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
+ qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+
+ /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+ qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
+
+ qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+
+ qp_attr->max_dest_rd_atomic =
+ 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
+ qp_attr->min_rnr_timer =
+ (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
+ qp_attr->port_num = qp_attr->ah_attr.port_num;
+ qp_attr->timeout = context->pri_path.ackto >> 3;
+ qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
+ qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
+ qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ qp_attr->alt_timeout = context->alt_path.ackto >> 3;
+ qp_init_attr->cap = qp_attr->cap;
+
+out:
+ mthca_free_mailbox(dev, mailbox);
+ return err;
+}
+
+static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+ struct mthca_qp_path *path)
{
path->g_mylmc = ah->src_path_bits & 0x7f;
path->rlid = cpu_to_be16(ah->dlid);
path->static_rate = !!ah->static_rate;
if (ah->ah_flags & IB_AH_GRH) {
+ if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
+ mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
+ ah->grh.sgid_index, dev->limits.gid_table_len-1);
+ return -1;
+ }
+
path->g_mylmc |= 1 << 7;
path->mgid_index = ah->grh.sgid_index;
path->hop_limit = ah->grh.hop_limit;
- path->sl_tclass_flowlabel =
+ path->sl_tclass_flowlabel =
cpu_to_be32((ah->sl << 28) |
- (ah->grh.traffic_class << 20) |
+ (ah->grh.traffic_class << 20) |
(ah->grh.flow_label));
memcpy(path->rgid, ah->grh.dgid.raw, 16);
} else
path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+
+ return 0;
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
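
The new mthca_query_qp() backs the core ib_query_qp() verb (and, through the
QUERY_QP uverbs command enabled earlier, userspace queries). A minimal sketch
of a kernel caller, purely illustrative:

	#include <rdma/ib_verbs.h>

	/* Hypothetical check: is the QP still in RTS? */
	static int qp_is_rts(struct ib_qp *qp)
	{
		struct ib_qp_attr attr;
		struct ib_qp_init_attr init_attr;
		int ret;

		ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
		if (ret)
			return ret;

		return attr.qp_state == IB_QPS_RTS;
	}
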
@@ -576,18 +519,12 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
struct mthca_mailbox *mailbox;
struct mthca_qp_param *qp_param;
struct mthca_qp_context *qp_context;
- u32 req_param, opt_param;
+ u32 sqd_event = 0;
u8 status;
int err;
if (attr_mask & IB_QP_CUR_STATE) {
- if (attr->cur_qp_state != IB_QPS_RTR &&
- attr->cur_qp_state != IB_QPS_RTS &&
- attr->cur_qp_state != IB_QPS_SQD &&
- attr->cur_qp_state != IB_QPS_SQE)
- return -EINVAL;
- else
- cur_state = attr->cur_qp_state;
+ cur_state = attr->cur_qp_state;
} else {
spin_lock_irq(&qp->sq.lock);
spin_lock(&qp->rq.lock);
@@ -596,44 +533,20 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
spin_unlock_irq(&qp->sq.lock);
}
- if (attr_mask & IB_QP_STATE) {
- if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
- return -EINVAL;
- new_state = attr->qp_state;
- } else
- new_state = cur_state;
-
- if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
- mthca_dbg(dev, "Illegal QP transition "
- "%d->%d\n", cur_state, new_state);
- return -EINVAL;
- }
-
- req_param = state_table[cur_state][new_state].req_param[qp->transport];
- opt_param = state_table[cur_state][new_state].opt_param[qp->transport];
-
- if ((req_param & attr_mask) != req_param) {
- mthca_dbg(dev, "QP transition "
- "%d->%d missing req attr 0x%08x\n",
- cur_state, new_state,
- req_param & ~attr_mask);
- return -EINVAL;
- }
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
- mthca_dbg(dev, "QP transition (transport %d) "
- "%d->%d has extra attr 0x%08x\n",
- qp->transport,
- cur_state, new_state,
- attr_mask & ~(req_param | opt_param |
- IB_QP_STATE));
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+ mthca_dbg(dev, "Bad QP transition (transport %d) "
+ "%d->%d with attr 0x%08x\n",
+ qp->transport, cur_state, new_state,
+ attr_mask);
return -EINVAL;
}
- if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
attr->pkey_index >= dev->limits.pkey_table_len) {
- mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
- attr->pkey_index,dev->limits.pkey_table_len-1);
+ mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
+ attr->pkey_index, dev->limits.pkey_table_len-1);
return -EINVAL;
}
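
The hand-rolled state_table removed above is replaced by the core helper
ib_modify_qp_is_ok(), which encodes the spec's legal transitions and the
required/optional attribute masks once for every driver. For example, a
RESET->INIT transition on an RC QP still has to supply the P_Key index, port
and access flags to pass the check (illustrative, not from this patch):

	/* Hypothetical caller: move an RC QP from RESET to INIT. */
	static int qp_to_init(struct ib_qp *qp, u8 port, u16 pkey_index)
	{
		struct ib_qp_attr attr = {
			.qp_state        = IB_QPS_INIT,
			.pkey_index      = pkey_index,
			.port_num        = port,
			.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
		};

		return ib_modify_qp(qp, &attr,
				    IB_QP_STATE      |
				    IB_QP_PKEY_INDEX |
				    IB_QP_PORT       |
				    IB_QP_ACCESS_FLAGS);
	}
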
@@ -688,8 +601,14 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
if (qp->transport == MLX || qp->transport == UD)
qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
- else if (attr_mask & IB_QP_PATH_MTU)
+ else if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
+ mthca_dbg(dev, "path MTU (%u) is invalid\n",
+ attr->path_mtu);
+ return -EINVAL;
+ }
qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
+ }
if (mthca_is_memfree(dev)) {
if (qp->rq.max)
@@ -733,12 +652,14 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
if (attr_mask & IB_QP_RNR_RETRY) {
qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
attr->rnr_retry << 5;
- qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
+ qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
}
if (attr_mask & IB_QP_AV) {
- mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
+ if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path))
+ return -EINVAL;
+
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
}
@@ -748,14 +669,22 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
+ mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
+ attr->alt_pkey_index, dev->limits.pkey_table_len-1);
+ return -EINVAL;
+ }
+
if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
- mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
+ mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
attr->alt_port_num);
return -EINVAL;
}
- mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
- qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
+ if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path))
+ return -EINVAL;
+
+ qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
attr->alt_port_num << 24);
qp_context->alt_path.ackto = attr->alt_timeout << 3;
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
@@ -841,23 +770,27 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->srqn = cpu_to_be32(1 << 24 |
to_msrq(ibqp->srq)->srqn);
- err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
- qp->qpn, 0, mailbox, 0, &status);
+ if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
+ attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
+ attr->en_sqd_async_notify)
+ sqd_event = 1 << 31;
+
+ err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
+ mailbox, sqd_event, &status);
+ if (err)
+ goto out;
if (status) {
- mthca_warn(dev, "modify QP %d returned status %02x.\n",
- state_table[cur_state][new_state].trans, status);
+ mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
+ cur_state, new_state, status);
err = -EINVAL;
+ goto out;
}
- if (!err) {
- qp->state = new_state;
- if (attr_mask & IB_QP_ACCESS_FLAGS)
- qp->atomic_rd_en = attr->qp_access_flags;
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- qp->resp_depth = attr->max_dest_rd_atomic;
- }
-
- mthca_free_mailbox(dev, mailbox);
+ qp->state = new_state;
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->atomic_rd_en = attr->qp_access_flags;
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->resp_depth = attr->max_dest_rd_atomic;
if (is_sqp(dev, qp))
store_attrs(to_msqp(qp), attr, attr_mask);
@@ -882,7 +815,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
* If we moved a kernel QP to RESET, clean up all old CQ
* entries and reinitialize the QP.
*/
- if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
+ if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
@@ -901,6 +834,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
}
+out:
+ mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -1078,10 +1013,10 @@ static int mthca_map_memfree(struct mthca_dev *dev,
if (ret)
goto err_qpc;
- ret = mthca_table_get(dev, dev->qp_table.rdb_table,
- qp->qpn << dev->qp_table.rdb_shift);
- if (ret)
- goto err_eqpc;
+ ret = mthca_table_get(dev, dev->qp_table.rdb_table,
+ qp->qpn << dev->qp_table.rdb_shift);
+ if (ret)
+ goto err_eqpc;
}
@@ -1262,10 +1197,6 @@ int mthca_alloc_qp(struct mthca_dev *dev,
{
int err;
- err = mthca_set_qp_size(dev, cap, pd, qp);
- if (err)
- return err;
-
switch (type) {
case IB_QPT_RC: qp->transport = RC; break;
case IB_QPT_UC: qp->transport = UC; break;
@@ -1273,6 +1204,10 @@ int mthca_alloc_qp(struct mthca_dev *dev,
default: return -EINVAL;
}
+ err = mthca_set_qp_size(dev, cap, pd, qp);
+ if (err)
+ return err;
+
qp->qpn = mthca_alloc(&dev->qp_table.alloc);
if (qp->qpn == -1)
return -ENOMEM;
@@ -1305,6 +1240,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
int err;
+ sqp->qp.transport = MLX;
err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
if (err)
return err;
@@ -1393,7 +1329,8 @@ void mthca_free_qp(struct mthca_dev *dev,
wait_event(qp->wait, !atomic_read(&qp->refcount));
if (qp->state != IB_QPS_RESET)
- mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
+ mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
+ NULL, 0, &status);
/*
* If this is a userspace QP, the buffers, MR, CQs and so on
@@ -1699,7 +1636,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
mthca_opcode[wr->opcode]);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
+ cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
+ ((wr->send_flags & IB_SEND_FENCE) ?
+ MTHCA_NEXT_FENCE : 0));
if (!size0) {
size0 = size;
@@ -2061,7 +2000,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
mthca_opcode[wr->opcode]);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32(MTHCA_NEXT_DBD | size);
+ cpu_to_be32(MTHCA_NEXT_DBD | size |
+ ((wr->send_flags & IB_SEND_FENCE) ?
+ MTHCA_NEXT_FENCE : 0));
if (!size0) {
size0 = size;
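
Both the Tavor and Arbel post-send paths now fold IB_SEND_FENCE into the
next-segment ee_nds word, so the HCA will not start a fenced WQE until all
previous RDMA read/atomic operations on the QP have completed. A consumer
requests this per work request, e.g. (illustrative only):

	/* Post a send that must wait for prior RDMA reads/atomics. */
	static int post_fenced_send(struct ib_qp *qp, struct ib_sge *sge)
	{
		struct ib_send_wr wr = {
			.wr_id      = 1,
			.sg_list    = sge,
			.num_sge    = 1,
			.opcode     = IB_WR_SEND,
			.send_flags = IB_SEND_FENCE | IB_SEND_SIGNALED,
		};
		struct ib_send_wr *bad_wr;

		return ib_post_send(qp, &wr, &bad_wr);
	}
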
@@ -2115,7 +2056,7 @@ int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
int i;
void *wqe;
- spin_lock_irqsave(&qp->rq.lock, flags);
+ spin_lock_irqsave(&qp->rq.lock, flags);
/* XXX check that state is OK to post receive */
@@ -2182,8 +2123,8 @@ out:
return err;
}
-int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
- int index, int *dbd, __be32 *new_wqe)
+void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+ int index, int *dbd, __be32 *new_wqe)
{
struct mthca_next_seg *next;
@@ -2193,7 +2134,7 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
*/
if (qp->ibqp.srq) {
*new_wqe = 0;
- return 0;
+ return;
}
if (is_send)
@@ -2207,8 +2148,6 @@ int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
(next->ee_nds & cpu_to_be32(0x3f));
else
*new_wqe = 0;
-
- return 0;
}
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index e7e153d9c4c6..0cfd15802217 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -49,7 +49,8 @@ struct mthca_tavor_srq_context {
__be32 state_pd;
__be32 lkey;
__be32 uar;
- __be32 wqe_cnt;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
u32 reserved[2];
};
@@ -204,6 +205,10 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
ds = max(64UL,
roundup_pow_of_two(sizeof (struct mthca_next_seg) +
srq->max_gs * sizeof (struct mthca_data_seg)));
+
+ if (ds > dev->limits.max_desc_sz)
+ return -EINVAL;
+
srq->wqe_shift = long_log2(ds);
srq->srqn = mthca_alloc(&dev->srq_table.alloc);
@@ -271,6 +276,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
srq->first_free = 0;
srq->last_free = srq->max - 1;
+ attr->max_wr = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
+ attr->max_sge = srq->max_gs;
+
return 0;
err_out_free_srq:
@@ -339,7 +347,7 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask)
-{
+{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
int ret;
@@ -350,6 +358,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return -EINVAL;
if (attr_mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit > srq->max)
+ return -EINVAL;
ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
if (ret)
return ret;
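
With the new range check, arming the SRQ limit from a kernel consumer looks
like this (illustrative; mthca now rejects watermarks above srq->max):

	static int arm_srq_limit(struct ib_srq *srq, u32 watermark)
	{
		struct ib_srq_attr attr = {
			.srq_limit = watermark,
		};

		return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
	}
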
@@ -360,6 +370,41 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return 0;
}
+int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct mthca_dev *dev = to_mdev(ibsrq->device);
+ struct mthca_srq *srq = to_msrq(ibsrq);
+ struct mthca_mailbox *mailbox;
+ struct mthca_arbel_srq_context *arbel_ctx;
+ struct mthca_tavor_srq_context *tavor_ctx;
+ u8 status;
+ int err;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+ if (err)
+ goto out;
+
+ if (mthca_is_memfree(dev)) {
+ arbel_ctx = mailbox->buf;
+ srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
+ } else {
+ tavor_ctx = mailbox->buf;
+ srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
+ }
+
+ srq_attr->max_wr = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
+ srq_attr->max_sge = srq->max_gs;
+
+out:
+ mthca_free_mailbox(dev, mailbox);
+
+ return err;
+}
+
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
enum ib_event_type event_type)
{
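
mthca_query_srq() backs ib_query_srq(), letting consumers read back the SRQ
capacities and the currently armed limit watermark. A minimal sketch,
illustrative only:

	static u32 srq_current_limit(struct ib_srq *srq)
	{
		struct ib_srq_attr attr;

		if (ib_query_srq(srq, &attr))
			return 0;

		return attr.srq_limit;
	}
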
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index bb015c6494c4..02cc0a766f3a 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -75,6 +75,11 @@ struct mthca_create_cq_resp {
__u32 reserved;
};
+struct mthca_resize_cq {
+ __u32 lkey;
+ __u32 reserved;
+};
+
struct mthca_create_srq {
__u32 lkey;
__u32 db_index;