author    Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 16:49:49 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 16:49:49 -0700
commit    e0456717e483bb8a9431b80a5bdc99a928b9b003 (patch)
tree      5eb5add2bafd1f20326d70f5cb3b711d00a40b10 /drivers/infiniband/hw/mlx4
parent    Merge branch 'sched-hrtimers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip (diff)
parent    bridge: vlan: flush the dynamically learned entries on port vlan delete (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Add TX fast path in mac80211, from Johannes Berg.
 2) Add TSO/GRO support to ibmveth, from Thomas Falcon.
 3) Move away from cached routes in ipv6, just like ipv4, from Martin KaFai Lau.
 4) Lots of new rhashtable tests, from Thomas Graf.
 5) Run ingress qdisc lockless, from Alexei Starovoitov.
 6) Allow servers to fetch TCP packet headers for SYN packets of new connections, for fingerprinting. From Eric Dumazet.
 7) Add mode parameter to pktgen, for testing receive. From Alexei Starovoitov.
 8) Cache access optimizations via simplifications of build_skb(), from Alexander Duyck.
 9) Move page frag allocator under mm/, also from Alexander.
10) Add xmit_more support to hv_netvsc, from KY Srinivasan.
11) Add a counter guard in case we try to perform endless reclassify loops in the packet scheduler.
12) Extend flow dissector to be programmable and use it in new "Flower" classifier. From Jiri Pirko.
13) AF_PACKET fanout rollover fixes, performance improvements, and new statistics. From Willem de Bruijn.
14) Add netdev driver for GENEVE tunnels, from John W Linville.
15) Add ingress netfilter hooks and filtering, from Pablo Neira Ayuso.
16) Fix handling of epoll edge triggers in TCP, from Eric Dumazet.
17) Add an ECN retry fallback for the initial TCP handshake, from Daniel Borkmann.
18) Add tail call support to BPF, from Alexei Starovoitov.
19) Add several pktgen helper scripts, from Jesper Dangaard Brouer.
20) Add zerocopy support to AF_UNIX, from Hannes Frederic Sowa.
21) Favor even port numbers for allocation to connect() requests, and odd port numbers for bind(0), in an effort to help avoid ip_local_port_range exhaustion. From Eric Dumazet.
22) Add Cavium ThunderX driver, from Sunil Goutham.
23) Allow bpf programs to access skb_iif and dev->ifindex SKB metadata, from Alexei Starovoitov.
24) Add support for T6 chips in cxgb4vf driver, from Hariprasad Shenai.
25) Double TCP Small Queues default to 256K to accommodate situations like the XEN driver and wireless aggregation. From Wei Liu.
26) Add more entropy inputs to flow dissector, from Tom Herbert.
27) Add CDG congestion control algorithm to TCP, from Kenneth Klette Jonassen.
28) Convert ipset over to RCU locking, from Jozsef Kadlecsik.
29) Track and act upon link status of ipv4 route nexthops, from Andy Gospodarek.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1670 commits)
  bridge: vlan: flush the dynamically learned entries on port vlan delete
  bridge: multicast: add a comment to br_port_state_selection about blocking state
  net: inet_diag: export IPV6_V6ONLY sockopt
  stmmac: troubleshoot unexpected bits in des0 & des1
  net: ipv4 sysctl option to ignore routes when nexthop link is down
  net: track link-status of ipv4 nexthops
  net: switchdev: ignore unsupported bridge flags
  net: Cavium: Fix MAC address setting in shutdown state
  drivers: net: xgene: fix for ACPI support without ACPI
  ip: report the original address of ICMP messages
  net/mlx5e: Prefetch skb data on RX
  net/mlx5e: Pop cq outside mlx5e_get_cqe
  net/mlx5e: Remove mlx5e_cq.sqrq back-pointer
  net/mlx5e: Remove extra spaces
  net/mlx5e: Avoid TX CQE generation if more xmit packets expected
  net/mlx5e: Avoid redundant dev_kfree_skb() upon NOP completion
  net/mlx5e: Remove re-assignment of wq type in mlx5e_enable_rq()
  net/mlx5e: Use skb_shinfo(skb)->gso_segs rather than counting them
  net/mlx5e: Static mapping of netdev priv resources to/from netdev TX queues
  net/mlx4_en: Use HW counters for rx/tx bytes/packets in PF device
  ...
Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c |   7
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c        |  49
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c       | 118
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h    |   8
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c         |   7
5 files changed, 90 insertions, 99 deletions
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0f00204d2ece..21cb41a60fe8 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -189,7 +189,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
{
int i;
u64 guid_indexes;
- int slave_id;
+ int slave_id, slave_port;
enum slave_port_state new_state;
enum slave_port_state prev_state;
__be64 tmp_cur_ag, form_cache_ag;
@@ -217,6 +217,11 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
if (slave_id >= dev->dev->persist->num_vfs + 1)
return;
+
+ slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
+ if (slave_port < 0) /* this port isn't available for the VF */
+ continue;
+
tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
form_cache_ag = get_cached_alias_guid(dev, port_num,
(NUM_ALIAS_GUID_IN_REC * block_num) + i);
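The added check above skips VFs for which the physical port is not provisioned. A minimal sketch of that skip-on-negative guard in isolation (assumed semantics: mlx4_phys_to_slave_port() returns a negative value when the VF has no slave port on port_num, as the added comment indicates; notify_one_slave() is a hypothetical stand-in for the notification work):

    /* Sketch, not the driver's code: walk slaves, skipping unmapped ports. */
    static void notify_slaves(struct mlx4_ib_dev *dev, u8 port_num, int num_slaves)
    {
            int slave_id, slave_port;

            for (slave_id = 0; slave_id < num_slaves; slave_id++) {
                    slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id,
                                                         port_num);
                    if (slave_port < 0)     /* port not available for this VF */
                            continue;
                    notify_one_slave(dev, slave_id, slave_port);
            }
    }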
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 3e2dee46caa2..85a50df2f203 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -64,14 +64,6 @@ enum {
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
-/* Counters should saturate once they reach their maximum value */
-#define ASSIGN_32BIT_COUNTER(counter, value) do {\
- if ((value) > U32_MAX) \
- counter = cpu_to_be32(U32_MAX); \
- else \
- counter = cpu_to_be32(value); \
-} while (0)
-
struct mlx4_mad_rcv_buf {
struct ib_grh grh;
u8 payload[256];
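For context, the ASSIGN_32BIT_COUNTER macro removed above clamped 64-bit values into 32-bit big-endian PMA counters instead of letting them wrap. Restated as an equivalent inline helper (a sketch; U32_MAX and cpu_to_be32() come from standard kernel headers):

    /* Saturating assignment: values above U32_MAX report as U32_MAX. */
    static inline __be32 saturated_counter32(u64 value)
    {
            return cpu_to_be32(value > U32_MAX ? U32_MAX : (u32)value);
    }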
@@ -830,31 +822,25 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_counter counter_stats;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int err;
- u32 inmod = dev->counters[port_num - 1] & 0xffff;
- u8 mode;
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return -EINVAL;
- mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
- if (IS_ERR(mailbox))
- return IB_MAD_RESULT_FAILURE;
-
- err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
- MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
- MLX4_CMD_WRAPPED);
+ memset(&counter_stats, 0, sizeof(counter_stats));
+ err = mlx4_get_counter_stats(dev->dev,
+ dev->counters[port_num - 1].index,
+ &counter_stats, 0);
if (err)
err = IB_MAD_RESULT_FAILURE;
else {
memset(out_mad->data, 0, sizeof out_mad->data);
- mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
- switch (mode & 0xf) {
+ switch (counter_stats.counter_mode & 0xf) {
case 0:
- edit_counter(mailbox->buf,
- (void *)(out_mad->data + 40));
+ edit_counter(&counter_stats,
+ (void *)(out_mad->data + 40));
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
break;
default:
@@ -862,8 +848,6 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
}
- mlx4_free_cmd_mailbox(dev->dev, mailbox);
-
return err;
}
@@ -873,6 +857,7 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
@@ -881,8 +866,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
switch (rdma_port_get_link_layer(ibdev, port_num)) {
case IB_LINK_LAYER_INFINIBAND:
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ if (!mlx4_is_slave(dev->dev))
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
case IB_LINK_LAYER_ETHERNET:
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);
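Note the fall-through this hunk creates: on an InfiniBand link layer the PF still answers MADs via ib_process_mad(), while a VF (slave) drops into the Ethernet branch so PMA counter queries are served from its per-VF counter by iboe_process_mad(). A sketch of the dispatch with the fall-through made explicit (the default case is assumed for completeness, not shown on this page):

    switch (rdma_port_get_link_layer(ibdev, port_num)) {
    case IB_LINK_LAYER_INFINIBAND:
            if (!mlx4_is_slave(dev->dev))
                    return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
                                          in_grh, in_mad, out_mad);
            /* fall through: slaves use the counter-based path */
    case IB_LINK_LAYER_ETHERNET:
            return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
                                    in_grh, in_mad, out_mad);
    default:
            return -EINVAL;
    }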
@@ -1375,14 +1361,17 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
* standard address handle by decoding the tunnelled mlx4_ah fields */
memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
ah.ibah.device = ctx->ib_dev;
+
+ port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
+ port = mlx4_slave_convert_port(dev->dev, slave, port);
+ if (port < 0)
+ return;
+ ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+
mlx4_ib_query_ah(&ah.ibah, &ah_attr);
if (ah_attr.ah_flags & IB_AH_GRH)
fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
- port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
- if (port < 0)
- return;
- ah_attr.port_num = port;
memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
/* if the slave has a default vlan, use it */
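The reordering in this hunk is the point: the slave-to-physical port translation now happens on the raw ah.av.ib.port_pd word before mlx4_ib_query_ah() decodes it, rather than patching ah_attr.port_num afterwards. The port number sits in the top byte of that big-endian word, so the update is a read-modify-write, restated here with the bit layout spelled out:

    /* port_pd layout (big endian): bits 31:24 = port, bits 23:0 = PD */
    port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
    port = mlx4_slave_convert_port(dev->dev, slave, port);
    if (port < 0)
            return; /* no mapping for this slave: drop the tunnelled MAD */
    ah.av.ib.port_pd = cpu_to_be32(port << 24 |
                                   (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));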
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 166da787780c..067a691ecbed 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1146,7 +1146,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_NATIVE);
+ MLX4_CMD_WRAPPED);
if (ret == -ENOMEM)
pr_err("mcg table is full. Fail to register network rule.\n");
else if (ret == -ENXIO)
@@ -1163,7 +1163,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
int err;
err = mlx4_cmd(dev, reg_id, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_NATIVE);
+ MLX4_CMD_WRAPPED);
if (err)
pr_err("Fail to detach network rule. registration id = 0x%llx\n",
reg_id);
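Both flow-steering hunks make the same one-word change: MLX4_CMD_NATIVE becomes MLX4_CMD_WRAPPED, so the attach/detach firmware commands go through the mlx4 command wrapper, the path the PF mediates for SR-IOV functions, instead of being issued natively. A sketch of the detach call after the change (the SR-IOV rationale is the usual meaning of wrapped vs. native in mlx4 and is not stated on this page):

    err = mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
                   MLX4_CMD_TIME_CLASS_A,
                   MLX4_CMD_WRAPPED);      /* was MLX4_CMD_NATIVE */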
@@ -2098,77 +2098,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
- char name[80];
- int eq_per_port = 0;
- int added_eqs = 0;
- int total_eqs = 0;
- int i, j, eq;
-
- /* Legacy mode or comp_pool is not large enough */
- if (dev->caps.comp_pool == 0 ||
- dev->caps.num_ports > dev->caps.comp_pool)
- return;
-
- eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
+ int i, j, eq = 0, total_eqs = 0;
- /* Init eq table */
- added_eqs = 0;
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
- added_eqs += eq_per_port;
-
- total_eqs = dev->caps.num_comp_vectors + added_eqs;
-
- ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+ ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
+ sizeof(ibdev->eq_table[0]), GFP_KERNEL);
if (!ibdev->eq_table)
return;
- ibdev->eq_added = added_eqs;
-
- eq = 0;
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
- for (j = 0; j < eq_per_port; j++) {
- snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
- i, j, dev->persist->pdev->bus->name);
- /* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(dev, name, NULL,
- &ibdev->eq_table[eq])) {
- /* Use legacy (same as mlx4_en driver) */
- pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
- ibdev->eq_table[eq] =
- (eq % dev->caps.num_comp_vectors);
- }
- eq++;
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
+ j++, total_eqs++) {
+ if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
+ continue;
+ ibdev->eq_table[eq] = total_eqs;
+ if (!mlx4_assign_eq(dev, i,
+ &ibdev->eq_table[eq]))
+ eq++;
+ else
+ ibdev->eq_table[eq] = -1;
}
}
- /* Fill the rest of the vector with legacy EQ */
- for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
- ibdev->eq_table[eq++] = i;
+ for (i = eq; i < dev->caps.num_comp_vectors;
+ ibdev->eq_table[i++] = -1)
+ ;
/* Advertise the new number of EQs to clients */
- ibdev->ib_dev.num_comp_vectors = total_eqs;
+ ibdev->ib_dev.num_comp_vectors = eq;
}
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
int i;
+ int total_eqs = ibdev->ib_dev.num_comp_vectors;
- /* no additional eqs were added */
+ /* no eqs were allocated */
if (!ibdev->eq_table)
return;
/* Reset the advertised EQ number */
- ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+ ibdev->ib_dev.num_comp_vectors = 0;
- /* Free only the added eqs */
- for (i = 0; i < ibdev->eq_added; i++) {
- /* Don't free legacy eqs if used */
- if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
- continue;
+ for (i = 0; i < total_eqs; i++)
mlx4_release_eq(dev, ibdev->eq_table[i]);
- }
kfree(ibdev->eq_table);
+ ibdev->eq_table = NULL;
}
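The rewritten mlx4_ib_alloc_eqs() drops the comp_pool bookkeeping entirely: it walks each port's EQs, counts shared vectors only once, records claimed vectors in eq_table, and marks everything else -1 so that mlx4_ib_free_eqs() can release exactly the claimed set. A condensed sketch of the allocation loop, using the helpers the hunk calls (mlx4_get_eqs_per_port(), mlx4_is_eq_shared(), mlx4_assign_eq()):

    int i, j, eq = 0, total_eqs = 0;

    for (i = 1; i <= dev->caps.num_ports; i++) {
            for (j = 0; j < mlx4_get_eqs_per_port(dev, i); j++, total_eqs++) {
                    if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
                            continue;       /* shared EQ, counted for port 1 */
                    ibdev->eq_table[eq] = total_eqs;
                    if (!mlx4_assign_eq(dev, i, &ibdev->eq_table[eq]))
                            eq++;                   /* claimed this vector */
                    else
                            ibdev->eq_table[eq] = -1;  /* slot stays free */
            }
    }
    ibdev->ib_dev.num_comp_vectors = eq;    /* advertise only what we claimed */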
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -2203,6 +2178,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
struct mlx4_ib_iboe *iboe;
int ib_num_ports = 0;
int num_req_counters;
+ int allocated;
+ u32 counter_index;
pr_info_once("%s", mlx4_ib_version);
@@ -2373,19 +2350,31 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
for (i = 0; i < num_req_counters; ++i) {
mutex_init(&ibdev->qp1_proxy_lock[i]);
+ allocated = 0;
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
- err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
+ err = mlx4_counter_alloc(ibdev->dev, &counter_index);
+ /* if failed to allocate a new counter, use default */
if (err)
- ibdev->counters[i] = -1;
- } else {
- ibdev->counters[i] = -1;
+ counter_index =
+ mlx4_get_default_counter_index(dev,
+ i + 1);
+ else
+ allocated = 1;
+ } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
+ counter_index = mlx4_get_default_counter_index(dev,
+ i + 1);
}
+ ibdev->counters[i].index = counter_index;
+ ibdev->counters[i].allocated = allocated;
+ pr_info("counter index %d for port %d allocated %d\n",
+ counter_index, i + 1, allocated);
}
if (mlx4_is_bonded(dev))
- for (i = 1; i < ibdev->num_ports ; ++i)
- ibdev->counters[i] = ibdev->counters[0];
-
+ for (i = 1; i < ibdev->num_ports ; ++i) {
+ ibdev->counters[i].index = ibdev->counters[0].index;
+ ibdev->counters[i].allocated = 0;
+ }
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
ib_num_ports++;
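The counter setup above follows an allocate-or-fall-back pattern: a RoCE (Ethernet) port tries to take a dedicated HW counter and records ownership, while an IB port, or any port whose allocation failed, uses the device default counter, which must never be freed. The pattern in isolation (a sketch; is_eth and port are hypothetical locals, the helpers are the ones the hunk calls):

    u32 counter_index;
    int allocated = 0;

    if (is_eth && !mlx4_counter_alloc(ibdev->dev, &counter_index))
            allocated = 1;  /* we own this counter; free it on teardown */
    else
            counter_index = mlx4_get_default_counter_index(dev, port);

    ibdev->counters[port - 1].index = counter_index;
    ibdev->counters[port - 1].allocated = allocated;

The allocated flag is what the err_counter: path and mlx4_ib_remove() below test before calling mlx4_counter_free(), so default counters are never returned to the pool.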
@@ -2525,10 +2514,12 @@ err_steer_qp_release:
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
err_counter:
- for (; i; --i)
- if (ibdev->counters[i - 1] != -1)
- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
-
+ for (i = 0; i < ibdev->num_ports; ++i) {
+ if (ibdev->counters[i].index != -1 &&
+ ibdev->counters[i].allocated)
+ mlx4_counter_free(ibdev->dev,
+ ibdev->counters[i].index);
+ }
err_map:
iounmap(ibdev->uar_map);
@@ -2645,8 +2636,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- if (ibdev->counters[p] != -1)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ if (ibdev->counters[p].index != -1 &&
+ ibdev->counters[p].allocated)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p].index);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 7933adfff662..334387f63358 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -504,6 +504,11 @@ struct mlx4_ib_iov_port {
struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};
+struct counter_index {
+ u32 index;
+ u8 allocated;
+};
+
struct mlx4_ib_dev {
struct ib_device ib_dev;
struct mlx4_dev *dev;
@@ -522,9 +527,8 @@ struct mlx4_ib_dev {
struct mutex cap_mask_mutex;
bool ib_active;
struct mlx4_ib_iboe iboe;
- int counters[MLX4_MAX_PORTS];
+ struct counter_index counters[MLX4_MAX_PORTS];
int *eq_table;
- int eq_added;
struct kobject *iov_parent;
struct kobject *ports_parent;
struct kobject *dev_ports_parent[MLX4_MFUNC_MAX];
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 02fc91c68027..c5a3a5f0de41 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1539,12 +1539,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
- if (dev->counters[qp->port - 1] != -1) {
+ if (dev->counters[qp->port - 1].index != -1) {
context->pri_path.counter_index =
- dev->counters[qp->port - 1];
+ dev->counters[qp->port - 1].index;
optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
} else
- context->pri_path.counter_index = 0xff;
+ context->pri_path.counter_index =
+ MLX4_SINK_COUNTER_INDEX(dev->dev);
if (qp->flags & MLX4_IB_QP_NETIF) {
mlx4_ib_steer_qp_reg(dev, qp, 1);
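With the per-port counter_index struct in place, the INIT-to-RTR transition programs either the port's counter or, when none is usable, the device sink counter instead of the old hard-coded 0xff. The selection restated with comments (MLX4_SINK_COUNTER_INDEX as used by the hunk):

    if (dev->counters[qp->port - 1].index != -1) {
            context->pri_path.counter_index =
                    dev->counters[qp->port - 1].index;
            optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX; /* flag the change to FW */
    } else {
            /* no usable counter: route accounting to the sink index */
            context->pri_path.counter_index =
                    MLX4_SINK_COUNTER_INDEX(dev->dev);
    }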