path: root/drivers/net/ethernet/qlogic/qed
author    Mintz, Yuval <Yuval.Mintz@cavium.com>    2017-01-01 13:57:07 +0200
committer David S. Miller <davem@davemloft.net>    2017-01-01 21:02:14 -0500
commit    f29ffdb65ff0eaf95d2a2b80f0dee3fbd5a64772 (patch)
tree      118fc535a39a0cde87f3bf6c483687ffa75adf94 /drivers/net/ethernet/qlogic/qed
parent    qede: Remove unnecessary datapath dereference (diff)
qed*: RSS indirection based on queue-handles
A step toward having qede agnostic to the queue configurations in firmware/hardware - let the RSS indirections use queue handles instead of actual queue indices.

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c    | 232
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h    |  28
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c |  82
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c    |  12
4 files changed, 220 insertions(+), 134 deletions(-)
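Before the diffs, the gist of the interface change: the indirection table in struct qed_rss_params (see the qed_l2.h hunks below) used to carry u16 queue indices and now carries opaque per-queue handles, so upper layers never touch firmware/hardware queue numbering. A minimal standalone sketch of the idea - struct queue_handle and fill_ind_table are simplified, hypothetical stand-ins, not the driver's actual definitions:

#include <stddef.h>

#define QED_RSS_IND_TABLE_SIZE 128

/* Before: the table held raw queue indices. */
struct rss_params_old {
	unsigned short rss_ind_table[QED_RSS_IND_TABLE_SIZE];
};

/* After: the table holds opaque per-queue handles; each consumer
 * (PF ramrod, VF TLV) derives whatever index it needs from the handle.
 */
struct queue_handle;			/* stands in for struct qed_queue_cid */
struct rss_params_new {
	void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
};

/* Spread num_queues handles round-robin over the table, as qede would. */
static void fill_ind_table(struct rss_params_new *p,
			   struct queue_handle **queues, size_t num_queues)
{
	size_t i;

	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
		p->rss_ind_table[i] = queues[i % num_queues];
}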
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 03d31b394df7..a35db6951147 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -98,6 +98,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->cid = cid;
p_cid->vf_qid = vf_qid;
p_cid->rel = *p_params;
+ p_cid->p_owner = p_hwfn;
/* Don't try calculating the absolute indices for VFs */
if (IS_VF(p_hwfn->cdev)) {
@@ -272,76 +273,103 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
- struct qed_rss_params *p_params)
+ struct qed_rss_params *p_rss)
{
- struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
- u16 abs_l2_queue = 0, capabilities = 0;
- int rc = 0, i;
+ struct eth_vport_rss_config *p_config;
+ u16 capabilities = 0;
+ int i, table_size;
+ int rc = 0;
- if (!p_params) {
+ if (!p_rss) {
p_ramrod->common.update_rss_flg = 0;
return rc;
}
+ p_config = &p_ramrod->rss_config;
- BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
- ETH_RSS_IND_TABLE_ENTRIES_NUM);
+ BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
- rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+ rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
if (rc)
return rc;
- p_ramrod->common.update_rss_flg = p_params->update_rss_config;
- rss->update_rss_capabilities = p_params->update_rss_capabilities;
- rss->update_rss_ind_table = p_params->update_rss_ind_table;
- rss->update_rss_key = p_params->update_rss_key;
+ p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+ p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+ p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+ p_config->update_rss_key = p_rss->update_rss_key;
- rss->rss_mode = p_params->rss_enable ?
- ETH_VPORT_RSS_MODE_REGULAR :
- ETH_VPORT_RSS_MODE_DISABLED;
+ p_config->rss_mode = p_rss->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR :
+ ETH_VPORT_RSS_MODE_DISABLED;
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV4));
+ !!(p_rss->rss_caps & QED_RSS_IPV4));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV6));
+ !!(p_rss->rss_caps & QED_RSS_IPV6));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+ !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+ !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+ !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
- !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
- rss->tbl_size = p_params->rss_table_size_log;
+ !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
+ p_config->tbl_size = p_rss->rss_table_size_log;
- rss->capabilities = cpu_to_le16(capabilities);
+ p_config->capabilities = cpu_to_le16(capabilities);
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
p_ramrod->common.update_rss_flg,
- rss->rss_mode, rss->update_rss_capabilities,
- capabilities, rss->update_rss_ind_table,
- rss->update_rss_key);
+ p_config->rss_mode,
+ p_config->update_rss_capabilities,
+ p_config->capabilities,
+ p_config->update_rss_ind_table, p_config->update_rss_key);
- for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
- rc = qed_fw_l2_queue(p_hwfn,
- (u8)p_params->rss_ind_table[i],
- &abs_l2_queue);
- if (rc)
- return rc;
+ table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
+ for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
- rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
- i, rss->indirection_table[i]);
+ if (!p_queue)
+ return -EINVAL;
+
+ p_config->indirection_table[i] =
+ cpu_to_le16(p_queue->abs.queue_id);
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ le16_to_cpu(p_config->indirection_table[i]),
+ le16_to_cpu(p_config->indirection_table[i + 1]),
+ le16_to_cpu(p_config->indirection_table[i + 2]),
+ le16_to_cpu(p_config->indirection_table[i + 3]),
+ le16_to_cpu(p_config->indirection_table[i + 4]),
+ le16_to_cpu(p_config->indirection_table[i + 5]),
+ le16_to_cpu(p_config->indirection_table[i + 6]),
+ le16_to_cpu(p_config->indirection_table[i + 7]),
+ le16_to_cpu(p_config->indirection_table[i + 8]),
+ le16_to_cpu(p_config->indirection_table[i + 9]),
+ le16_to_cpu(p_config->indirection_table[i + 10]),
+ le16_to_cpu(p_config->indirection_table[i + 11]),
+ le16_to_cpu(p_config->indirection_table[i + 12]),
+ le16_to_cpu(p_config->indirection_table[i + 13]),
+ le16_to_cpu(p_config->indirection_table[i + 14]),
+ le16_to_cpu(p_config->indirection_table[i + 15]));
}
for (i = 0; i < 10; i++)
- rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+ p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
return rc;
}
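The rewritten qed_sp_vport_update_rss() above no longer translates indices via qed_fw_l2_queue(); it reads abs.queue_id straight from each handle, and writes only min(QED_RSS_IND_TABLE_SIZE, 2^tbl_size) entries. A small standalone sketch of that clamp arithmetic (rss_table_entries is a made-up name; the driver itself uses min_t):

#include <stdio.h>

#define QED_RSS_IND_TABLE_SIZE 128

/* Same arithmetic as the driver's
 *   table_size = min_t(int, QED_RSS_IND_TABLE_SIZE, 1 << p_config->tbl_size);
 */
static int rss_table_entries(unsigned char tbl_size_log)
{
	int requested = 1 << tbl_size_log;

	return requested < QED_RSS_IND_TABLE_SIZE ?
	       requested : QED_RSS_IND_TABLE_SIZE;
}

int main(void)
{
	printf("log2=7 -> %d entries\n", rss_table_entries(7)); /* 128 (PF default) */
	printf("log2=6 -> %d entries\n", rss_table_entries(6)); /* 64 (per-hwfn CMT) */
	return 0;
}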
@@ -1899,18 +1927,84 @@ static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
return 0;
}
+static int qed_update_vport_rss(struct qed_dev *cdev,
+ struct qed_update_vport_rss_params *input,
+ struct qed_rss_params *rss)
+{
+ int i, fn;
+
+ /* Update configuration with what's correct regardless of CMT */
+ rss->update_rss_config = 1;
+ rss->rss_enable = 1;
+ rss->update_rss_capabilities = 1;
+ rss->update_rss_ind_table = 1;
+ rss->update_rss_key = 1;
+ rss->rss_caps = input->rss_caps;
+ memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
+
+ /* In the regular scenario, we'd simply need to take the input handlers.
+ * But in CMT, we'd have to split the handlers according to the
+ * engine they were configured on. We'd then have to understand
+ * whether RSS is really required, since 2-queues on CMT doesn't
+ * require RSS.
+ */
+ if (cdev->num_hwfns == 1) {
+ memcpy(rss->rss_ind_table,
+ input->rss_ind_table,
+ QED_RSS_IND_TABLE_SIZE * sizeof(void *));
+ rss->rss_table_size_log = 7;
+ return 0;
+ }
+
+ /* Start by copying the non-specific information to the 2nd copy */
+ memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
+
+ /* CMT should be round-robin */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ struct qed_queue_cid *cid = input->rss_ind_table[i];
+ struct qed_rss_params *t_rss;
+
+ if (cid->p_owner == QED_LEADING_HWFN(cdev))
+ t_rss = &rss[0];
+ else
+ t_rss = &rss[1];
+
+ t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
+ }
+
+ /* Make sure RSS is actually required */
+ for_each_hwfn(cdev, fn) {
+ for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
+ if (rss[fn].rss_ind_table[i] !=
+ rss[fn].rss_ind_table[0])
+ break;
+ }
+ if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
+ DP_VERBOSE(cdev, NETIF_MSG_IFUP,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ return -EINVAL;
+ }
+ rss[fn].rss_table_size_log = 6;
+ }
+
+ return 0;
+}
+
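qed_update_vport_rss() above is the CMT-aware half: using the new p_owner back-pointer added to each queue-cid, it splits the single 128-entry table into one table per engine, then disables RSS for any hwfn whose table degenerates to a single queue. A simplified standalone sketch of both steps, assuming queue handles alternate between engines the way qede assigns them (struct handle and split_rss_tables are illustrative, not driver code):

#include <stdbool.h>

#define IND_TABLE_SIZE 128

struct handle { int engine; };	/* stand-in for qed_queue_cid + p_owner */

/* Split the combined table across num_engines per-engine tables; returns
 * false when an engine ends up with a single distinct queue, i.e. RSS
 * would be pointless and should be disabled.
 */
static bool split_rss_tables(struct handle **in,
			     struct handle **out[], int num_engines)
{
	int i, eng;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		out[in[i]->engine][i / num_engines] = in[i];

	for (eng = 0; eng < num_engines; eng++) {
		for (i = 1; i < IND_TABLE_SIZE / num_engines; i++)
			if (out[eng][i] != out[eng][0])
				break;
		if (i == IND_TABLE_SIZE / num_engines)
			return false;	/* one queue per engine: disable RSS */
	}
	return true;
}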
static int qed_update_vport(struct qed_dev *cdev,
struct qed_update_vport_params *params)
{
struct qed_sp_vport_update_params sp_params;
- struct qed_rss_params sp_rss_params;
- int rc, i;
+ struct qed_rss_params *rss;
+ int rc = 0, i;
if (!cdev)
return -ENODEV;
+ rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
+ if (!rss)
+ return -ENOMEM;
+
memset(&sp_params, 0, sizeof(sp_params));
- memset(&sp_rss_params, 0, sizeof(sp_rss_params));
/* Translate protocol params into sp params */
sp_params.vport_id = params->vport_id;
@@ -1924,66 +2018,24 @@ static int qed_update_vport(struct qed_dev *cdev,
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
- /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
- * We need to re-fix the rss values per engine for CMT.
- */
- if (cdev->num_hwfns > 1 && params->update_rss_flg) {
- struct qed_update_vport_rss_params *rss = &params->rss_params;
- int k, max = 0;
-
- /* Find largest entry, since it's possible RSS needs to
- * be disabled [in case only 1 queue per-hwfn]
- */
- for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
- max = (max > rss->rss_ind_table[k]) ?
- max : rss->rss_ind_table[k];
-
- /* Either fix RSS values or disable RSS */
- if (cdev->num_hwfns < max + 1) {
- int divisor = (max + cdev->num_hwfns - 1) /
- cdev->num_hwfns;
-
- DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "CMT - fixing RSS values (modulo %02x)\n",
- divisor);
-
- for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
- rss->rss_ind_table[k] =
- rss->rss_ind_table[k] % divisor;
- } else {
- DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
- "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ /* Prepare the RSS configuration */
+ if (params->update_rss_flg)
+ if (qed_update_vport_rss(cdev, &params->rss_params, rss))
params->update_rss_flg = 0;
- }
- }
-
- /* Now, update the RSS configuration for actual configuration */
- if (params->update_rss_flg) {
- sp_rss_params.update_rss_config = 1;
- sp_rss_params.rss_enable = 1;
- sp_rss_params.update_rss_capabilities = 1;
- sp_rss_params.update_rss_ind_table = 1;
- sp_rss_params.update_rss_key = 1;
- sp_rss_params.rss_caps = params->rss_params.rss_caps;
- sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
- memcpy(sp_rss_params.rss_ind_table,
- params->rss_params.rss_ind_table,
- QED_RSS_IND_TABLE_SIZE * sizeof(u16));
- memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
- QED_RSS_KEY_SIZE * sizeof(u32));
- sp_params.rss_params = &sp_rss_params;
- }
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (params->update_rss_flg)
+ sp_params.rss_params = &rss[i];
+
sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = qed_sp_vport_update(p_hwfn, &sp_params,
QED_SPQ_MODE_EBLOCK,
NULL);
if (rc) {
DP_ERR(cdev, "Failed to update VPORT\n");
- return rc;
+ goto out;
}
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
@@ -1992,7 +2044,9 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg);
}
- return 0;
+out:
+ vfree(rss);
+ return rc;
}
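Note the allocation change in qed_update_vport(): with 128 pointers the per-hwfn parameter block is over 1 KB on 64-bit (the old u16 table was 256 bytes), so it moves off the stack to vzalloc(), with all exits funnelled through one cleanup label. A generic sketch of the pattern, using userspace stand-ins (calloc/free) for vzalloc()/vfree():

#include <errno.h>
#include <stdlib.h>

struct rss_params { void *ind_table[128]; unsigned int key[10]; };

static int update_vport(int num_hwfns)
{
	struct rss_params *rss;
	int i, rc = 0;

	/* One parameter block per hwfn; calloc stands in for vzalloc(). */
	rss = calloc(num_hwfns, sizeof(*rss));
	if (!rss)
		return -ENOMEM;

	for (i = 0; i < num_hwfns; i++) {
		rc = 0; /* qed_sp_vport_update(&rss[i], ...) would go here */
		if (rc)
			goto out;	/* single exit frees on error too */
	}
out:
	free(rss);	/* stands in for vfree() */
	return rc;
}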
static int qed_start_rxq(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 2f0303761d6b..93cb932ef663 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -39,6 +39,20 @@
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+
+ /* Indirection table consists of rx queue handles */
+ void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
struct qed_sge_tpa_params {
u8 max_buffers_per_cqe;
@@ -156,18 +170,6 @@ struct qed_sp_vport_start_params {
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
-struct qed_rss_params {
- u8 update_rss_config;
- u8 rss_enable;
- u8 rss_eng_id;
- u8 update_rss_capabilities;
- u8 update_rss_ind_table;
- u8 update_rss_key;
- u8 rss_caps;
- u8 rss_table_size_log;
- u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
- u32 rss_key[QED_RSS_KEY_SIZE];
-};
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
@@ -287,6 +289,8 @@ struct qed_queue_cid {
/* Legacy VFs might have Rx producer located elsewhere */
bool b_legacy_vf;
+
+ struct qed_hwfn *p_owner;
};
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 469e857f5dcd..b22baf5e5daf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -32,6 +32,7 @@
#include <linux/etherdevice.h>
#include <linux/crc32.h>
+#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
@@ -2318,12 +2319,14 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
struct qed_sp_vport_update_params *p_data,
struct qed_rss_params *p_rss,
- struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+ struct qed_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- u16 i, q_idx, max_q_idx;
+ bool b_reject = false;
u16 table_size;
+ u16 i, q_idx;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2347,34 +2350,39 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
p_rss->rss_eng_id = vf->relative_vf_id + 1;
p_rss->rss_caps = p_rss_tlv->rss_caps;
p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
- memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
- sizeof(p_rss->rss_ind_table));
memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
(1 << p_rss_tlv->rss_table_size_log));
- max_q_idx = ARRAY_SIZE(vf->vf_queues);
-
for (i = 0; i < table_size; i++) {
- u16 index = vf->vf_queues[0].fw_rx_qid;
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
- q_idx = p_rss->rss_ind_table[i];
- if (q_idx >= max_q_idx)
- DP_NOTICE(p_hwfn,
- "rss_ind_table[%d] = %d, rxq is out of range\n",
- i, q_idx);
- else if (!vf->vf_queues[q_idx].p_rx_cid)
- DP_NOTICE(p_hwfn,
- "rss_ind_table[%d] = %d, rxq is not active\n",
- i, q_idx);
- else
- index = vf->vf_queues[q_idx].fw_rx_qid;
- p_rss->rss_ind_table[i] = index;
+ if (!vf->vf_queues[q_idx].p_rx_cid) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: Omitting RSS due to inactive queue %08x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
}
p_data->rss_params = p_rss;
+out:
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
}
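The sriov change above introduces a second bitmask: tlvs_mask still records every extended TLV the PF processed, while tlvs_accepted records only those that passed validation, and both are echoed back in the response so the VF can distinguish "rejected" from "unsupported". A tiny sketch of that accounting (the bit position shown is illustrative, not the real enum value):

#include <stdbool.h>
#include <stdint.h>

enum { QED_IOV_VP_UPDATE_RSS = 6 };	/* illustrative bit position */

/* Outcome for a single extended TLV: always mark it as seen,
 * mark it accepted only when its validation passed.
 */
static void account_tlv(uint16_t *tlvs_mask, uint16_t *tlvs_accepted,
			int tlv_bit, bool valid)
{
	*tlvs_mask |= 1u << tlv_bit;
	if (valid)
		*tlvs_accepted |= 1u << tlv_bit;
}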
static void
@@ -2429,12 +2437,12 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
+ struct qed_rss_params *p_rss_params = NULL;
struct qed_sp_vport_update_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
struct qed_sge_tpa_params sge_tpa_params;
- struct qed_rss_params rss_params;
+ u16 tlvs_mask = 0, tlvs_accepted = 0;
u8 status = PFVF_STATUS_SUCCESS;
- u16 tlvs_mask = 0;
u16 length;
int rc;
@@ -2447,6 +2455,11 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
status = PFVF_STATUS_FAILURE;
goto out;
}
+ p_rss_params = vzalloc(sizeof(*p_rss_params));
+ if (p_rss_params == NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
memset(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
@@ -2461,20 +2474,26 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
- qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
- mbx, &tlvs_mask);
qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
&sge_tpa_params, mbx, &tlvs_mask);
- /* Just log a message if there is no single extended tlv in buffer.
- * When all features of vport update ramrod would be requested by VF
- * as extended TLVs in buffer then an error can be returned in response
- * if there is no extended TLV present in buffer.
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; In that case,
+ * they can update the mask without updating the accepted [so that
+ * PF could communicate to the VF that it has rejected the request].
*/
- if (!tlvs_mask) {
- DP_NOTICE(p_hwfn,
- "No feature tlvs found for vport update\n");
+ qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
+ if (!tlvs_accepted) {
+ if (tlvs_mask)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Upper-layer prevents VF vport configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No feature tlvs found for vport update\n");
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
@@ -2485,8 +2504,9 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
status = PFVF_STATUS_FAILURE;
out:
+ vfree(p_rss_params);
length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
- tlvs_mask, tlvs_mask);
+ tlvs_mask, tlvs_accepted);
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index af0542c0351c..9667059b15bd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -838,6 +838,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
if (p_params->rss_params) {
struct qed_rss_params *rss_params = p_params->rss_params;
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
p_rss_tlv = qed_add_tlv(p_hwfn,
@@ -860,8 +861,15 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
p_rss_tlv->rss_enable = rss_params->rss_enable;
p_rss_tlv->rss_caps = rss_params->rss_caps;
p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
- memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
- sizeof(rss_params->rss_ind_table));
+
+ table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
sizeof(rss_params->rss_key));
}
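On the VF side, the TLV sent to the PF still carries plain queue ids over the wire, so qed_vf_pf_vport_update() flattens each handle back to its relative queue id before copying, again clamped to the requested table size. A hedged sketch of that flattening (queue_cid and rel_queue_id are simplified stand-ins for qed_queue_cid and its rel.queue_id field):

#include <stdint.h>

#define T_ETH_INDIRECTION_TABLE_SIZE 128

struct queue_cid { uint8_t rel_queue_id; };	/* stand-in for qed_queue_cid */

/* Flatten the handle table into the wire-format TLV: only relative queue
 * ids cross the PF/VF channel, and only as many entries as the requested
 * table size implies.
 */
static void fill_rss_tlv(uint8_t *tlv_table, void **ind_table,
			 uint8_t table_size_log)
{
	int i, table_size = 1 << table_size_log;

	if (table_size > T_ETH_INDIRECTION_TABLE_SIZE)
		table_size = T_ETH_INDIRECTION_TABLE_SIZE;

	for (i = 0; i < table_size; i++) {
		struct queue_cid *p = ind_table[i];

		tlv_table[i] = p->rel_queue_id;
	}
}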