Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h              |    6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c         |   16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h         |    2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  | 1000
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h  |   20
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h          |   72
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c  |    6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c     |  803
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c |  300
9 files changed, 2133 insertions, 92 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4cf99292fc2f..36d9401a6258 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1109,4 +1109,10 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add);
+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add);
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 101702af099f..f6d37456f3b7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -69,12 +69,6 @@ static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
-static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
- struct i40e_cloud_filter *filter,
- bool add);
-static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
- struct i40e_cloud_filter *filter,
- bool add);
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type);
@@ -6841,8 +6835,8 @@ i40e_set_cld_element(struct i40e_cloud_filter *filter,
* Add or delete a cloud filter for a specific flow spec.
Returns 0 if the filter was successfully added or deleted.
**/
-static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
- struct i40e_cloud_filter *filter, bool add)
+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter, bool add)
{
struct i40e_aqc_cloud_filters_element_data cld_filter;
struct i40e_pf *pf = vsi->back;
@@ -6908,9 +6902,9 @@ static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
* Add or delete a cloud filter for a specific flow spec using big buffer.
Returns 0 if the filter was successfully added or deleted.
**/
-static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
- struct i40e_cloud_filter *filter,
- bool add)
+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add)
{
struct i40e_aqc_cloud_filters_element_bb cld_filter;
struct i40e_pf *pf = vsi->back;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index cd294e6a8587..b0eed8c0b2f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -39,7 +39,7 @@
#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
-#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_VF_VSI 4
#define I40E_MAX_CHAINED_RX_BUFFERS 5
#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index e9309fb9084b..5cca083da93c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -258,6 +258,38 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
}
/**
+ * i40e_get_real_pf_qid
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @queue_id: queue number
+ *
+ * wrapper function to get pf_queue_id, handling the ADq case as well
+ **/
+static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
+{
+ int i;
+
+ if (vf->adq_enabled) {
+ /* Although the VF considers all the queues (1 to 16) its own,
+ * they may actually belong to different VSIs (up to 4).
+ * We need to find which queue belongs to which VSI.
+ */
+ for (i = 0; i < vf->num_tc; i++) {
+ if (queue_id < vf->ch[i].num_qps) {
+ vsi_id = vf->ch[i].vsi_id;
+ break;
+ }
+ /* find the right queue id relative to the
+ * given VSI.
+ */
+ queue_id -= vf->ch[i].num_qps;
+ }
+ }
+
+ return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
+}
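To make the remap concrete: a minimal standalone sketch, not part of the patch, with assumed per-TC queue counts (the real counts come from vf->ch[i].num_qps):

/* Illustrative only: with two TCs of 4 queues each, VF queue 5 falls
 * past TC0's queues and resolves to queue 1 of TC1's VSI.
 */
static u16 demo_remap_qid(u16 queue_id, const u16 *num_qps, int num_tc,
			  int *tc_out)
{
	int i;

	for (i = 0; i < num_tc; i++) {
		if (queue_id < num_qps[i]) {
			*tc_out = i;		/* queue lives in this TC's VSI */
			break;
		}
		queue_id -= num_qps[i];		/* rebase into the next TC */
	}
	return queue_id;			/* now relative to that VSI */
}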
+
+/**
* i40e_config_irq_link_list
* @vf: pointer to the VF info
* @vsi_id: id of VSI as given by the FW
@@ -310,7 +342,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+ pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
wr32(hw, reg_idx, reg);
@@ -333,8 +365,9 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
if (next_q < size) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
- vsi_queue_id);
+ pf_queue_id = i40e_get_real_pf_qid(vf,
+ vsi_id,
+ vsi_queue_id);
} else {
pf_queue_id = I40E_QUEUE_END_OF_LIST;
qtype = 0;
@@ -669,18 +702,20 @@ error_param:
/**
* i40e_alloc_vsi_res
* @vf: pointer to the VF info
- * @type: type of VSI to allocate
+ * @idx: VSI index, applies only for ADq mode, zero otherwise
*
* alloc VF vsi context & resources
**/
-static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
struct i40e_mac_filter *f = NULL;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi;
+ u64 max_tx_rate = 0;
int ret = 0;
- vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
+ vf->vf_id);
if (!vsi) {
dev_err(&pf->pdev->dev,
@@ -689,7 +724,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
ret = -ENOENT;
goto error_alloc_vsi_res;
}
- if (type == I40E_VSI_SRIOV) {
+
+ if (!idx) {
u64 hena = i40e_pf_get_default_rss_hena(pf);
u8 broadcast[ETH_ALEN];
@@ -721,17 +757,29 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+ /* program mac filter only for VF VSI */
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
}
- /* program mac filter */
- ret = i40e_sync_vsi_filters(vsi);
- if (ret)
- dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ /* store the VSI index and ID for ADq; the MAC filter is not applied here */
+ if (vf->adq_enabled) {
+ vf->ch[idx].vsi_idx = vsi->idx;
+ vf->ch[idx].vsi_id = vsi->id;
+ }
/* Set VF bandwidth if specified */
if (vf->tx_rate) {
+ max_tx_rate = vf->tx_rate;
+ } else if (vf->ch[idx].max_tx_rate) {
+ max_tx_rate = vf->ch[idx].max_tx_rate;
+ }
+
+ if (max_tx_rate) {
+ max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
- vf->tx_rate / 50, 0, NULL);
+ max_tx_rate, 0, NULL);
if (ret)
dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
vf->vf_id, ret);
@@ -742,6 +790,92 @@ error_alloc_vsi_res:
}
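The rate programmed into the firmware is expressed in bandwidth credits; I40E_BW_CREDIT_DIVISOR replaces the bare literal 50 the old code divided by, so one credit appears to correspond to 50 Mbps. A minimal sketch of the conversion, under that assumption:

/* Illustrative only: assuming I40E_BW_CREDIT_DIVISOR == 50 (the
 * literal it replaces), a 1000 Mbps cap programs 20 credits.
 */
u64 max_tx_rate = 1000;				/* Mbps */
u64 credits = div_u64(max_tx_rate, 50);		/* == 20 */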
/**
+ * i40e_map_pf_queues_to_vsi
+ * @vf: pointer to the VF info
+ *
+ * The PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
+ * function handles the first part, VSILAN_QTABLE, mapping PF queues to the VSI.
+ **/
+static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, num_tc = 1; /* VF has at least one traffic class */
+ u16 vsi_id, qps;
+ int i, j;
+
+ if (vf->adq_enabled)
+ num_tc = vf->num_tc;
+
+ for (i = 0; i < num_tc; i++) {
+ if (vf->adq_enabled) {
+ qps = vf->ch[i].num_qps;
+ vsi_id = vf->ch[i].vsi_id;
+ } else {
+ qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+ vsi_id = vf->lan_vsi_id;
+ }
+
+ for (j = 0; j < 7; j++) {
+ if (j * 2 >= qps) {
+ /* end of list */
+ reg = 0x07FF07FF;
+ } else {
+ u16 qid = i40e_vc_get_pf_queue_id(vf,
+ vsi_id,
+ j * 2);
+ reg = qid;
+ qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
+ (j * 2) + 1);
+ reg |= qid << 16;
+ }
+ i40e_write_rx_ctl(hw,
+ I40E_VSILAN_QTABLE(j, vsi_id),
+ reg);
+ }
+ }
+}
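Each VSILAN_QTABLE register packs two PF queue ids, one per 16-bit half, and 0x07FF fills an unused half, so 0x07FF07FF terminates the list. A sketch of the packing with made-up queue ids:

/* Illustrative only: PF queues 24 and 25 share one register */
u16 q_even = 24, q_odd = 25;
u32 reg = q_even | ((u32)q_odd << 16);		/* == 0x00190018 */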
+
+/**
+ * i40e_map_pf_to_vf_queues
+ * @vf: pointer to the VF info
+ *
+ * The PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
+ * function handles the second part, VPLAN_QTABLE, and completes the VF mappings.
+ **/
+static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, total_qps = 0;
+ u32 qps, num_tc = 1; /* VF has at least one traffic class */
+ u16 vsi_id, qid;
+ int i, j;
+
+ if (vf->adq_enabled)
+ num_tc = vf->num_tc;
+
+ for (i = 0; i < num_tc; i++) {
+ if (vf->adq_enabled) {
+ qps = vf->ch[i].num_qps;
+ vsi_id = vf->ch[i].vsi_id;
+ } else {
+ qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+ vsi_id = vf->lan_vsi_id;
+ }
+
+ for (j = 0; j < qps; j++) {
+ qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
+
+ reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
+ reg);
+ total_qps++;
+ }
+ }
+}
+
+/**
* i40e_enable_vf_mappings
* @vf: pointer to the VF info
*
@@ -751,8 +885,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- u32 reg, total_queue_pairs = 0;
- int j;
+ u32 reg;
/* Tell the hardware we're using noncontiguous mapping. HW requires
* that VF queues be mapped using this method, even when they are
@@ -765,30 +898,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
- /* map PF queues to VF queues */
- for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
-
- reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
- wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
- total_queue_pairs++;
- }
-
- /* map PF queues to VSI */
- for (j = 0; j < 7; j++) {
- if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
- reg = 0x07FF07FF; /* unused */
- } else {
- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
- j * 2);
- reg = qid;
- qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
- (j * 2) + 1);
- reg |= qid << 16;
- }
- i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
- reg);
- }
+ i40e_map_pf_to_vf_queues(vf);
+ i40e_map_pf_queues_to_vsi(vf);
i40e_flush(hw);
}
@@ -824,7 +935,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg_idx, reg;
- int i, msix_vf;
+ int i, j, msix_vf;
/* Start by disabling VF's configuration API to prevent the OS from
* accessing the VF's VSI after it's freed / invalidated.
@@ -846,6 +957,20 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_id = 0;
vf->num_mac = 0;
}
+
+ /* do the accounting and remove the additional ADq VSIs */
+ if (vf->adq_enabled && vf->ch[0].vsi_idx) {
+ for (j = 0; j < vf->num_tc; j++) {
+ /* At this point VSI0 is already released, so don't
+ * release it again; only clear its values in the
+ * structure variables.
+ */
+ if (j)
+ i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
+ vf->ch[j].vsi_idx = 0;
+ vf->ch[j].vsi_id = 0;
+ }
+ }
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
/* disable interrupts so the VF starts in a known state */
@@ -891,7 +1016,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
int total_queue_pairs = 0;
- int ret;
+ int ret, idx;
if (vf->num_req_queues &&
vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
@@ -900,11 +1025,30 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
/* allocate hw vsi context & associated resources */
- ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
+ ret = i40e_alloc_vsi_res(vf, 0);
if (ret)
goto error_alloc;
total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+ /* allocate additional VSIs based on tc information for ADq */
+ if (vf->adq_enabled) {
+ if (pf->queues_left >=
+ (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
+ /* TC 0 always belongs to VF VSI */
+ for (idx = 1; idx < vf->num_tc; idx++) {
+ ret = i40e_alloc_vsi_res(vf, idx);
+ if (ret)
+ goto error_alloc;
+ }
+ /* send correct number of queues */
+ total_queue_pairs = I40E_MAX_VF_QUEUES;
+ } else {
+ dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
+ vf->vf_id);
+ vf->adq_enabled = false;
+ }
+ }
+
/* We account for each VF to get a default number of queue pairs. If
* the VF has now requested more, we need to account for that to make
* certain we never request more queues than we actually have left in
@@ -1537,6 +1681,27 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
}
/**
+ * i40e_del_qch - delete all the additional VSIs created as a part of ADq
+ * @vf: pointer to VF structure
+ **/
+static void i40e_del_qch(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ int i;
+
+ /* the first element in the array belongs to the primary VF VSI, which
+ * we shouldn't delete; the remaining ADq VSIs, however, must be released.
+ */
+ for (i = 1; i < vf->num_tc; i++) {
+ if (vf->ch[i].vsi_idx) {
+ i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
+ vf->ch[i].vsi_idx = 0;
+ vf->ch[i].vsi_id = 0;
+ }
+ }
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1631,6 +1796,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
+
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1855,27 +2023,37 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct i40e_pf *pf = vf->pf;
- u16 vsi_id, vsi_queue_id;
+ u16 vsi_id, vsi_queue_id = 0;
i40e_status aq_ret = 0;
- int i;
+ int i, j = 0, idx = 0;
+
+ vsi_id = qci->vsi_id;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_id = qci->vsi_id;
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
- vsi_queue_id = qpi->txq.queue_id;
- if ((qpi->txq.vsi_id != vsi_id) ||
- (qpi->rxq.vsi_id != vsi_id) ||
- (qpi->rxq.queue_id != vsi_queue_id) ||
- !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
+
+ if (!vf->adq_enabled) {
+ vsi_queue_id = qpi->txq.queue_id;
+
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != vsi_queue_id) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1887,9 +2065,33 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+
+ /* For ADq there can be up to 4 VSIs with max 4 queues each.
+ * The VF does not know about these additional VSIs; all
+ * it cares about is its own queues. The PF assigns these queues
+ * to the appropriate VSIs based on the TC mapping.
+ */
+ if (vf->adq_enabled) {
+ if (j == (vf->ch[idx].num_qps - 1)) {
+ idx++;
+ j = 0; /* resetting the queue count */
+ vsi_queue_id = 0;
+ } else {
+ j++;
+ vsi_queue_id++;
+ }
+ vsi_id = vf->ch[idx].vsi_id;
+ }
}
/* set vsi num_queue_pairs in use to num configured by VF */
- pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
+ if (!vf->adq_enabled) {
+ pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
+ qci->num_queue_pairs;
+ } else {
+ for (i = 0; i < vf->num_tc; i++)
+ pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
+ vf->ch[i].num_qps;
+ }
error_param:
/* send the response to the VF */
@@ -1898,6 +2100,33 @@ error_param:
}
/**
+ * i40e_validate_queue_map
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @queuemap: Tx or Rx queue map
+ *
+ * check if Tx or Rx queue map is valid
+ **/
+static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
+ unsigned long queuemap)
+{
+ u16 vsi_queue_id, queue_id;
+
+ for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
+ if (vf->adq_enabled) {
+ vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+ queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
+ } else {
+ queue_id = vsi_queue_id;
+ }
+
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
+ return -EINVAL;
+ }
+
+ return 0;
+}
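In the ADq branch above a set bit decomposes into a channel index and a channel-relative queue; both divisors are 4, since I40E_MAX_VF_VSI is 4 and the 16-queue/4-TC layout this patch uses implies I40E_DEFAULT_QUEUES_PER_VF is also 4. For example:

/* Illustrative only: bit 9 of a Tx/Rx queue map belongs to
 * channel 9 / 4 = 2, as queue 9 % 4 = 1 of that channel's VSI.
 */
u16 bit = 9;
u16 ch_idx = bit / 4;		/* I40E_MAX_VF_VSI */
u16 qid = bit % 4;		/* I40E_DEFAULT_QUEUES_PER_VF */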
+
+/**
* i40e_vc_config_irq_map_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1911,9 +2140,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
- u16 vsi_id, vsi_queue_id, vector_id;
+ u16 vsi_id, vector_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
@@ -1923,7 +2151,6 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < irqmap_info->num_vectors; i++) {
map = &irqmap_info->vecmap[i];
-
vector_id = map->vector_id;
vsi_id = map->vsi_id;
/* validate msg params */
@@ -1933,23 +2160,14 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- /* lookout for the invalid queue index */
- tempmap = map->rxq_map;
- for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
- vsi_queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
+ if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
}
- tempmap = map->txq_map;
- for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
- vsi_queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
+ if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
}
i40e_config_irq_link_list(vf, vsi_id, map);
@@ -1975,6 +2193,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
+ int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1993,6 +2212,16 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
aq_ret = I40E_ERR_TIMEOUT;
+
+ /* need to start the rings for the additional ADq VSIs as well */
+ if (vf->adq_enabled) {
+ /* zero belongs to LAN VSI */
+ for (i = 1; i < vf->num_tc; i++) {
+ if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
+ aq_ret = I40E_ERR_TIMEOUT;
+ }
+ }
+
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2688,6 +2917,618 @@ err:
}
/**
+ * i40e_validate_cloud_filter
+ * @vf: pointer to the VF info
+ * @tc_filter: pointer to the TC filter
+ *
+ * This function validates a cloud filter programmed as a TC filter for ADq
+ **/
+static int i40e_validate_cloud_filter(struct i40e_vf *vf,
+ struct virtchnl_filter *tc_filter)
+{
+ struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
+ struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ bool found = false;
+ int bkt;
+
+ if (!tc_filter->action) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: Currently ADq doesn't support Drop Action\n",
+ vf->vf_id);
+ goto err;
+ }
+
+ /* action_meta is TC number here to which the filter is applied */
+ if (!tc_filter->action_meta ||
+ tc_filter->action_meta > I40E_MAX_VF_VSI) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
+ vf->vf_id, tc_filter->action_meta);
+ goto err;
+ }
+
+ /* Check filter if it's programmed for advanced mode or basic mode.
+ * There are two ADq modes (for VF only),
+ * 1. Basic mode: intended to allow as many filter options as possible
+ * to be added to a VF in Non-trusted mode. Main goal is
+ * to add filters to its own MAC and VLAN id.
+ * 2. Advanced mode: is for allowing filters to be applied other than
+ * its own MAC or VLAN. This mode requires the VF to be
+ * Trusted.
+ */
+ if (mask.dst_mac[0] && !mask.dst_ip[0]) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ f = i40e_find_mac(vsi, data.dst_mac);
+
+ if (!f) {
+ dev_info(&pf->pdev->dev,
+ "Destination MAC %pM doesn't belong to VF %d\n",
+ data.dst_mac, vf->vf_id);
+ goto err;
+ }
+
+ if (mask.vlan_id) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
+ hlist) {
+ if (f->vlan == ntohs(data.vlan_id)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_info(&pf->pdev->dev,
+ "VF %d doesn't have any VLAN id %u\n",
+ vf->vf_id, ntohs(data.vlan_id));
+ goto err;
+ }
+ }
+ } else {
+ /* Check if VF is trusted */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
+ vf->vf_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask.dst_mac[0] & data.dst_mac[0]) {
+ if (is_broadcast_ether_addr(data.dst_mac) ||
+ is_zero_ether_addr(data.dst_mac)) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
+ vf->vf_id, data.dst_mac);
+ goto err;
+ }
+ }
+
+ if (mask.src_mac[0] & data.src_mac[0]) {
+ if (is_broadcast_ether_addr(data.src_mac) ||
+ is_zero_ether_addr(data.src_mac)) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
+ vf->vf_id, data.src_mac);
+ goto err;
+ }
+ }
+
+ if (mask.dst_port & data.dst_port) {
+ if (!data.dst_port) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ if (mask.src_port & data.src_port) {
+ if (!data.src_port) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
+ tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
+ vf->vf_id);
+ goto err;
+ }
+
+ if (mask.vlan_id & data.vlan_id) {
+ if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
+ dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ return I40E_SUCCESS;
+err:
+ return I40E_ERR_CONFIG;
+}
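As a hedged illustration of the basic mode described above, an untrusted VF might submit a filter like the following; the enum names are taken from the virtchnl ADq interface this patch negotiates, and the field values are made up:

/* Illustrative only: redirect a flow to TC 1 (action 0, drop, is
 * rejected above; action_meta must be 1..I40E_MAX_VF_VSI).
 */
struct virtchnl_filter demo = {
	.flow_type   = VIRTCHNL_TCP_V4_FLOW,
	.action      = VIRTCHNL_ACTION_TC_REDIRECT,
	.action_meta = 1,			/* target TC */
};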
+
+/**
+ * i40e_find_vsi_from_seid - searches for the vsi with the given seid
+ * @vf: pointer to the VF info
+ * @seid: seid of the VSI to search for
+ **/
+static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int i;
+
+ for (i = 0; i < vf->num_tc; i++) {
+ vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
+ if (vsi->seid == seid)
+ return vsi;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_del_all_cloud_filters
+ * @vf: pointer to the VF info
+ *
+ * This function deletes all cloud filters
+ **/
+static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
+{
+ struct i40e_cloud_filter *cfilter = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct hlist_node *node;
+ int ret;
+
+ hlist_for_each_entry_safe(cfilter, node,
+ &vf->cloud_filter_list, cloud_node) {
+ vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
+ vf->vf_id, cfilter->seid);
+ continue;
+ }
+
+ if (cfilter->dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
+ false);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+
+ hlist_del(&cfilter->cloud_node);
+ kfree(cfilter);
+ vf->num_cloud_filters--;
+ }
+}
+
+/**
+ * i40e_vc_del_cloud_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function deletes a cloud filter programmed as TC filter for ADq
+ **/
+static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+ struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
+ struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
+ struct i40e_cloud_filter cfilter, *cf = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct hlist_node *node;
+ i40e_status aq_ret = 0;
+ int i, ret;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (!vf->adq_enabled) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: ADq not enabled, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (i40e_validate_cloud_filter(vf, vcf)) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: Invalid input, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ memset(&cfilter, 0, sizeof(cfilter));
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
+
+ /* parse source mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
+
+ cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
+ cfilter.dst_port = mask.dst_port & tcf.dst_port;
+ cfilter.src_port = mask.src_port & tcf.src_port;
+
+ switch (vcf->flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ cfilter.n_proto = ETH_P_IP;
+ if (mask.dst_ip[0] & tcf.dst_ip[0])
+ memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ else if (mask.src_ip[0] & tcf.src_ip[0])
+ memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ cfilter.n_proto = ETH_P_IPV6;
+ if (mask.dst_ip[3] & tcf.dst_ip[3])
+ memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
+ sizeof(cfilter.ip.v6.dst_ip6));
+ if (mask.src_ip[3] & tcf.src_ip[3])
+ memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
+ sizeof(cfilter.ip.v6.src_ip6));
+ break;
+ default:
+ /* A TC filter can be configured from different field combinations;
+ * in this case IP is not part of the filter config
+ */
+ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
+ vf->vf_id);
+ }
+
+ /* get the VSI to which the TC belongs */
+ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+ cfilter.seid = vsi->seid;
+ cfilter.flags = vcf->field_flags;
+
+ /* Deleting TC filter */
+ if (tcf.dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err;
+ }
+
+ hlist_for_each_entry_safe(cf, node,
+ &vf->cloud_filter_list, cloud_node) {
+ if (cf->seid != cfilter.seid)
+ continue;
+ if (mask.dst_port)
+ if (cfilter.dst_port != cf->dst_port)
+ continue;
+ if (mask.dst_mac[0])
+ if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
+ continue;
+ /* for ipv4 data to be valid, only the first word of the mask is set */
+ if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
+ if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip)))
+ continue;
+ /* for ipv6, mask is set for all sixteen bytes (4 words) */
+ if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
+ if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
+ sizeof(cfilter.ip.v6.dst_ip6)))
+ continue;
+ if (mask.vlan_id)
+ if (cfilter.vlan_id != cf->vlan_id)
+ continue;
+
+ hlist_del(&cf->cloud_node);
+ kfree(cf);
+ vf->num_cloud_filters--;
+ }
+
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_add_cloud_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function adds a cloud filter programmed as TC filter for ADq
+ **/
+static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+ struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
+ struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
+ struct i40e_cloud_filter *cfilter = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ i40e_status aq_ret = 0;
+ int i, ret;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (!vf->adq_enabled) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: ADq is not enabled, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (i40e_validate_cloud_filter(vf, vcf)) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: Invalid input/s, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+ if (!cfilter)
+ return -ENOMEM;
+
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
+
+ /* parse source mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
+
+ cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
+ cfilter->dst_port = mask.dst_port & tcf.dst_port;
+ cfilter->src_port = mask.src_port & tcf.src_port;
+
+ switch (vcf->flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ cfilter->n_proto = ETH_P_IP;
+ if (mask.dst_ip[0] & tcf.dst_ip[0])
+ memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ else if (mask.src_ip[0] & tcf.src_ip[0])
+ memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ cfilter->n_proto = ETH_P_IPV6;
+ if (mask.dst_ip[3] & tcf.dst_ip[3])
+ memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
+ sizeof(cfilter->ip.v6.dst_ip6));
+ if (mask.src_ip[3] & tcf.src_ip[3])
+ memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
+ sizeof(cfilter->ip.v6.src_ip6));
+ break;
+ default:
+ /* A TC filter can be configured from different field combinations;
+ * in this case IP is not part of the filter config
+ */
+ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
+ vf->vf_id);
+ }
+
+ /* get the VSI to which the TC belongs */
+ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+ cfilter->seid = vsi->seid;
+ cfilter->flags = vcf->field_flags;
+
+ /* Adding cloud filter programmed as TC filter */
+ if (tcf.dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err;
+ }
+
+ INIT_HLIST_NODE(&cfilter->cloud_node);
+ hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
+ vf->num_cloud_filters++;
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_add_qch_msg: Add queue channel and enable ADq
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ **/
+static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_tc_info *tci =
+ (struct virtchnl_tc_info *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int i, adq_request_qps = 0, speed = 0;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* ADq cannot be applied if spoof check is ON */
+ if (vf->spoofchk) {
+ dev_err(&pf->pdev->dev,
+ "Spoof check is ON, turn it OFF to enable ADq\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
+ dev_err(&pf->pdev->dev,
+ "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* max number of traffic classes for VF currently capped at 4 */
+ if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
+ dev_err(&pf->pdev->dev,
+ "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
+ vf->vf_id, tci->num_tc);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* validate queues for each TC */
+ for (i = 0; i < tci->num_tc; i++)
+ if (!tci->list[i].count ||
+ tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
+ vf->vf_id, i, tci->list[i].count);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* the VF needs the max number of queues, but the default number is already allocated */
+ adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
+
+ if (pf->queues_left < adq_request_qps) {
+ dev_err(&pf->pdev->dev,
+ "No queues left to allocate to VF %d\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ } else {
+ /* we need to allocate the max VF queues to enable ADq, so as to
+ * make sure an ADq-enabled VF always gets its queues back when it
+ * goes through a reset.
+ */
+ vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
+ }
+
+ /* get link speed in Mbps to validate the rate limit */
+ switch (ls->link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ speed = SPEED_100;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ speed = SPEED_1000;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ speed = SPEED_10000;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ speed = SPEED_20000;
+ break;
+ case VIRTCHNL_LINK_SPEED_25GB:
+ speed = SPEED_25000;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ speed = SPEED_40000;
+ break;
+ default:
+ dev_err(&pf->pdev->dev,
+ "Cannot detect link speed\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* parse data from the queue channel info */
+ vf->num_tc = tci->num_tc;
+ for (i = 0; i < vf->num_tc; i++) {
+ if (tci->list[i].max_tx_rate) {
+ if (tci->list[i].max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev,
+ "Invalid max tx rate %llu specified for VF %d.",
+ tci->list[i].max_tx_rate,
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ } else {
+ vf->ch[i].max_tx_rate =
+ tci->list[i].max_tx_rate;
+ }
+ }
+ vf->ch[i].num_qps = tci->list[i].count;
+ }
+
+ /* set this flag only after making sure all inputs are sane */
+ vf->adq_enabled = true;
+ /* num_req_queues is set when the user changes the number of queues via
+ * ethtool, and this causes issues for the default VSI (which depends on
+ * this variable) when ADq is enabled, hence reset it.
+ */
+ vf->num_req_queues = 0;
+
+ /* reset the VF in order to allocate resources */
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+
+ return I40E_SUCCESS;
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_del_qch_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ **/
+static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (vf->adq_enabled) {
+ i40e_del_all_cloud_filters(vf);
+ i40e_del_qch(vf);
+ vf->adq_enabled = false;
+ vf->num_tc = 0;
+ dev_info(&pf->pdev->dev,
+ "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
+ vf->vf_id);
+ } else {
+ dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ }
+
+ /* reset the VF in order to allocate resources */
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+
+ return I40E_SUCCESS;
+
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
+ aq_ret);
+}
+
+/**
* i40e_vc_process_vf_msg
* @pf: pointer to the PF structure
* @vf_id: source VF id
@@ -2816,7 +3657,18 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_REQUEST_QUEUES:
ret = i40e_vc_request_queues_msg(vf, msg, msglen);
break;
-
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ ret = i40e_vc_add_qch_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ ret = i40e_vc_del_qch_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ ret = i40e_vc_add_cloud_filter(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ ret = i40e_vc_del_cloud_filter(vf, msg);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -3382,6 +4234,16 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
i40e_vc_disable_vf(vf);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
+
+ if (vf->adq_enabled) {
+ if (!vf->trusted) {
+ dev_info(&pf->pdev->dev,
+ "VF %u no longer Trusted, deleting all cloud filters\n",
+ vf_id);
+ i40e_del_all_cloud_filters(vf);
+ }
+ }
+
out:
return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 5efc4f92bb37..6852599b2379 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -69,6 +69,19 @@ enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_IWARP,
};
+/* In ADq, max 4 VSIs can be allocated per VF including the primary VF VSI.
+ * These variables store the indices, IDs and number of queues
+ * for each VSI, including the primary VF VSI. Each traffic class is
+ * termed a channel, and each channel can in turn have 4 queues, which
+ * means a max of 16 queues overall per VF.
+ */
+struct i40evf_channel {
+ u16 vsi_idx; /* index in PF struct for all channel VSIs */
+ u16 vsi_id; /* VSI ID used by firmware */
+ u16 num_qps; /* number of queue pairs requested by user */
+ u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
+};
+
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
@@ -111,6 +124,13 @@ struct i40e_vf {
u16 num_mac;
u16 num_vlan;
+ /* ADq related variables */
+ bool adq_enabled; /* flag to enable adq */
+ u8 num_tc;
+ struct i40evf_channel ch[I40E_MAX_VF_VSI];
+ struct hlist_head cloud_filter_list;
+ u16 num_cloud_filters;
+
/* RDMA Client */
struct virtchnl_iwarp_qvlist_info *qvlist_info;
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index b6991e8014d8..e46555ad7122 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -52,7 +52,10 @@
#include <linux/socket.h>
#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
+#include <net/pkt_cls.h>
#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
#include "i40e_type.h"
#include <linux/avf/virtchnl.h>
@@ -106,6 +109,7 @@ struct i40e_vsi {
#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40EVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
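For context (not part of the patch): mqprio hands rates to the driver in bytes per second, and 1 Mbps = 125000 bytes/s, hence the divisor:

/* Illustrative only: a 100 Mbit/s cap arrives as 12500000 B/s */
u64 bytes_per_sec = 12500000ULL;
u64 mbps = bytes_per_sec / 125000;	/* == 100, I40EVF_MBPS_DIVISOR */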
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
@@ -168,6 +172,28 @@ struct i40evf_vlan_filter {
bool add; /* filter needs to be added */
};
+#define I40EVF_MAX_TRAFFIC_CLASS 4
+/* State of traffic class creation */
+enum i40evf_tc_state_t {
+ __I40EVF_TC_INVALID, /* no traffic class, default state */
+ __I40EVF_TC_RUNNING, /* traffic classes have been created */
+};
+
+/* channel info */
+struct i40evf_channel_config {
+ struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS];
+ enum i40evf_tc_state_t state;
+ u8 total_qps;
+};
+
+/* State of cloud filter */
+enum i40evf_cloud_filter_state_t {
+ __I40EVF_CF_INVALID, /* cloud filter not added */
+ __I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */
+ __I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */
+ __I40EVF_CF_ACTIVE, /* cloud filter is active */
+};
+
/* Driver state. The order of these is important! */
enum i40evf_state_t {
__I40EVF_STARTUP, /* driver loaded, probe complete */
@@ -189,6 +215,36 @@ enum i40evf_critical_section_t {
__I40EVF_IN_REMOVE_TASK, /* device being removed */
};
+#define I40EVF_CLOUD_FIELD_OMAC 0x01
+#define I40EVF_CLOUD_FIELD_IMAC 0x02
+#define I40EVF_CLOUD_FIELD_IVLAN 0x04
+#define I40EVF_CLOUD_FIELD_TEN_ID 0x08
+#define I40EVF_CLOUD_FIELD_IIP 0x10
+
+#define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC
+#define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC
+#define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\
+ I40EVF_CLOUD_FIELD_IVLAN)
+#define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\
+ I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\
+ I40EVF_CLOUD_FIELD_IMAC |\
+ I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\
+ I40EVF_CLOUD_FIELD_IVLAN |\
+ I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_IIP I40EVF_CLOUD_FIELD_IIP
+
+/* bookkeeping of cloud filters */
+struct i40evf_cloud_filter {
+ enum i40evf_cloud_filter_state_t state;
+ struct list_head list;
+ struct virtchnl_filter f;
+ unsigned long cookie;
+ bool del; /* filter needs to be deleted */
+ bool add; /* filter needs to be added */
+};
+
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
@@ -240,6 +296,7 @@ struct i40evf_adapter {
#define I40EVF_FLAG_ALLMULTI_ON BIT(14)
#define I40EVF_FLAG_LEGACY_RX BIT(15)
#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16)
+#define I40EVF_FLAG_QUEUES_DISABLED BIT(17)
/* duplicates for common code */
#define I40E_FLAG_DCB_ENABLED 0
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
@@ -268,6 +325,10 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19)
#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20)
+#define I40EVF_FLAG_AQ_ENABLE_CHANNELS BIT(21)
+#define I40EVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
+#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
+#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
/* OS defined structs */
struct net_device *netdev;
@@ -313,6 +374,13 @@ struct i40evf_adapter {
u16 rss_lut_size;
u8 *rss_key;
u8 *rss_lut;
+ /* ADq related members */
+ struct i40evf_channel_config ch_config;
+ u8 num_tc;
+ struct list_head cloud_filter_list;
+ /* lock to protect access to the cloud filter list */
+ spinlock_t cloud_filter_list_lock;
+ u16 num_cloud_filters;
};
@@ -379,4 +447,8 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
void i40evf_notify_client_open(struct i40e_vsi *vsi);
void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
+void i40evf_enable_channels(struct i40evf_adapter *adapter);
+void i40evf_disable_channels(struct i40evf_adapter *adapter);
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index aded3ad7763e..e6793255de0b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -695,6 +695,12 @@ static int i40evf_set_channels(struct net_device *netdev,
return -EINVAL;
}
+ if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc) {
+ dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
+ return -EINVAL;
+ }
+
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 34fd6c553879..4955ce3ab6a2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1041,6 +1041,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_vlan_filter *vlf;
struct i40evf_mac_filter *f;
+ struct i40evf_cloud_filter *cf;
if (adapter->state <= __I40EVF_DOWN_PENDING)
return;
@@ -1064,11 +1065,18 @@ void i40evf_down(struct i40evf_adapter *adapter)
/* remove all VLAN filters */
list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
- f->remove = true;
+ vlf->remove = true;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ /* remove all cloud filters */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ cf->del = true;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
@@ -1079,6 +1087,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
*/
adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
}
@@ -1164,6 +1173,9 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
*/
if (adapter->num_req_queues)
num_active_queues = adapter->num_req_queues;
+ else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc)
+ num_active_queues = adapter->ch_config.total_qps;
else
num_active_queues = min_t(int,
adapter->vsi_res->num_queue_pairs,
@@ -1491,6 +1503,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
goto err_alloc_q_vectors;
}
+ /* If we've made it this far with the ADq flag ON, then we haven't
+ * bailed out anywhere along the way; ADq isn't just enabled, its actual
+ * resources have been allocated in the reset path.
+ * Now we can truly claim that ADq is enabled.
+ */
+ if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc)
+ dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
+ adapter->num_tc);
+
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
(adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
adapter->num_active_queues);
@@ -1732,6 +1754,27 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_set_promiscuous(adapter, 0);
goto watchdog_done;
}
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
+ i40evf_enable_channels(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
+ i40evf_disable_channels(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
+ i40evf_add_cloud_filter(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
+ i40evf_del_cloud_filter(adapter);
+ goto watchdog_done;
+ }
+
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
if (adapter->state == __I40EVF_RUNNING)
@@ -1755,6 +1798,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
struct i40evf_mac_filter *f, *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp;
+ struct i40evf_cloud_filter *cf, *cftmp;
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
@@ -1776,7 +1820,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
- /* Delete all of the filters, both MAC and VLAN. */
+ /* Delete all of the filters */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
list_del(&f->list);
kfree(f);
@@ -1789,6 +1833,14 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1818,9 +1870,11 @@ static void i40evf_reset_task(struct work_struct *work)
struct i40evf_adapter *adapter = container_of(work,
struct i40evf_adapter,
reset_task);
+ struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw;
struct i40evf_vlan_filter *vlf;
+ struct i40evf_cloud_filter *cf;
struct i40evf_mac_filter *f;
u32 reg_val;
int i = 0, err;
@@ -1913,6 +1967,7 @@ continue_reset:
i40evf_free_all_rx_resources(adapter);
i40evf_free_all_tx_resources(adapter);
+ adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED;
/* kill and reinit the admin queue */
i40evf_shutdown_adminq(hw);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -1944,8 +1999,19 @@ continue_reset:
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ /* check if TCs are running and re-add all cloud filters */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc) {
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ cf->add = true;
+ }
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -2211,6 +2277,724 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
+ * @adapter: board private structure
+ * @max_tx_rate: max Tx bw for a tc
+ **/
+static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
+ u64 max_tx_rate)
+{
+ int speed = 0, ret = 0;
+
+ switch (adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ speed = 25000;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ speed = 20000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ speed = 100;
+ break;
+ default:
+ break;
+ }
+
+ if (max_tx_rate > speed) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid tx rate specified\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_validate_ch_config - validate queue mapping info
+ * @adapter: board private structure
+ * @mqprio_qopt: queue parameters
+ *
+ * This function checks whether the config provided by the user to
+ * configure queue channels is valid. Returns 0 on a valid
+ * config.
+ **/
+static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u64 total_max_rate = 0;
+ int i, num_qps = 0;
+ u64 tx_rate = 0;
+ int ret = 0;
+
+ if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
+ mqprio_qopt->qopt.num_tc < 1)
+ return -EINVAL;
+
+ for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
+ if (!mqprio_qopt->qopt.count[i] ||
+ mqprio_qopt->qopt.offset[i] != num_qps)
+ return -EINVAL;
+ if (mqprio_qopt->min_rate[i]) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid min tx rate (greater than 0) specified\n");
+ return -EINVAL;
+ }
+ /* convert to Mbps */
+ tx_rate = div_u64(mqprio_qopt->max_rate[i],
+ I40EVF_MBPS_DIVISOR);
+ total_max_rate += tx_rate;
+ num_qps += mqprio_qopt->qopt.count[i];
+ }
+ if (num_qps > MAX_QUEUES)
+ return -EINVAL;
+
+ ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
+ return ret;
+}
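A sketch of a config that passes these checks, assuming the standard tc_mqprio_qopt_offload layout: two TCs tiling eight queues with contiguous offsets, no min rate, and TC0 capped at 100 Mbps (values illustrative):

/* Illustrative only: offset[i] must equal the running queue count */
struct tc_mqprio_qopt_offload demo = {
	.qopt = {
		.num_tc = 2,
		.count  = { 4, 4 },
		.offset = { 0, 4 },
	},
	.max_rate = { 12500000ULL, 0 },	/* bytes/s: 100 Mbps for TC0 */
};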
+
+/**
+ * i40evf_del_all_cloud_filters - delete all cloud filters
+ * on the traffic classes
+ **/
+static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
+{
+ struct i40evf_cloud_filter *cf, *cftmp;
+
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * __i40evf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+ * @type_data: tc offload data
+ *
+ * This function processes the config information provided by the
+ * user to configure traffic classes/queue channels and packages the
+ * information to request the PF to setup traffic classes.
+ *
+ * Returns 0 on success.
+ **/
+static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct virtchnl_vf_resource *vfres = adapter->vf_res;
+ u8 num_tc = 0, total_qps = 0;
+ int ret = 0, netdev_tc = 0;
+ u64 max_tx_rate;
+ u16 mode;
+ int i;
+
+ num_tc = mqprio_qopt->qopt.num_tc;
+ mode = mqprio_qopt->mode;
+
+ /* delete queue_channel */
+ if (!mqprio_qopt->qopt.hw) {
+ if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
+ /* reset the tc configuration */
+ netdev_reset_tc(netdev);
+ adapter->num_tc = 0;
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ i40evf_del_all_cloud_filters(adapter);
+ adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+ goto exit;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ /* add queue channel */
+ if (mode == TC_MQPRIO_MODE_CHANNEL) {
+ if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
+ dev_err(&adapter->pdev->dev, "ADq not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
+ dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
+ return -EINVAL;
+ }
+
+ ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
+ if (ret)
+ return ret;
+ /* Return if same TC config is requested */
+ if (adapter->num_tc == num_tc)
+ return 0;
+ adapter->num_tc = num_tc;
+
+ for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+ if (i < num_tc) {
+ adapter->ch_config.ch_info[i].count =
+ mqprio_qopt->qopt.count[i];
+ adapter->ch_config.ch_info[i].offset =
+ mqprio_qopt->qopt.offset[i];
+ total_qps += mqprio_qopt->qopt.count[i];
+ max_tx_rate = mqprio_qopt->max_rate[i];
+ /* convert to Mbps */
+ max_tx_rate = div_u64(max_tx_rate,
+ I40EVF_MBPS_DIVISOR);
+ adapter->ch_config.ch_info[i].max_tx_rate =
+ max_tx_rate;
+ } else {
+ adapter->ch_config.ch_info[i].count = 1;
+ adapter->ch_config.ch_info[i].offset = 0;
+ }
+ }
+ adapter->ch_config.total_qps = total_qps;
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS;
+ netdev_reset_tc(netdev);
+ /* Report the tc mapping up the stack */
+ netdev_set_num_tc(adapter->netdev, num_tc);
+ for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+ u16 qcount = mqprio_qopt->qopt.count[i];
+ u16 qoffset = mqprio_qopt->qopt.offset[i];
+
+ if (i < num_tc)
+ netdev_set_tc_queue(netdev, netdev_tc++, qcount,
+ qoffset);
+ }
+ }
+exit:
+ return ret;
+}
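For context, channel mode is typically driven from userspace with something like (device name illustrative): tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel. Deleting that qdisc clears qopt.hw and takes the teardown path above, which schedules I40EVF_FLAG_AQ_DISABLE_CHANNELS.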
+
+/**
+ * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
+ * @adapter: board private structure
+ * @cls_flower: pointer to struct tc_cls_flower_offload
+ * @filter: pointer to cloud filter structure
+ */
+static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
+ struct tc_cls_flower_offload *f,
+ struct i40evf_cloud_filter *filter)
+{
+ u16 n_proto_mask = 0;
+ u16 n_proto_key = 0;
+ u8 field_flags = 0;
+ u16 addr_type = 0;
+ u16 n_proto = 0;
+ int i = 0;
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
+ dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
+ f->dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->mask);
+
+ if (mask->keyid != 0)
+ field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ n_proto_key = ntohs(key->n_proto);
+ n_proto_mask = ntohs(mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+ n_proto = n_proto_key & n_proto_mask;
+ if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
+ return -EINVAL;
+ if (n_proto == ETH_P_IPV6) {
+ /* specify flow type as TCP IPv6 */
+ filter->f.flow_type = VIRTCHNL_TCP_V6_FLOW;
+ }
+
+ if (key->ip_proto != IPPROTO_TCP) {
+ dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
+ return -EINVAL;
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->key);
+
+ struct flow_dissector_key_eth_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->mask);
+ /* use is_broadcast and is_zero to check for all 0xff or all 0 */
+ if (!is_zero_ether_addr(mask->dst)) {
+ if (is_broadcast_ether_addr(mask->dst)) {
+ field_flags |= I40EVF_CLOUD_FIELD_OMAC;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
+ mask->dst);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (!is_zero_ether_addr(mask->src)) {
+ if (is_broadcast_ether_addr(mask->src)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IMAC;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
+ mask->src);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (!is_zero_ether_addr(key->dst))
+ if (is_valid_ether_addr(key->dst) ||
+ is_multicast_ether_addr(key->dst)) {
+ /* set the mask if a valid dst_mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ filter->f.mask.tcp_spec.dst_mac[i] |=
+ 0xff;
+ ether_addr_copy(filter->f.data.tcp_spec.dst_mac,
+ key->dst);
+ }
+
+ if (!is_zero_ether_addr(key->src))
+ if (is_valid_ether_addr(key->src) ||
+ is_multicast_ether_addr(key->src)) {
+ /* set the mask if a valid src_mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ filter->f.mask.tcp_spec.src_mac[i] |=
+ 0xff;
+ ether_addr_copy(filter->f.data.tcp_spec.src_mac,
+ key->src);
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+
+ if (mask->vlan_id) {
+ if (mask->vlan_id == VLAN_VID_MASK) {
+ field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
+ mask->vlan_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+ filter->f.mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
+ filter->f.data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+
+ addr_type = key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->mask);
+
+ if (mask->dst) {
+ if (mask->dst == cpu_to_be32(0xffffffff)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
+ be32_to_cpu(mask->dst));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask->src) {
+ if (mask->src == cpu_to_be32(0xffffffff)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
+ be32_to_cpu(mask->src));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
+ dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
+ return I40E_ERR_CONFIG;
+ }
+ if (key->dst) {
+ filter->f.mask.tcp_spec.dst_ip[0] |=
+ cpu_to_be32(0xffffffff);
+ filter->f.data.tcp_spec.dst_ip[0] = key->dst;
+ }
+ if (key->src) {
+ filter->f.mask.tcp_spec.src_ip[0] |=
+ cpu_to_be32(0xffffffff);
+ filter->f.data.tcp_spec.src_ip[0] = key->src;
+ }
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->mask);
+
+ /* validate mask, make sure it is not IPV6_ADDR_ANY */
+ if (ipv6_addr_any(&mask->dst)) {
+ dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
+ IPV6_ADDR_ANY);
+ return I40E_ERR_CONFIG;
+ }
+
+ /* src and dest IPv6 address should not be LOOPBACK
+ * (0:0:0:0:0:0:0:1) which can be represented as ::1
+ */
+ if (ipv6_addr_loopback(&key->dst) ||
+ ipv6_addr_loopback(&key->src)) {
+ dev_err(&adapter->pdev->dev,
+ "ipv6 addr should not be loopback\n");
+ return I40E_ERR_CONFIG;
+ }
+ if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+
+ if (!ipv6_addr_any(&key->dst)) {
+ for (i = 0; i < 4; i++)
+ filter->f.mask.tcp_spec.dst_ip[i] |=
+ cpu_to_be32(0xffffffff);
+ memcpy(&filter->f.data.tcp_spec.dst_ip,
+ &key->dst.s6_addr32,
+ sizeof(filter->f.data.tcp_spec.dst_ip));
+ }
+ if (!ipv6_addr_any(&key->src)) {
+ for (i = 0; i < 4; i++)
+ filter->f.mask.tcp_spec.src_ip[i] |=
+ cpu_to_be32(0xffffffff);
+ memcpy(&filter->f.data.tcp_spec.src_ip,
+ &key->src.s6_addr32,
+ sizeof(filter->f.data.tcp_spec.src_ip));
+ }
+ }
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->key);
+ struct flow_dissector_key_ports *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->mask);
+
+ if (mask->src) {
+ if (mask->src == cpu_to_be16(0xffff)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
+ be16_to_cpu(mask->src));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask->dst) {
+ if (mask->dst == cpu_to_be16(0xffff)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
+ be16_to_cpu(mask->dst));
+ return I40E_ERR_CONFIG;
+ }
+ }
+ if (key->dst) {
+ filter->f.mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
+ filter->f.data.tcp_spec.dst_port = key->dst;
+ }
+
+ if (key->src) {
+ filter->f.mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
+ filter->f.data.tcp_spec.src_port = key->src;
+ }
+ }
+ filter->f.field_flags = field_flags;
+
+ return 0;
+}
+
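+/* Illustrative rule (hypothetical device and addresses): a flower filter
+ * that this parser accepts matches TCP over IPv4 or IPv6 with exact-match
+ * masks only, e.g. after adding an ingress qdisc:
+ *
+ *   tc filter add dev <vf-netdev> parent ffff: protocol ip \
+ *      flower dst_ip 192.168.1.10 ip_proto tcp dst_port 80 \
+ *      skip_sw hw_tc 1
+ *
+ * Partial masks on MAC, VLAN, IP or port fields are rejected above with
+ * I40E_ERR_CONFIG.
+ */
+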
+/**
+ * i40evf_handle_tclass - Forward to a traffic class on the device
+ * @adapter: board private structure
+ * @tc: traffic class index on the device
+ * @filter: pointer to cloud filter structure
+ */
+static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
+ struct i40evf_cloud_filter *filter)
+{
+ if (tc == 0)
+ return 0;
+ if (tc < adapter->num_tc) {
+ if (!filter->f.data.tcp_spec.dst_port) {
+ dev_err(&adapter->pdev->dev,
+ "Specify destination port to redirect to traffic class other than TC0\n");
+ return -EINVAL;
+ }
+ }
+ /* redirect to a traffic class on the same device */
+ filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
+ filter->f.action_meta = tc;
+ return 0;
+}
+
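+/* With a rule like the sketch above, "hw_tc 1" arrives here as tc == 1,
+ * so the filter is handed to the PF with VIRTCHNL_ACTION_TC_REDIRECT and
+ * action_meta set to that traffic class.
+ */
+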
+/**
+ * i40evf_configure_clsflower - Add tc flower filters
+ * @adapter: board private structure
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
+ struct i40evf_cloud_filter *filter = NULL;
+ int err = 0, count = 50;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ udelay(1);
+ if (--count == 0)
+ return -EINVAL;
+ }
+
+ if (tc < 0) {
+ dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
+ err = -EINVAL;
+ goto clearout;
+ }
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter) {
+ err = -ENOMEM;
+ goto clearout;
+ }
+ filter->cookie = cls_flower->cookie;
+
+ /* set the mask to all zeroes to begin with */
+ memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
+ /* default the flow type to TCP over IPv4 */
+ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
+ err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
+ if (err < 0)
+ goto err;
+
+ err = i40evf_handle_tclass(adapter, tc, filter);
+ if (err < 0)
+ goto err;
+
+ /* add filter to the list */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_add_tail(&filter->list, &adapter->cloud_filter_list);
+ adapter->num_cloud_filters++;
+ filter->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+err:
+ if (err)
+ kfree(filter);
+clearout:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return err;
+}
+
+/**
+ * i40evf_find_cf - Find the cloud filter in the list
+ * @adapter: board private structure
+ * @cookie: filter specific cookie
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * cloud_filter_list_lock.
+ */
+static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
+ unsigned long *cookie)
+{
+ struct i40evf_cloud_filter *filter = NULL;
+
+ if (!cookie)
+ return NULL;
+
+ list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+ return filter;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_delete_clsflower - Remove tc flower filters
+ * @adapter: board private structure
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ struct i40evf_cloud_filter *filter = NULL;
+ int err = 0;
+
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ filter = i40evf_find_cf(adapter, &cls_flower->cookie);
+ if (filter) {
+ filter->del = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ } else {
+ err = -EINVAL;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
+ return err;
+}
+
+/**
+ * i40evf_setup_tc_cls_flower - flower classifier offloads
+ * @adapter: board private structure
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ if (cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return i40evf_configure_clsflower(adapter, cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return i40evf_delete_clsflower(adapter, cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * i40evf_setup_tc_block_cb - block callback for tc
+ * @type: type of offload
+ * @type_data: offload data
+ * @cb_priv: board private structure registered with the block
+ *
+ * This function is the block callback invoked for tc classifier offloads
+ **/
+static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return i40evf_setup_tc_cls_flower(cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * i40evf_setup_tc_block - register callbacks for tc
+ * @dev: network interface device structure
+ * @f: tc offload data
+ *
+ * This function registers block callbacks for tc
+ * offloads
+ **/
+static int i40evf_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct i40evf_adapter *adapter = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
+ adapter, adapter);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
+ adapter);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * i40evf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+ * @type: type of offload
+ * @type_data: tc offload data
+ *
+ * This function is the callback to ndo_setup_tc in the
+ * netdev_ops.
+ *
+ * Returns 0 on success
+ **/
+static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return __i40evf_setup_tc(netdev, type_data);
+ case TC_SETUP_BLOCK:
+ return i40evf_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
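+/* i40evf_setup_tc is installed below as .ndo_setup_tc, so the stack calls
+ * it both for TC_SETUP_QDISC_MQPRIO (channel setup/teardown) and for
+ * TC_SETUP_BLOCK (flower filter bind/unbind).
+ */
+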
+/**
* i40evf_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2477,6 +3261,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
#endif
+ .ndo_setup_tc = i40evf_setup_tc,
};
/**
@@ -2591,6 +3376,9 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX);
+ /* Enable cloud filter if ADQ is supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+ hw_features |= NETIF_F_HW_TC;
netdev->hw_features |= hw_features;
@@ -2958,9 +3746,11 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.arq_mutex);
spin_lock_init(&adapter->mac_vlan_list_lock);
+ spin_lock_init(&adapter->cloud_filter_list_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
+ INIT_LIST_HEAD(&adapter->cloud_filter_list);
INIT_WORK(&adapter->reset_task, i40evf_reset_task);
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
@@ -3087,6 +3877,7 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_vlan_filter *vlf, *vlftmp;
struct i40evf_mac_filter *f, *ftmp;
+ struct i40evf_cloud_filter *cf, *cftmp;
struct i40e_hw *hw = &adapter->hw;
int err;
/* Indicate we are in remove and not to run reset_task */
@@ -3108,6 +3899,7 @@ static void i40evf_remove(struct pci_dev *pdev)
/* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
adapter->aq_required = 0;
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
i40evf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -3156,6 +3948,13 @@ static void i40evf_remove(struct pci_dev *pdev)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+ list_del(&cf->list);
+ kfree(cf);
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 0700f0afe2d3..6134b61e0938 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -161,7 +161,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_OFFLOAD_ADQ;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
@@ -973,6 +974,201 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_enable_channels
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable channels as specified by
+ * the user via the tc tool.
+ **/
+void i40evf_enable_channels(struct i40evf_adapter *adapter)
+{
+ struct virtchnl_tc_info *vti = NULL;
+ u16 len;
+ int i;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
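+ /* the request ends in a variable-length array of per-TC
+ * virtchnl_channel_info entries, so size the buffer for the
+ * header plus one entry per configured traffic class
+ */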
+ len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
+ sizeof(struct virtchnl_tc_info);
+
+ vti = kzalloc(len, GFP_KERNEL);
+ if (!vti)
+ return;
+ vti->num_tc = adapter->num_tc;
+ for (i = 0; i < vti->num_tc; i++) {
+ vti->list[i].count = adapter->ch_config.ch_info[i].count;
+ vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
+ vti->list[i].pad = 0;
+ vti->list[i].max_tx_rate =
+ adapter->ch_config.ch_info[i].max_tx_rate;
+ }
+
+ adapter->ch_config.state = __I40EVF_TC_RUNNING;
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
+ (u8 *)vti, len);
+ kfree(vti);
+}
+
+/**
+ * i40evf_disable_channels
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable channels that are configured
+ **/
+void i40evf_disable_channels(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ adapter->ch_config.state = __I40EVF_TC_INVALID;
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
+ NULL, 0);
+}
+
+/**
+ * i40evf_print_cloud_filter
+ * @adapter: adapter structure
+ * @f: cloud filter to print
+ *
+ * Print the cloud filter
+ **/
+static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
+ struct virtchnl_filter f)
+{
+ switch (f.flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
+ &f.data.tcp_spec.dst_mac, &f.data.tcp_spec.src_mac,
+ ntohs(f.data.tcp_spec.vlan_id),
+ &f.data.tcp_spec.dst_ip[0], &f.data.tcp_spec.src_ip[0],
+ ntohs(f.data.tcp_spec.dst_port),
+ ntohs(f.data.tcp_spec.src_port));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
+ &f.data.tcp_spec.dst_mac, &f.data.tcp_spec.src_mac,
+ ntohs(f.data.tcp_spec.vlan_id),
+ &f.data.tcp_spec.dst_ip, &f.data.tcp_spec.src_ip,
+ ntohs(f.data.tcp_spec.dst_port),
+ ntohs(f.data.tcp_spec.src_port));
+ break;
+ }
+}
+
+/**
+ * i40evf_add_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF add cloud filters as specified
+ * by the user via the tc tool.
+ **/
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
+{
+ struct i40evf_cloud_filter *cf;
+ struct virtchnl_filter *f;
+ int len = 0, count = 0;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ if (cf->add) {
+ count++;
+ break;
+ }
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
+
+ len = sizeof(struct virtchnl_filter);
+ f = kzalloc(len, GFP_KERNEL);
+ if (!f)
+ return;
+
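+ /* one request buffer is reused: each pending filter is copied in and
+ * sent to the PF as its own VIRTCHNL_OP_ADD_CLOUD_FILTER message
+ */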
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ if (cf->add) {
+ memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
+ cf->add = false;
+ cf->state = __I40EVF_CF_ADD_PENDING;
+ i40evf_send_pf_msg(adapter,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER,
+ (u8 *)f, len);
+ }
+ }
+ kfree(f);
+}
+
+/**
+ * i40evf_del_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF delete cloud filters as specified
+ * by the user via the tc tool.
+ **/
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
+{
+ struct i40evf_cloud_filter *cf, *cftmp;
+ struct virtchnl_filter *f;
+ int len = 0, count = 0;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ if (cf->del) {
+ count++;
+ break;
+ }
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
+
+ len = sizeof(struct virtchnl_filter);
+ f = kzalloc(len, GFP_KERNEL);
+ if (!f)
+ return;
+
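+ /* as with add: one VIRTCHNL_OP_DEL_CLOUD_FILTER message per filter */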
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+ if (cf->del) {
+ memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
+ cf->del = false;
+ cf->state = __I40EVF_CF_DEL_PENDING;
+ i40evf_send_pf_msg(adapter,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER,
+ (u8 *)f, len);
+ }
+ }
+ kfree(f);
+}
+
+/**
* i40evf_request_reset
* @adapter: adapter structure
*
@@ -1017,14 +1213,25 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (adapter->link_up == link_up)
break;
- /* If we get link up message and start queues before
- * our queues are configured it will trigger a TX hang.
- * In that case, just ignore the link status message,
- * we'll get another one after we enable queues and
- * actually prepared to send traffic.
- */
- if (link_up && adapter->state != __I40EVF_RUNNING)
- break;
+ if (link_up) {
+ /* If we get a link up message and start queues
+ * before our queues are configured it will
+ * trigger a TX hang. In that case, just ignore
+ * the link status message; we'll get another one
+ * after we enable queues and are actually ready
+ * to send traffic.
+ */
+ if (adapter->state != __I40EVF_RUNNING)
+ break;
+
+ /* For ADq enabled VF, we reconfigure VSIs and
+ * re-allocate queues. Hence wait till all
+ * queues are enabled.
+ */
+ if (adapter->flags &
+ I40EVF_FLAG_QUEUES_DISABLED)
+ break;
+ }
adapter->link_up = link_up;
if (link_up) {
@@ -1069,6 +1276,57 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __I40EVF_TC_INVALID;
+ netdev_reset_tc(netdev);
+ netif_tx_start_all_queues(netdev);
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __I40EVF_TC_RUNNING;
+ netif_tx_start_all_queues(netdev);
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf, *cftmp;
+
+ list_for_each_entry_safe(cf, cftmp,
+ &adapter->cloud_filter_list,
+ list) {
+ if (cf->state == __I40EVF_CF_ADD_PENDING) {
+ cf->state = __I40EVF_CF_INVALID;
+ dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
+ i40evf_stat_str(&adapter->hw,
+ v_retval));
+ i40evf_print_cloud_filter(adapter,
+ cf->f);
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ }
+ }
+ }
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf;
+
+ list_for_each_entry(cf, &adapter->cloud_filter_list,
+ list) {
+ if (cf->state == __I40EVF_CF_DEL_PENDING) {
+ cf->state = __I40EVF_CF_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
+ i40evf_stat_str(&adapter->hw,
+ v_retval));
+ i40evf_print_cloud_filter(adapter,
+ cf->f);
+ }
+ }
+ }
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval,
@@ -1108,6 +1366,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
+ adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED;
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
@@ -1162,6 +1421,29 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
}
break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf;
+
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ if (cf->state == __I40EVF_CF_ADD_PENDING)
+ cf->state = __I40EVF_CF_ACTIVE;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf, *cftmp;
+
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ if (cf->state == __I40EVF_CF_DEL_PENDING) {
+ cf->state = __I40EVF_CF_INVALID;
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ }
+ }
+ }
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",