// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include <linux/netdevice.h>
#include <net/nexthop.h>
#include "lag/lag.h"
#include "eswitch.h"
#include "lib/mlx5.h"

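/* Deferred MPESW teardown handler: runs on ldev->wq, takes the lag lock
 * and disables the LAG. Scheduled via mlx5_lag_disable_mpesw() below.
 */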
void mlx5_mpesw_work(struct work_struct *work)
{
	struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);

	mutex_lock(&ldev->lock);
	mlx5_disable_lag(ldev);
	mutex_unlock(&ldev->lock);
}

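/* Defer LAG teardown to the lag workqueue. Callers hold ldev->lock, so
 * mlx5_disable_lag() cannot be invoked directly here; mlx5_mpesw_work()
 * takes the lock itself once it runs.
 */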
static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = dev->priv.lag;

	if (!queue_work(ldev->wq, &ldev->mpesw_work))
		mlx5_core_warn(dev, "failed to queue MPESW disable work\n");
}

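/* Drop one reference from the MPESW rule count. When the last rule is
 * removed and the LAG is still in MPESW mode, schedule its teardown.
 */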
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = dev->priv.lag;

	if (!ldev)
		return;

	mutex_lock(&ldev->lock);
	if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
	    ldev->mode == MLX5_LAG_MODE_MPESW)
		mlx5_lag_disable_mpesw(dev);
	mutex_unlock(&ldev->lock);
}

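/* Take a reference on the MPESW rule count, activating the LAG in MPESW
 * mode when the first rule is added. Fails with -EINVAL if the LAG is
 * already active in another mode; returns 0 when there is no LAG device.
 */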
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = dev->priv.lag;
	int err = 0;

	if (!ldev)
		return 0;

	mutex_lock(&ldev->lock);
	if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
		goto out;

	if (ldev->mode != MLX5_LAG_MODE_NONE) {
		err = -EINVAL;
		goto out;
	}

	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
	if (err)
		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);

out:
	/* Drop the reference taken above if MPESW was not activated, so a
	 * later caller can retry instead of seeing a stale nonzero count.
	 */
	if (err)
		atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
	mutex_unlock(&ldev->lock);
	return err;
}

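/* Disallow TC mirred redirect to a bond master while the LAG is running
 * in MPESW mode; return -EOPNOTSUPP so the offload attempt is rejected.
 */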
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
{
	struct mlx5_lag *ldev = mdev->priv.lag;

	if (!netif_is_bond_master(out_dev) || !ldev)
		return 0;

	mutex_lock(&ldev->lock);
	if (ldev->mode == MLX5_LAG_MODE_MPESW) {
		mutex_unlock(&ldev->lock);
		return -EOPNOTSUPP;
	}
	mutex_unlock(&ldev->lock);
	return 0;
}

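/* Return true if a LAG device exists and is running in MPESW mode. */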
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
{
	return dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW;
}

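/* Initialize MPESW state: the teardown work item and the rule refcount. */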
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
{
	INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
	atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
}

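/* Flush any pending MPESW teardown before the lag device goes away. */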
void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
{
	/* mpesw_work is the plain work_struct set up in mlx5_lag_mpesw_init();
	 * cancel it here rather than the delayed bond_work.
	 */
	cancel_work_sync(&ldev->mpesw_work);
}