// SPDX-License-Identifier: GPL-2.0
/* Copyright (c)  2019 Intel Corporation */

#include "igc.h"
#include "igc_tsn.h"

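/* Report whether launch time (ETF offload) is enabled on any TX ring. */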
static bool is_any_launchtime(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		if (ring->launchtime_enable)
			return true;
	}

	return false;
}

/* Restore the TSN-specific registers to their default values when TSN
 * offloading is disabled.
 */
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tqavctrl;
	int i;

	if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED))
		return 0;

	adapter->cycle_time = 0;

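	/* Restore the default TX packet buffer size and max DMA packet size. */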
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);

	tqavctrl = rd32(IGC_TQAVCTRL);
	tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
		      IGC_TQAVCTRL_ENHANCED_QAV);
	wr32(IGC_TQAVCTRL, tqavctrl);

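	/* Reset the per-queue Qbv gate registers: start at 0, end at the
	 * full one second default cycle, and clear any launch time queue
	 * mode.
	 */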
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		ring->start_time = 0;
		ring->end_time = 0;
		ring->launchtime_enable = false;

		wr32(IGC_TXQCTL(i), 0);
		wr32(IGC_STQT(i), 0);
		wr32(IGC_ENDQT(i), NSEC_PER_SEC);
	}

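	/* Restore the default one second Qbv cycle time. */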
	wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC);
	wr32(IGC_QBVCYCLET, NSEC_PER_SEC);

	adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;

	return 0;
}

static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tqavctrl, baset_l, baset_h;
	u32 sec, nsec, cycle;
	ktime_t base_time, systim;
	int i;

	if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)
		return 0;

	cycle = adapter->cycle_time;
	base_time = adapter->base_time;

	wr32(IGC_TSAUXC, 0);
	wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
	wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);

	tqavctrl = rd32(IGC_TQAVCTRL);
	tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
	wr32(IGC_TQAVCTRL, tqavctrl);

	wr32(IGC_QBVCYCLET_S, cycle);
	wr32(IGC_QBVCYCLET, cycle);

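	/* Program each queue's Qbv gate window (start and end time within
	 * the cycle) and its queue control flags.
	 */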
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];
		u32 txqctl = 0;

		wr32(IGC_STQT(i), ring->start_time);
		wr32(IGC_ENDQT(i), ring->end_time);

		if (adapter->base_time) {
			/* If we have a base_time we are in "taprio"
			 * mode and we need to be strict about the
			 * cycles: only transmit a packet if it can be
			 * completed during that cycle.
			 */
			txqctl |= IGC_TXQCTL_STRICT_CYCLE |
				IGC_TXQCTL_STRICT_END;
		}

		if (ring->launchtime_enable)
			txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;

		wr32(IGC_TXQCTL(i), txqctl);
	}

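	/* Snapshot the current SYSTIM time; SYSTIML is read first, which
	 * should latch SYSTIMH so the pair forms a coherent timestamp.
	 */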
	nsec = rd32(IGC_SYSTIML);
	sec = rd32(IGC_SYSTIMH);

	systim = ktime_set(sec, nsec);

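	/* If the requested base_time is already in the past, advance it by
	 * a whole number of cycles so the schedule starts at the first
	 * cycle boundary after the current time.
	 */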
	if (ktime_compare(systim, base_time) > 0) {
		s64 n;

		n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
		base_time = ktime_add_ns(base_time, (n + 1) * cycle);
	}

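	/* Split base_time into seconds (BASET_H) and the nanosecond
	 * remainder (BASET_L).
	 */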
	baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);

	wr32(IGC_BASET_H, baset_h);
	wr32(IGC_BASET_L, baset_l);

	adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED;

	return 0;
}

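/* Apply the adapter's current TSN configuration: enable offload when a Qbv
 * base_time or any launch time is set, otherwise tear the offload down and
 * restore the default register values.
 */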
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
	bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter);

	if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled)
		return 0;

	if (!is_any_enabled) {
		int err = igc_tsn_disable_offload(adapter);

		if (err < 0)
			return err;

		/* The BASET registers aren't cleared when writing
		 * into them, so force a reset if the interface is
		 * running.
		 */
		if (netif_running(adapter->netdev))
			schedule_work(&adapter->reset_task);

		return 0;
	}

	return igc_tsn_enable_offload(adapter);
}