path: root/drivers/net/wwan/iosm/iosm_ipc_pm.c
author      Linus Torvalds <torvalds@linux-foundation.org>  2021-06-30 15:51:09 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>  2021-06-30 15:51:09 -0700
commit      dbe69e43372212527abf48609aba7fc39a6daa27 (patch)
tree        96cfafdf70f5325ceeac1054daf7deca339c9730 /drivers/net/wwan/iosm/iosm_ipc_pm.c
parent      Merge tag 'sched-urgent-2021-06-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip (diff)
parent      Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net (diff)
Merge tag 'net-next-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:

   - BPF:
       - add syscall program type and libbpf support for generating
         instructions and bindings for in-kernel BPF loaders (BPF
         loaders for BPF), this is a stepping stone for signed BPF
         programs
       - infrastructure to migrate TCP child sockets from one listener
         to another in the same reuseport group/map to improve
         flexibility of service hand-off/restart
       - add broadcast support to XDP redirect

   - allow bypass of the lockless qdisc to improve performance (for
     pktgen: +23% with one thread, +44% with 2 threads)

   - add a simpler version of "DO_ONCE()" which does not require jump
     labels, intended for slow-path usage

   - virtio/vsock: introduce SOCK_SEQPACKET support

   - add getsocketopt to retrieve netns cookie

   - ip: treat lowest address of an IPv4 subnet as ordinary unicast
     address, allowing reclaiming of precious IPv4 addresses

   - ipv6: use prandom_u32() for ID generation

   - ip: add support for more flexible field selection for hashing
     across multi-path routes (w/ offload to mlxsw)

   - icmp: add support for extended RFC 8335 PROBE (ping)

   - seg6: add support for SRv6 End.DT46 behavior

   - mptcp:
       - DSS checksum support (RFC 8684) to detect middlebox meddling
       - support Connection-time 'C' flag
       - time stamping support

   - sctp: Packetization Layer Path MTU Discovery (RFC 8899)

   - xfrm: speed up state addition with seq set

   - WiFi:
       - hidden AP discovery on 6 GHz and other HE 6 GHz improvements
       - aggregation handling improvements for some drivers
       - minstrel improvements for no-ack frames
       - deferred rate control for TXQs to improve reaction times
       - switch from round robin to virtual time-based airtime
         scheduler

   - add trace points:
       - tcp checksum errors
       - openvswitch - action execution, upcalls
       - socket errors via sk_error_report

  Device APIs:

   - devlink: add rate API for hierarchical control of max egress rate
     of virtual devices (VFs, SFs etc.)

   - don't require RCU read lock to be held around BPF hooks in NAPI
     context

   - page_pool: generic buffer recycling

  New hardware/drivers:

   - mobile:
       - iosm: PCIe Driver for Intel M.2 Modem
       - support for Qualcomm MSM8998 (ipa)

   - WiFi: Qualcomm QCN9074 and WCN6855 PCI devices

   - sparx5: Microchip SparX-5 family of Enterprise Ethernet switches

   - Mellanox BlueField Gigabit Ethernet (control NIC of the DPU)

   - NXP SJA1110 Automotive Ethernet 10-port switch

   - Qualcomm QCA8327 switch support (qca8k)

   - Mikrotik 10/25G NIC (atl1c)

  Driver changes:

   - ACPI support for some MDIO, MAC and PHY devices from Marvell and
     NXP (our first foray into MAC/PHY description via ACPI)

   - HW timestamping (PTP) support: bnxt_en, ice, sja1105, hns3, tja11xx

   - Mellanox/Nvidia NIC (mlx5)
       - NIC VF offload of L2 bridging
       - support IRQ distribution to Sub-functions

   - Marvell (prestera):
       - add flower and match all
       - devlink trap
       - link aggregation

   - Netronome (nfp): connection tracking offload

   - Intel 1GE (igc): add AF_XDP support

   - Marvell DPU (octeontx2): ingress ratelimit offload

   - Google vNIC (gve): new ring/descriptor format support

   - Qualcomm mobile (rmnet & ipa): inline checksum offload support

   - MediaTek WiFi (mt76)
       - mt7915 MSI support
       - mt7915 Tx status reporting
       - mt7915 thermal sensors support
       - mt7921 decapsulation offload
       - mt7921 enable runtime pm and deep sleep

   - Realtek WiFi (rtw88)
       - beacon filter support
       - Tx antenna path diversity support
       - firmware crash information via devcoredump

   - Qualcomm WiFi (wcn36xx)
       - Wake-on-WLAN support with magic packets and GTK rekeying

   - Micrel PHY (ksz886x/ksz8081): add cable test support"

* tag 'net-next-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (2168 commits)
  tcp: change ICSK_CA_PRIV_SIZE definition
  tcp_yeah: check struct yeah size at compile time
  gve: DQO: Fix off by one in gve_rx_dqo()
  stmmac: intel: set PCI_D3hot in suspend
  stmmac: intel: Enable PHY WOL option in EHL
  net: stmmac: option to enable PHY WOL with PMT enabled
  net: say "local" instead of "static" addresses in ndo_dflt_fdb_{add,del}
  net: use netdev_info in ndo_dflt_fdb_{add,del}
  ptp: Set lookup cookie when creating a PTP PPS source.
  net: sock: add trace for socket errors
  net: sock: introduce sk_error_report
  net: dsa: replay the local bridge FDB entries pointing to the bridge dev too
  net: dsa: ensure during dsa_fdb_offload_notify that dev_hold and dev_put are on the same dev
  net: dsa: include fdb entries pointing to bridge in the host fdb list
  net: dsa: include bridge addresses which are local in the host fdb list
  net: dsa: sync static FDB entries on foreign interfaces to hardware
  net: dsa: install the host MDB and FDB entries in the master's RX filter
  net: dsa: reference count the FDB addresses at the cross-chip notifier level
  net: dsa: introduce a separate cross-chip notifier type for host FDBs
  net: dsa: reference count the MDB entries at the cross-chip notifier level
  ...
Diffstat (limited to 'drivers/net/wwan/iosm/iosm_ipc_pm.c')
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pm.c  333
1 file changed, 333 insertions, 0 deletions
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.c b/drivers/net/wwan/iosm/iosm_ipc_pm.c
new file mode 100644
index 000000000000..413601c72dcd
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pm.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_protocol.h"
+
+/* Timeout value in MS for the PM to wait for device to reach active state */
+#define IPC_PM_ACTIVE_TIMEOUT_MS (500)
+
+/* Note that here "active" has the value 1, as compared to the enums
+ * ipc_mem_host_pm_state or ipc_mem_dev_pm_state, where "active" is 0
+ */
+#define IPC_PM_SLEEP (0)
+#define CONSUME_STATE (0)
+#define IPC_PM_ACTIVE (1)
+
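+/* Trigger the HPDA update doorbell interrupt towards CP. When host_slp_check
+ * is set and the host PM state is neither ACTIVE nor ACTIVE_WAIT, or when the
+ * link cannot be made active via ipc_pm_trigger(), the update is only
+ * recorded in pending_hpda_update and signalled later from
+ * ipc_pm_on_link_wake().
+ */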
+void ipc_pm_signal_hpda_doorbell(struct iosm_pm *ipc_pm, u32 identifier,
+ bool host_slp_check)
+{
+ if (host_slp_check && ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE &&
+ ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE_WAIT) {
+ ipc_pm->pending_hpda_update = true;
+ dev_dbg(ipc_pm->dev,
+ "Pend HPDA update set. Host PM_State: %d identifier:%d",
+ ipc_pm->host_pm_state, identifier);
+ return;
+ }
+
+ if (!ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, true)) {
+ ipc_pm->pending_hpda_update = true;
+ dev_dbg(ipc_pm->dev, "Pending HPDA update set. identifier:%d",
+ identifier);
+ return;
+ }
+ ipc_pm->pending_hpda_update = false;
+
+ /* Trigger the irq towards CP */
+ ipc_cp_irq_hpda_update(ipc_pm->pcie, identifier);
+
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, false);
+}
+
+/* Wake up the device if it is in low power mode. */
+static bool ipc_pm_link_activate(struct iosm_pm *ipc_pm)
+{
+ if (ipc_pm->cp_state == IPC_MEM_DEV_PM_ACTIVE)
+ return true;
+
+ if (ipc_pm->cp_state == IPC_MEM_DEV_PM_SLEEP) {
+ if (ipc_pm->ap_state == IPC_MEM_DEV_PM_SLEEP) {
+ /* Wake up the device. */
+ ipc_cp_irq_sleep_control(ipc_pm->pcie,
+ IPC_MEM_DEV_PM_WAKEUP);
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE_WAIT;
+
+ goto not_active;
+ }
+
+ if (ipc_pm->ap_state == IPC_MEM_DEV_PM_ACTIVE_WAIT)
+ goto not_active;
+
+ return true;
+ }
+
+not_active:
+ /* link is not ready */
+ return false;
+}
+
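+/* Wait for the device sleep state machine to reach ACTIVE, bounded by
+ * IPC_PM_ACTIVE_TIMEOUT_MS. Bit 0 of host_sleep_pend marks the pending
+ * wait and is cleared again in all cases. Returns false only on timeout.
+ */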
+bool ipc_pm_wait_for_device_active(struct iosm_pm *ipc_pm)
+{
+ bool ret_val = false;
+
+ if (ipc_pm->ap_state != IPC_MEM_DEV_PM_ACTIVE) {
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ /* Wait for IPC_PM_ACTIVE_TIMEOUT_MS for Device sleep state
+ * machine to enter ACTIVE state.
+ */
+ set_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+
+ if (!wait_for_completion_interruptible_timeout
+ (&ipc_pm->host_sleep_complete,
+ msecs_to_jiffies(IPC_PM_ACTIVE_TIMEOUT_MS))) {
+ dev_err(ipc_pm->dev,
+ "PM timeout. Expected State:%d. Actual: %d",
+ IPC_MEM_DEV_PM_ACTIVE, ipc_pm->ap_state);
+ goto active_timeout;
+ }
+ }
+
+ ret_val = true;
+active_timeout:
+ /* Complete all memory stores before clearing bit */
+ smp_mb__before_atomic();
+
+ /* Reset the atomic variable in any case as device sleep
+ * state machine change is no longer of interest.
+ */
+ clear_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after clearing bit */
+ smp_mb__after_atomic();
+
+ return ret_val;
+}
+
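+/* Enter link sleep: mark both the AP and CP device PM states as SLEEP and
+ * acknowledge the sleep request towards CP.
+ */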
+static void ipc_pm_on_link_sleep(struct iosm_pm *ipc_pm)
+{
+ /* Pending sleep ack and all conditions are cleared
+ * -> signal SLEEP_ACK to CP
+ */
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
+
+ ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_SLEEP);
+}
+
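+/* Leave link sleep: mark the AP side active. When called as an
+ * acknowledgement (ack == true, i.e. triggered by the LINK unit), also mark
+ * CP active, signal it via the sleep control doorbell and wake up a waiter
+ * in ipc_pm_wait_for_device_active(). Any deferred HPDA update is re-issued
+ * when the host PM state is active.
+ */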
+static void ipc_pm_on_link_wake(struct iosm_pm *ipc_pm, bool ack)
+{
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+
+ if (ack) {
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+
+ ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_ACTIVE);
+
+ /* Wake up a waiter pending in ipc_pm_wait_for_device_active(). */
+ if (test_bit(CONSUME_STATE, &ipc_pm->host_sleep_pend))
+ complete(&ipc_pm->host_sleep_complete);
+ }
+
+ /* Check for a pending HPDA update.
+ * An HP update may be pending because a message send was put on hold
+ * due to the device sleep state, or because a TD update was deferred
+ * due to the device sleep and host sleep states.
+ */
+ if (ipc_pm->pending_hpda_update &&
+ ipc_pm->host_pm_state == IPC_MEM_HOST_PM_ACTIVE)
+ ipc_pm_signal_hpda_doorbell(ipc_pm, IPC_HP_PM_TRIGGER, true);
+}
+
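+/* Update one power condition (IRQ, LINK or HS) and derive the resulting
+ * link state. Returns true if the link is active, false if the device
+ * first has to be woken up.
+ */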
+bool ipc_pm_trigger(struct iosm_pm *ipc_pm, enum ipc_pm_unit unit, bool active)
+{
+ union ipc_pm_cond old_cond;
+ union ipc_pm_cond new_cond;
+ bool link_active;
+
+ /* Save the current D3 state. */
+ new_cond = ipc_pm->pm_cond;
+ old_cond = ipc_pm->pm_cond;
+
+ /* Calculate the power state only in the runtime phase. */
+ switch (unit) {
+ case IPC_PM_UNIT_IRQ: /* CP irq */
+ new_cond.irq = active;
+ break;
+
+ case IPC_PM_UNIT_LINK: /* Device link state. */
+ new_cond.link = active;
+ break;
+
+ case IPC_PM_UNIT_HS: /* Host sleep trigger requires Link. */
+ new_cond.hs = active;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Something changed ? */
+ if (old_cond.raw == new_cond.raw) {
+ /* Stay in the current PM state. */
+ link_active = old_cond.link == IPC_PM_ACTIVE;
+ goto ret;
+ }
+
+ ipc_pm->pm_cond = new_cond;
+
+ if (new_cond.link)
+ ipc_pm_on_link_wake(ipc_pm, unit == IPC_PM_UNIT_LINK);
+ else if (unit == IPC_PM_UNIT_LINK)
+ ipc_pm_on_link_sleep(ipc_pm);
+
+ if (old_cond.link == IPC_PM_SLEEP && new_cond.raw) {
+ link_active = ipc_pm_link_activate(ipc_pm);
+ goto ret;
+ }
+
+ link_active = old_cond.link == IPC_PM_ACTIVE;
+
+ret:
+ return link_active;
+}
+
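+/* Enter the host sleep handshake: only allowed from the ACTIVE host PM
+ * state, which is then moved to SLEEP_WAIT_D3.
+ */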
+bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm)
+{
+ /* suspend not allowed if host_pm_state is not IPC_MEM_HOST_PM_ACTIVE */
+ if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE) {
+ dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
+ ipc_pm->host_pm_state, IPC_MEM_HOST_PM_ACTIVE);
+ return false;
+ }
+
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_SLEEP_WAIT_D3;
+
+ return true;
+}
+
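+/* Start leaving host sleep: only allowed from the SLEEP host PM state,
+ * which is then moved to ACTIVE_WAIT before the sleep exit message is
+ * sent to CP.
+ */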
+bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm)
+{
+ if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_SLEEP) {
+ dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
+ ipc_pm->host_pm_state, IPC_MEM_HOST_PM_SLEEP);
+ return false;
+ }
+
+ /* Sending Sleep Exit message to CP. Update the state */
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE_WAIT;
+
+ return true;
+}
+
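+/* Force both sides of the sleep protocol into SLEEP or ACTIVE for
+ * suspend-to-idle, without any handshake with CP.
+ */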
+void ipc_pm_set_s2idle_sleep(struct iosm_pm *ipc_pm, bool sleep)
+{
+ if (sleep) {
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_SLEEP;
+ } else {
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
+ }
+}
+
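+/* Evaluate a device sleep notification from CP. Duplicate notifications
+ * are ignored. Returns true only when CP requests SLEEP while the link is
+ * currently ACTIVE; all other cases return false.
+ */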
+bool ipc_pm_dev_slp_notification(struct iosm_pm *ipc_pm, u32 cp_pm_req)
+{
+ if (cp_pm_req == ipc_pm->device_sleep_notification)
+ return false;
+
+ ipc_pm->device_sleep_notification = cp_pm_req;
+
+ /* Evaluate the PM request. */
+ switch (ipc_pm->cp_state) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ switch (cp_pm_req) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ /* Inform the PM that the device link can go down. */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, false);
+ return true;
+
+ default:
+ dev_err(ipc_pm->dev,
+ "loc-pm=%d active: confused req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ switch (cp_pm_req) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ /* Inform the PM that the device link is active. */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, true);
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ break;
+
+ default:
+ dev_err(ipc_pm->dev,
+ "loc-pm=%d sleep: confused req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+ break;
+
+ default:
+ dev_err(ipc_pm->dev, "confused loc-pm=%d, req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+
+ return false;
+}
+
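+/* Initialize the PM state: the link starts out active, the IRQ and host
+ * sleep conditions are clear, and the host sleep completion is prepared.
+ */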
+void ipc_pm_init(struct iosm_protocol *ipc_protocol)
+{
+ struct iosm_imem *ipc_imem = ipc_protocol->imem;
+ struct iosm_pm *ipc_pm = &ipc_protocol->pm;
+
+ ipc_pm->pcie = ipc_imem->pcie;
+ ipc_pm->dev = ipc_imem->dev;
+
+ ipc_pm->pm_cond.irq = IPC_PM_SLEEP;
+ ipc_pm->pm_cond.hs = IPC_PM_SLEEP;
+ ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
+
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+
+ /* Initialize the completion used to coordinate Host Sleep
+ * and device sleep.
+ */
+ init_completion(&ipc_pm->host_sleep_complete);
+
+ /* Complete all memory stores before clearing bit */
+ smp_mb__before_atomic();
+
+ clear_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after clearing bit */
+ smp_mb__after_atomic();
+}
+
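+/* Tear down: complete the host sleep completion so that no waiter is left
+ * blocked in ipc_pm_wait_for_device_active().
+ */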
+void ipc_pm_deinit(struct iosm_protocol *proto)
+{
+ struct iosm_pm *ipc_pm = &proto->pm;
+
+ complete(&ipc_pm->host_sleep_complete);
+}