From 955315b0dc8c8641311430f40fbe53990ba40e33 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Tue, 23 Jul 2019 15:14:13 +0900 Subject: qlge: Move drivers/net/ethernet/qlogic/qlge/ to drivers/staging/qlge/ The hardware has been declared EOL by the vendor more than 5 years ago. What's more relevant to the Linux kernel is that the quality of this driver is not on par with many other mainline drivers. Cc: Manish Chopra Message-id: <20190617074858.32467-1-bpoirier@suse.com> Signed-off-by: Benjamin Poirier Signed-off-by: David S. Miller --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 783569e3c4b4..9bca7781d67e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13217,7 +13217,7 @@ M: Manish Chopra M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported -F: drivers/net/ethernet/qlogic/qlge/ +F: drivers/staging/qlge/ QM1D1B0004 MEDIA DRIVER M: Akihiro Tsukada -- cgit v1.2.3-59-g8ed1b From 61670d62bd9eb3fa3d423b4f8f1d37bca853910d Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Mon, 1 Jul 2019 15:42:46 +0200 Subject: MAINTAINERS: can: add missing files to CAN NETWORK DRIVERS and CAN NETWORK LAYER This patch adds missing files to the CAN NETWORK DRIVERS and CAN NETWORK LAYER entry. Reported-by: Robert P. J. Day Signed-off-by: Marc Kleine-Budde --- MAINTAINERS | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9bca7781d67e..9cc156c58f0c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3631,9 +3631,12 @@ S: Maintained F: Documentation/devicetree/bindings/net/can/ F: drivers/net/can/ F: include/linux/can/dev.h +F: include/linux/can/led.h +F: include/linux/can/rx-offload.h F: include/linux/can/platform/ F: include/uapi/linux/can/error.h F: include/uapi/linux/can/netlink.h +F: include/uapi/linux/can/vxcan.h CAN NETWORK LAYER M: Oliver Hartkopp @@ -3646,6 +3649,8 @@ S: Maintained F: Documentation/networking/can.rst F: net/can/ F: include/linux/can/core.h +F: include/linux/can/skb.h +F: include/net/netns/can.h F: include/uapi/linux/can.h F: include/uapi/linux/can/bcm.h F: include/uapi/linux/can/raw.h -- cgit v1.2.3-59-g8ed1b From 3b0b278312ba7d6c1eb8b2fb48d459fb7f341a20 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 29 Jul 2019 16:35:03 +0300 Subject: NFC: nxp-nci: Get rid of platform data Legacy platform data must go away. We are on the safe side here since there are no users of it in the kernel. If anyone by any odd reason needs it the GPIO lookup tables and built-in device properties at your service. Signed-off-by: Andy Shevchenko Tested-by: Sedat Dilek Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 - drivers/nfc/nxp-nci/core.c | 1 - drivers/nfc/nxp-nci/i2c.c | 9 +-------- drivers/nfc/nxp-nci/nxp-nci.h | 1 - include/linux/platform_data/nxp-nci.h | 19 ------------------- 5 files changed, 1 insertion(+), 30 deletions(-) delete mode 100644 include/linux/platform_data/nxp-nci.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9cc156c58f0c..ee663e0e2f2e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11327,7 +11327,6 @@ F: include/net/nfc/ F: include/uapi/linux/nfc.h F: drivers/nfc/ F: include/linux/platform_data/nfcmrvl.h -F: include/linux/platform_data/nxp-nci.h F: Documentation/devicetree/bindings/net/nfc/ NFS, SUNRPC, AND LOCKD CLIENTS diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c index 8dafc696719f..aed18ca60170 100644 --- a/drivers/nfc/nxp-nci/core.c +++ b/drivers/nfc/nxp-nci/core.c @@ -14,7 +14,6 @@ #include #include #include -#include #include diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 5db71869f04b..47b3b7e612e6 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -304,7 +303,6 @@ static int nxp_nci_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct nxp_nci_i2c_phy *phy; - struct nxp_nci_nfc_platform_data *pdata; int r; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { @@ -323,17 +321,12 @@ static int nxp_nci_i2c_probe(struct i2c_client *client, phy->i2c_dev = client; i2c_set_clientdata(client, phy); - pdata = client->dev.platform_data; - - if (!pdata && client->dev.of_node) { + if (client->dev.of_node) { r = nxp_nci_i2c_parse_devtree(client); if (r < 0) { nfc_err(&client->dev, "Failed to get DT data\n"); goto probe_exit; } - } else if (pdata) { - phy->gpio_en = pdata->gpio_en; - phy->gpio_fw = pdata->gpio_fw; } else if (ACPI_HANDLE(&client->dev)) { r = nxp_nci_i2c_acpi_config(phy); if (r < 0) diff --git a/drivers/nfc/nxp-nci/nxp-nci.h b/drivers/nfc/nxp-nci/nxp-nci.h index 6fe7c45544bf..ae3fb2735a4e 100644 --- a/drivers/nfc/nxp-nci/nxp-nci.h +++ b/drivers/nfc/nxp-nci/nxp-nci.h @@ -14,7 +14,6 @@ #include #include #include -#include #include diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h deleted file mode 100644 index 97827ad468e2..000000000000 --- a/include/linux/platform_data/nxp-nci.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Generic platform data for the NXP NCI NFC chips. - * - * Copyright (C) 2014 NXP Semiconductors All rights reserved. - * - * Authors: Clément Perrochaud - */ - -#ifndef _NXP_NCI_H_ -#define _NXP_NCI_H_ - -struct nxp_nci_nfc_platform_data { - unsigned int gpio_en; - unsigned int gpio_fw; - unsigned int irq; -}; - -#endif /* _NXP_NCI_H_ */ -- cgit v1.2.3-59-g8ed1b From 9c1029818c713eccdce945e45cd8fb927d6a3d6e Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 16 Aug 2019 16:09:59 +0300 Subject: net: phy: adin: add support for Analog Devices PHYs This change adds support for Analog Devices Industrial Ethernet PHYs. Particularly the PHYs this driver adds support for: * ADIN1200 - Robust, Industrial, Low Power 10/100 Ethernet PHY * ADIN1300 - Robust, Industrial, Low Latency 10/100/1000 Gigabit Ethernet PHY The 2 chips are register compatible with one another. The main difference being that ADIN1200 doesn't operate in gigabit mode. 
The chips can be operated by the Generic PHY driver as well via the standard IEEE PHY registers (0x0000 - 0x000F) which are supported by the kernel as well. This assumes that configuration of the PHY has been done completely in HW, according to spec. Configuration can also be done via registers, which will be supported by this driver. Datasheets: https://www.analog.com/media/en/technical-documentation/data-sheets/ADIN1300.pdf https://www.analog.com/media/en/technical-documentation/data-sheets/ADIN1200.pdf Reviewed-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: Alexandru Ardelean Signed-off-by: David S. Miller --- MAINTAINERS | 7 +++++++ drivers/net/phy/Kconfig | 9 +++++++++ drivers/net/phy/Makefile | 1 + drivers/net/phy/adin.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 66 insertions(+) create mode 100644 drivers/net/phy/adin.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index e352550a6895..e8aa8a667864 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -938,6 +938,13 @@ S: Supported F: drivers/mux/adgs1408.c F: Documentation/devicetree/bindings/mux/adi,adgs1408.txt +ANALOG DEVICES INC ADIN DRIVER +M: Alexandru Ardelean +L: netdev@vger.kernel.org +W: http://ez.analog.com/community/linux-device-drivers +S: Supported +F: drivers/net/phy/adin.c + ANALOG DEVICES INC ADIS DRIVER LIBRARY M: Alexandru Ardelean S: Supported diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 48ca213c0ada..03be30cde552 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -257,6 +257,15 @@ config SFP depends on HWMON || HWMON=n select MDIO_I2C +config ADIN_PHY + tristate "Analog Devices Industrial Ethernet PHYs" + help + Adds support for the Analog Devices Industrial Ethernet PHYs. + Currently supports the: + - ADIN1200 - Robust,Industrial, Low Power 10/100 Ethernet PHY + - ADIN1300 - Robust,Industrial, Low Latency 10/100/1000 Gigabit + Ethernet PHY + config AMD_PHY tristate "AMD PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index ba07c27e4208..a03437e091f3 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -47,6 +47,7 @@ obj-$(CONFIG_SFP) += sfp.o sfp-obj-$(CONFIG_SFP) += sfp-bus.o obj-y += $(sfp-obj-y) $(sfp-obj-m) +obj-$(CONFIG_ADIN_PHY) += adin.o obj-$(CONFIG_AMD_PHY) += amd.o aquantia-objs += aquantia_main.o ifdef CONFIG_HWMON diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c new file mode 100644 index 000000000000..6d7af4743957 --- /dev/null +++ b/drivers/net/phy/adin.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0+ +/** + * Driver for Analog Devices Industrial Ethernet PHYs + * + * Copyright 2019 Analog Devices Inc. 
+ */ +#include +#include +#include +#include +#include +#include + +#define PHY_ID_ADIN1200 0x0283bc20 +#define PHY_ID_ADIN1300 0x0283bc30 + +static int adin_config_init(struct phy_device *phydev) +{ + return genphy_config_init(phydev); +} + +static struct phy_driver adin_driver[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200), + .name = "ADIN1200", + .config_init = adin_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + }, + { + PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300), + .name = "ADIN1300", + .config_init = adin_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + }, +}; + +module_phy_driver(adin_driver); + +static struct mdio_device_id __maybe_unused adin_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200) }, + { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, adin_tbl); +MODULE_DESCRIPTION("Analog Devices Industrial Ethernet PHY driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3-59-g8ed1b From 767078132ff9625396f1387f487ce9389f405b2f Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 16 Aug 2019 16:10:11 +0300 Subject: dt-bindings: net: add bindings for ADIN PHY driver This change adds bindings for the Analog Devices ADIN PHY driver, detailing all the properties implemented by the driver. Reviewed-by: Andrew Lunn Reviewed-by: Rob Herring Signed-off-by: Alexandru Ardelean Signed-off-by: David S. Miller --- .../devicetree/bindings/net/adi,adin.yaml | 73 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 74 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/adi,adin.yaml (limited to 'MAINTAINERS') diff --git a/Documentation/devicetree/bindings/net/adi,adin.yaml b/Documentation/devicetree/bindings/net/adi,adin.yaml new file mode 100644 index 000000000000..69375cb28e92 --- /dev/null +++ b/Documentation/devicetree/bindings/net/adi,adin.yaml @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: GPL-2.0+ +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/adi,adin.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Analog Devices ADIN1200/ADIN1300 PHY + +maintainers: + - Alexandru Ardelean + +description: | + Bindings for Analog Devices Industrial Ethernet PHYs + +allOf: + - $ref: ethernet-phy.yaml# + +properties: + adi,rx-internal-delay-ps: + description: | + RGMII RX Clock Delay used only when PHY operates in RGMII mode with + internal delay (phy-mode is 'rgmii-id' or 'rgmii-rxid') in pico-seconds. + enum: [ 1600, 1800, 2000, 2200, 2400 ] + default: 2000 + + adi,tx-internal-delay-ps: + description: | + RGMII TX Clock Delay used only when PHY operates in RGMII mode with + internal delay (phy-mode is 'rgmii-id' or 'rgmii-txid') in pico-seconds. + enum: [ 1600, 1800, 2000, 2200, 2400 ] + default: 2000 + + adi,fifo-depth-bits: + description: | + When operating in RMII mode, this option configures the FIFO depth. 
+ enum: [ 4, 8, 12, 16, 20, 24 ] + default: 8 + + adi,disable-energy-detect: + description: | + Disables Energy Detect Powerdown Mode (default disabled, i.e energy detect + is enabled if this property is unspecified) + type: boolean + +examples: + - | + ethernet { + #address-cells = <1>; + #size-cells = <0>; + + phy-mode = "rgmii-id"; + + ethernet-phy@0 { + reg = <0>; + + adi,rx-internal-delay-ps = <1800>; + adi,tx-internal-delay-ps = <2200>; + }; + }; + - | + ethernet { + #address-cells = <1>; + #size-cells = <0>; + + phy-mode = "rmii"; + + ethernet-phy@1 { + reg = <1>; + + adi,fifo-depth-bits = <16>; + adi,disable-energy-detect; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index e8aa8a667864..fd9ab61c2670 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -944,6 +944,7 @@ L: netdev@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/net/phy/adin.c +F: Documentation/devicetree/bindings/net/adi,adin.yaml ANALOG DEVICES INC ADIS DRIVER LIBRARY M: Alexandru Ardelean -- cgit v1.2.3-59-g8ed1b From 2a813f1392205654aea28098f3bcc3e6e2478fa5 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 16 Aug 2019 10:26:26 +0200 Subject: batman-adv: Add Sven to MAINTAINERS file Sven is taking care of tracking our patches and merging most of them in our tree. Let's add him to the MAINTAINERS file so he will get all patch e-mails. Signed-off-by: Simon Wunderlich Acked-by: Antonio Quartulli --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 783569e3c4b4..ce8316cbe3b2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2911,6 +2911,7 @@ BATMAN ADVANCED M: Marek Lindner M: Simon Wunderlich M: Antonio Quartulli +M: Sven Eckelmann L: b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers) W: https://www.open-mesh.org/ B: https://www.open-mesh.org/projects/batman-adv/issues -- cgit v1.2.3-59-g8ed1b From edd3d0074c256848bbf5805350b39d40b1e2bf2b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 17 Aug 2019 16:28:12 +0300 Subject: drop_monitor: Add basic infrastructure for hardware drops Export a function that can be invoked in order to report packets that were dropped by the underlying hardware along with metadata. Subsequent patches will add support for the different alert modes. Signed-off-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- MAINTAINERS | 1 + include/net/drop_monitor.h | 33 +++++++++++++++++++++++++++++++++ net/core/drop_monitor.c | 28 ++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+) create mode 100644 include/net/drop_monitor.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index fd9ab61c2670..96d3e60697f5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11156,6 +11156,7 @@ S: Maintained W: https://fedorahosted.org/dropwatch/ F: net/core/drop_monitor.c F: include/uapi/linux/net_dropmon.h +F: include/net/drop_monitor.h NETWORKING DRIVERS M: "David S. Miller" diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h new file mode 100644 index 000000000000..2ab668461463 --- /dev/null +++ b/include/net/drop_monitor.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _NET_DROP_MONITOR_H_ +#define _NET_DROP_MONITOR_H_ + +#include +#include +#include + +/** + * struct net_dm_hw_metadata - Hardware-supplied packet metadata. + * @trap_group_name: Hardware trap group name. + * @trap_name: Hardware trap name. + * @input_dev: Input netdevice. 
+ */ +struct net_dm_hw_metadata { + const char *trap_group_name; + const char *trap_name; + struct net_device *input_dev; +}; + +#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) +void net_dm_hw_report(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata); +#else +static inline void +net_dm_hw_report(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata) +{ +} +#endif + +#endif /* _NET_DROP_MONITOR_H_ */ diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index aa9147a18329..6020f34728af 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -43,6 +44,7 @@ * netlink alerts */ static int trace_state = TRACE_OFF; +static bool monitor_hw; /* net_dm_mutex * @@ -93,6 +95,8 @@ struct net_dm_alert_ops { void (*napi_poll_probe)(void *ignore, struct napi_struct *napi, int work, int budget); void (*work_item_func)(struct work_struct *work); + void (*hw_probe)(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata); }; struct net_dm_skb_cb { @@ -267,10 +271,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi, rcu_read_unlock(); } +static void +net_dm_hw_summary_probe(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata) +{ +} + static const struct net_dm_alert_ops net_dm_alert_summary_ops = { .kfree_skb_probe = trace_kfree_skb_hit, .napi_poll_probe = trace_napi_poll_hit, .work_item_func = send_dm_alert, + .hw_probe = net_dm_hw_summary_probe, }; static void net_dm_packet_trace_kfree_skb_hit(void *ignore, @@ -482,10 +493,17 @@ static void net_dm_packet_work(struct work_struct *work) net_dm_packet_report(skb); } +static void +net_dm_hw_packet_probe(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata) +{ +} + static const struct net_dm_alert_ops net_dm_alert_packet_ops = { .kfree_skb_probe = net_dm_packet_trace_kfree_skb_hit, .napi_poll_probe = net_dm_packet_trace_napi_poll_hit, .work_item_func = net_dm_packet_work, + .hw_probe = net_dm_hw_packet_probe, }; static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = { @@ -493,6 +511,16 @@ static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = { [NET_DM_ALERT_MODE_PACKET] = &net_dm_alert_packet_ops, }; +void net_dm_hw_report(struct sk_buff *skb, + const struct net_dm_hw_metadata *hw_metadata) +{ + if (!monitor_hw) + return; + + net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata); +} +EXPORT_SYMBOL_GPL(net_dm_hw_report); + static int net_dm_trace_on_set(struct netlink_ext_ack *extack) { const struct net_dm_alert_ops *ops; -- cgit v1.2.3-59-g8ed1b From 348dd93e40c112afda3cd07daa6f470e474d29dc Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Thu, 22 Aug 2019 05:05:41 +0000 Subject: PCI: hv: Add a Hyper-V PCI interface driver for software backchannel interface This interface driver is a helper driver allows other drivers to have a common interface with the Hyper-V PCI frontend driver. Signed-off-by: Haiyang Zhang Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + drivers/pci/Kconfig | 1 + drivers/pci/controller/Kconfig | 7 ++++ drivers/pci/controller/Makefile | 1 + drivers/pci/controller/pci-hyperv-intf.c | 67 ++++++++++++++++++++++++++++++++ drivers/pci/controller/pci-hyperv.c | 12 ++++-- include/linux/hyperv.h | 30 ++++++++++---- 7 files changed, 108 insertions(+), 11 deletions(-) create mode 100644 drivers/pci/controller/pci-hyperv-intf.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a406947b369e..986085351d79 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7469,6 +7469,7 @@ F: drivers/hid/hid-hyperv.c F: drivers/hv/ F: drivers/input/serio/hyperv-keyboard.c F: drivers/pci/controller/pci-hyperv.c +F: drivers/pci/controller/pci-hyperv-intf.c F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/uio/uio_hv_generic.c diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 2ab92409210a..c313de96a357 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -182,6 +182,7 @@ config PCI_LABEL config PCI_HYPERV tristate "Hyper-V PCI Frontend" depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + select PCI_HYPERV_INTERFACE help The PCI device frontend driver allows the kernel to import arbitrary PCI devices from a PCI backend to support PCI driver domains. diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index fe9f9f13ce11..70e078238899 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -281,5 +281,12 @@ config VMD To compile this driver as a module, choose M here: the module will be called vmd. +config PCI_HYPERV_INTERFACE + tristate "Hyper-V PCI Interface" + depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 + help + The Hyper-V PCI Interface is a helper driver allows other drivers to + have a common interface with the Hyper-V PCI frontend driver. + source "drivers/pci/controller/dwc/Kconfig" endmenu diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index d56a507495c5..a2a22c9d91af 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o +obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c new file mode 100644 index 000000000000..cc96be450360 --- /dev/null +++ b/drivers/pci/controller/pci-hyperv-intf.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Microsoft Corporation. + * + * Author: + * Haiyang Zhang + * + * This small module is a helper driver allows other drivers to + * have a common interface with the Hyper-V PCI frontend driver. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +struct hyperv_pci_block_ops hvpci_block_ops; +EXPORT_SYMBOL_GPL(hvpci_block_ops); + +int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned) +{ + if (!hvpci_block_ops.read_block) + return -EOPNOTSUPP; + + return hvpci_block_ops.read_block(dev, buf, buf_len, block_id, + bytes_returned); +} +EXPORT_SYMBOL_GPL(hyperv_read_cfg_blk); + +int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id) +{ + if (!hvpci_block_ops.write_block) + return -EOPNOTSUPP; + + return hvpci_block_ops.write_block(dev, buf, len, block_id); +} +EXPORT_SYMBOL_GPL(hyperv_write_cfg_blk); + +int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)) +{ + if (!hvpci_block_ops.reg_blk_invalidate) + return -EOPNOTSUPP; + + return hvpci_block_ops.reg_blk_invalidate(dev, context, + block_invalidate); +} +EXPORT_SYMBOL_GPL(hyperv_reg_block_invalidate); + +static void __exit exit_hv_pci_intf(void) +{ +} + +static int __init init_hv_pci_intf(void) +{ + return 0; +} + +module_init(init_hv_pci_intf); +module_exit(exit_hv_pci_intf); + +MODULE_DESCRIPTION("Hyper-V PCI Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 57adeca7bda9..9c93ac2215b7 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -983,7 +983,6 @@ int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len, *bytes_returned = comp_pkt.bytes_returned; return 0; } -EXPORT_SYMBOL(hv_read_config_block); /** * hv_pci_write_config_compl() - Invoked when a response packet for a write @@ -1070,7 +1069,6 @@ int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len, return 0; } -EXPORT_SYMBOL(hv_write_config_block); /** * hv_register_block_invalidate() - Invoked when a config block invalidation @@ -1101,7 +1099,6 @@ int hv_register_block_invalidate(struct pci_dev *pdev, void *context, return 0; } -EXPORT_SYMBOL(hv_register_block_invalidate); /* Interrupt management hooks */ static void hv_int_desc_free(struct hv_pci_dev *hpdev, @@ -3045,10 +3042,19 @@ static struct hv_driver hv_pci_drv = { static void __exit exit_hv_pci_drv(void) { vmbus_driver_unregister(&hv_pci_drv); + + hvpci_block_ops.read_block = NULL; + hvpci_block_ops.write_block = NULL; + hvpci_block_ops.reg_blk_invalidate = NULL; } static int __init init_hv_pci_drv(void) { + /* Initialize PCI block r/w interface */ + hvpci_block_ops.read_block = hv_read_config_block; + hvpci_block_ops.write_block = hv_write_config_block; + hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate; + return vmbus_driver_register(&hv_pci_drv); } diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 9d37f8cf1245..2afe6fdc1dda 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1579,18 +1579,32 @@ hv_pkt_iter_next(struct vmbus_channel *channel, pkt = hv_pkt_iter_next(channel, pkt)) /* - * Functions for passing data between SR-IOV PF and VF drivers. The VF driver + * Interface for passing data between SR-IOV PF and VF drivers. The VF driver * sends requests to read and write blocks. Each block must be 128 bytes or * smaller. 
Optionally, the VF driver can register a callback function which * will be invoked when the host says that one or more of the first 64 block * IDs is "invalid" which means that the VF driver should reread them. */ #define HV_CONFIG_BLOCK_SIZE_MAX 128 -int hv_read_config_block(struct pci_dev *dev, void *buf, unsigned int buf_len, - unsigned int block_id, unsigned int *bytes_returned); -int hv_write_config_block(struct pci_dev *dev, void *buf, unsigned int len, - unsigned int block_id); -int hv_register_block_invalidate(struct pci_dev *dev, void *context, - void (*block_invalidate)(void *context, - u64 block_mask)); + +int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned); +int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id); +int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)); + +struct hyperv_pci_block_ops { + int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len, + unsigned int block_id, unsigned int *bytes_returned); + int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len, + unsigned int block_id); + int (*reg_blk_invalidate)(struct pci_dev *dev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)); +}; + +extern struct hyperv_pci_block_ops hvpci_block_ops; + #endif /* _HYPERV_H */ -- cgit v1.2.3-59-g8ed1b From 9d71dd0c70099914fcd063135da3c580865e924c Mon Sep 17 00:00:00 2001 From: The j1939 authors Date: Mon, 8 Oct 2018 11:48:36 +0200 Subject: can: add support of SAE J1939 protocol SAE J1939 is the vehicle bus recommended practice used for communication and diagnostics among vehicle components. Originating in the car and heavy-duty truck industry in the United States, it is now widely used in other parts of the world. J1939, ISO 11783 and NMEA 2000 all share the same high level protocol. SAE J1939 can be considered the replacement for the older SAE J1708 and SAE J1587 specifications. 
Acked-by: Oliver Hartkopp Signed-off-by: Bastian Stender Signed-off-by: Elenita Hinds Signed-off-by: kbuild test robot Signed-off-by: Kurt Van Dijck Signed-off-by: Maxime Jayat Signed-off-by: Robin van der Gracht Signed-off-by: Oleksij Rempel Signed-off-by: Marc Kleine-Budde --- Documentation/networking/index.rst | 1 + Documentation/networking/j1939.rst | 422 ++++++++ MAINTAINERS | 10 + include/linux/can/can-ml.h | 3 + include/uapi/linux/can/j1939.h | 99 ++ net/can/Kconfig | 2 + net/can/Makefile | 2 + net/can/j1939/Kconfig | 15 + net/can/j1939/Makefile | 10 + net/can/j1939/address-claim.c | 230 ++++ net/can/j1939/bus.c | 333 ++++++ net/can/j1939/j1939-priv.h | 338 ++++++ net/can/j1939/main.c | 403 +++++++ net/can/j1939/socket.c | 1160 +++++++++++++++++++++ net/can/j1939/transport.c | 2027 ++++++++++++++++++++++++++++++++++++ 15 files changed, 5055 insertions(+) create mode 100644 Documentation/networking/j1939.rst create mode 100644 include/uapi/linux/can/j1939.h create mode 100644 net/can/j1939/Kconfig create mode 100644 net/can/j1939/Makefile create mode 100644 net/can/j1939/address-claim.c create mode 100644 net/can/j1939/bus.c create mode 100644 net/can/j1939/j1939-priv.h create mode 100644 net/can/j1939/main.c create mode 100644 net/can/j1939/socket.c create mode 100644 net/can/j1939/transport.c (limited to 'MAINTAINERS') diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 37eabc17894c..0481d0ffebed 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -17,6 +17,7 @@ Contents: devlink-trap devlink-trap-netdevsim ieee802154 + j1939 kapi z8530book msg_zerocopy diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst new file mode 100644 index 000000000000..ce7e7a044e08 --- /dev/null +++ b/Documentation/networking/j1939.rst @@ -0,0 +1,422 @@ +.. SPDX-License-Identifier: (GPL-2.0 OR MIT) + +=================== +J1939 Documentation +=================== + +Overview / What Is J1939 +======================== + +SAE J1939 defines a higher layer protocol on CAN. It implements a more +sophisticated addressing scheme and extends the maximum packet size above 8 +bytes. Several derived specifications exist, which differ from the original +J1939 on the application level, like MilCAN A, NMEA2000 and especially +ISO-11783 (ISOBUS). This last one specifies the so-called ETP (Extended +Transport Protocol) which is has been included in this implementation. This +results in a maximum packet size of ((2 ^ 24) - 1) * 7 bytes == 111 MiB. + +Specifications used +------------------- + +* SAE J1939-21 : data link layer +* SAE J1939-81 : network management +* ISO 11783-6 : Virtual Terminal (Extended Transport Protocol) + +.. _j1939-motivation: + +Motivation +========== + +Given the fact there's something like SocketCAN with an API similar to BSD +sockets, we found some reasons to justify a kernel implementation for the +addressing and transport methods used by J1939. + +* **Addressing:** when a process on an ECU communicates via J1939, it should + not necessarily know its source address. Although at least one process per + ECU should know the source address. Other processes should be able to reuse + that address. This way, address parameters for different processes + cooperating for the same ECU, are not duplicated. This way of working is + closely related to the UNIX concept where programs do just one thing, and do + it well. + +* **Dynamic addressing:** Address Claiming in J1939 is time critical. 
+ Furthermore data transport should be handled properly during the address + negotiation. Putting this functionality in the kernel eliminates it as a + requirement for _every_ user space process that communicates via J1939. This + results in a consistent J1939 bus with proper addressing. + +* **Transport:** both TP & ETP reuse some PGNs to relay big packets over them. + Different processes may thus use the same TP & ETP PGNs without actually + knowing it. The individual TP & ETP sessions _must_ be serialized + (synchronized) between different processes. The kernel solves this problem + properly and eliminates the serialization (synchronization) as a requirement + for _every_ user space process that communicates via J1939. + +J1939 defines some other features (relaying, gateway, fast packet transport, +...). In-kernel code for these would not contribute to protocol stability. +Therefore, these parts are left to user space. + +The J1939 sockets operate on CAN network devices (see SocketCAN). Any J1939 +user space library operating on CAN raw sockets will still operate properly. +Since such library does not communicate with the in-kernel implementation, care +must be taken that these two do not interfere. In practice, this means they +cannot share ECU addresses. A single ECU (or virtual ECU) address is used by +the library exclusively, or by the in-kernel system exclusively. + +J1939 concepts +============== + +PGN +--- + +The PGN (Parameter Group Number) is a number to identify a packet. The PGN +is composed as follows: +1 bit : Reserved Bit +1 bit : Data Page +8 bits : PF (PDU Format) +8 bits : PS (PDU Specific) + +In J1939-21 distinction is made between PDU1 format (where PF < 240) and PDU2 +format (where PF >= 240). Furthermore, when using PDU2 format, the PS-field +contains a so-called Group Extension, which is part of the PGN. When using PDU2 +format, the Group Extension is set in the PS-field. + +On the other hand, when using PDU1 format, the PS-field contains a so-called +Destination Address, which is _not_ part of the PGN. When communicating a PGN +from user space to kernel (or visa versa) and PDU2 format is used, the PS-field +of the PGN shall be set to zero. The Destination Address shall be set +elsewhere. + +Regarding PGN mapping to 29-bit CAN identifier, the Destination Address shall +be get/set from/to the appropriate bits of the identifier by the kernel. + + +Addressing +---------- + +Both static and dynamic addressing methods can be used. + +For static addresses, no extra checks are made by the kernel, and provided +addresses are considered right. This responsibility is for the OEM or system +integrator. + +For dynamic addressing, so-called Address Claiming, extra support is foreseen +in the kernel. In J1939 any ECU is known by it's 64-bit NAME. At the moment of +a successful address claim, the kernel keeps track of both NAME and source +address being claimed. This serves as a base for filter schemes. By default, +packets with a destination that is not locally, will be rejected. + +Mixed mode packets (from a static to a dynamic address or vice versa) are +allowed. The BSD sockets define separate API calls for getting/setting the +local & remote address and are applicable for J1939 sockets. + +Filtering +--------- + +J1939 defines white list filters per socket that a user can set in order to +receive a subset of the J1939 traffic. 
Filtering can be based on: + +* SA +* SOURCE_NAME +* PGN + +When multiple filters are in place for a single socket, and a packet comes in +that matches several of those filters, the packet is only received once for +that socket. + +How to Use J1939 +================ + +API Calls +--------- + +On CAN, you first need to open a socket for communicating over a CAN network. +To use J1939, #include . From there, will be +included too. To open a socket, use: + +.. code-block:: C + + s = socket(PF_CAN, SOCK_DGRAM, CAN_J1939); + +J1939 does use SOCK_DGRAM sockets. In the J1939 specification, connections are +mentioned in the context of transport protocol sessions. These still deliver +packets to the other end (using several CAN packets). SOCK_STREAM is not +supported. + +After the successful creation of the socket, you would normally use the bind(2) +and/or connect(2) system call to bind the socket to a CAN interface. After +binding and/or connecting the socket, you can read(2) and write(2) from/to the +socket or use send(2), sendto(2), sendmsg(2) and the recv*() counterpart +operations on the socket as usual. There are also J1939 specific socket options +described below. + +In order to send data, a bind(2) must have been successful. bind(2) assigns a +local address to a socket. + +Different from CAN is that the payload data is just the data that get send, +without it's header info. The header info is derived from the sockaddr supplied +to bind(2), connect(2), sendto(2) and recvfrom(2). A write(2) with size 4 will +result in a packet with 4 bytes. + +The sockaddr structure has extensions for use with J1939 as specified below: + +.. code-block:: C + + struct sockaddr_can { + sa_family_t can_family; + int can_ifindex; + union { + struct { + __u64 name; + /* pgn: + * 8 bit: PS in PDU2 case, else 0 + * 8 bit: PF + * 1 bit: DP + * 1 bit: reserved + */ + __u32 pgn; + __u8 addr; + } j1939; + } can_addr; + } + +can_family & can_ifindex serve the same purpose as for other SocketCAN sockets. + +can_addr.j1939.pgn specifies the PGN (max 0x3ffff). Individual bits are +specified above. + +can_addr.j1939.name contains the 64-bit J1939 NAME. + +can_addr.j1939.addr contains the address. + +The bind(2) system call assigns the local address, i.e. the source address when +sending packages. If a PGN during bind(2) is set, it's used as a RX filter. +I.e. only packets with a matching PGN are received. If an ADDR or NAME is set +it is used as a receive filter, too. It will match the destination NAME or ADDR +of the incoming packet. The NAME filter will work only if appropriate Address +Claiming for this name was done on the CAN bus and registered/cached by the +kernel. + +On the other hand connect(2) assigns the remote address, i.e. the destination +address. The PGN from connect(2) is used as the default PGN when sending +packets. If ADDR or NAME is set it will be used as the default destination ADDR +or NAME. Further a set ADDR or NAME during connect(2) is used as a receive +filter. It will match the source NAME or ADDR of the incoming packet. + +Both write(2) and send(2) will send a packet with local address from bind(2) and +the remote address from connect(2). Use sendto(2) to overwrite the destination +address. + +If can_addr.j1939.name is set (!= 0) the NAME is looked up by the kernel and +the corresponding ADDR is used. If can_addr.j1939.name is not set (== 0), +can_addr.j1939.addr is used. + +When creating a socket, reasonable defaults are set. Some options can be +modified with setsockopt(2) & getsockopt(2). 
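For illustration, a minimal sketch (not part of the original patch; it assumes ``sock`` is an open CAN_J1939 socket and that both options take an ``int``, following the same setsockopt(2) pattern the SO_BROADCAST example below uses) of setting the J1939-specific options described in the following sections:

.. code-block:: C

    int value;

    /* illustrative: receive all J1939 traffic on the bound interface,
     * bypassing the filters implied by bind(2) and connect(2)
     */
    value = 1;
    setsockopt(sock, SOL_CAN_J1939, SO_J1939_PROMISC, &value, sizeof(value));

    /* illustrative: change the default send priority for this socket
     * (J1939 priority field: 0 is highest, 7 is lowest)
     */
    value = 3;
    setsockopt(sock, SOL_CAN_J1939, SO_J1939_SEND_PRIO, &value, sizeof(value));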
+ +RX path related options: + +- SO_J1939_FILTER - configure array of filters +- SO_J1939_PROMISC - disable filters set by bind(2) and connect(2) + +By default no broadcast packets can be send or received. To enable sending or +receiving broadcast packets use the socket option SO_BROADCAST: + +.. code-block:: C + + int value = 1; + setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value)); + +The following diagram illustrates the RX path: + +.. code:: + + +--------------------+ + | incoming packet | + +--------------------+ + | + V + +--------------------+ + | SO_J1939_PROMISC? | + +--------------------+ + | | + no | | yes + | | + .---------' `---------. + | | + +---------------------------+ | + | bind() + connect() + | | + | SOCK_BROADCAST filter | | + +---------------------------+ | + | | + |<---------------------' + V + +---------------------------+ + | SO_J1939_FILTER | + +---------------------------+ + | + V + +---------------------------+ + | socket recv() | + +---------------------------+ + +TX path related options: +SO_J1939_SEND_PRIO - change default send priority for the socket + +Message Flags during send() and Related System Calls +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +send(2), sendto(2) and sendmsg(2) take a 'flags' argument. Currently +supported flags are: + +* MSG_DONTWAIT, i.e. non-blocking operation. + +recvmsg(2) +^^^^^^^^^ + +In most cases recvmsg(2) is needed if you want to extract more information than +recvfrom(2) can provide. For example package priority and timestamp. The +Destination Address, name and packet priority (if applicable) are attached to +the msghdr in the recvmsg(2) call. They can be extracted using cmsg(3) macros, +with cmsg_level == SOL_J1939 && cmsg_type == SCM_J1939_DEST_ADDR, +SCM_J1939_DEST_NAME or SCM_J1939_PRIO. The returned data is a uint8_t for +priority and dst_addr, and uint64_t for dst_name. + +.. code-block:: C + + uint8_t priority, dst_addr; + uint64_t dst_name; + + for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) { + switch (cmsg->cmsg_level) { + case SOL_CAN_J1939: + if (cmsg->cmsg_type == SCM_J1939_DEST_ADDR) + dst_addr = *CMSG_DATA(cmsg); + else if (cmsg->cmsg_type == SCM_J1939_DEST_NAME) + memcpy(&dst_name, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0)); + else if (cmsg->cmsg_type == SCM_J1939_PRIO) + priority = *CMSG_DATA(cmsg); + break; + } + } + +Dynamic Addressing +------------------ + +Distinction has to be made between using the claimed address and doing an +address claim. To use an already claimed address, one has to fill in the +j1939.name member and provide it to bind(2). If the name had claimed an address +earlier, all further messages being sent will use that address. And the +j1939.addr member will be ignored. + +An exception on this is PGN 0x0ee00. This is the "Address Claim/Cannot Claim +Address" message and the kernel will use the j1939.addr member for that PGN if +necessary. + +To claim an address following code example can be used: + +.. 
code-block:: C + + struct sockaddr_can baddr = { + .can_family = AF_CAN, + .can_addr.j1939 = { + .name = name, + .addr = J1939_IDLE_ADDR, + .pgn = J1939_NO_PGN, /* to disable bind() rx filter for PGN */ + }, + .can_ifindex = if_nametoindex("can0"), + }; + + bind(sock, (struct sockaddr *)&baddr, sizeof(baddr)); + + /* for Address Claiming broadcast must be allowed */ + int value = 1; + setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value)); + + /* configured advanced RX filter with PGN needed for Address Claiming */ + const struct j1939_filter filt[] = { + { + .pgn = J1939_PGN_ADDRESS_CLAIMED, + .pgn_mask = J1939_PGN_PDU1_MAX, + }, { + .pgn = J1939_PGN_ADDRESS_REQUEST, + .pgn_mask = J1939_PGN_PDU1_MAX, + }, { + .pgn = J1939_PGN_ADDRESS_COMMANDED, + .pgn_mask = J1939_PGN_MAX, + }, + }; + + setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, &filt, sizeof(filt)); + + uint64_t dat = htole64(name); + const struct sockaddr_can saddr = { + .can_family = AF_CAN, + .can_addr.j1939 = { + .pgn = J1939_PGN_ADDRESS_CLAIMED, + .addr = J1939_NO_ADDR, + }, + }; + + /* Afterwards do a sendto(2) with data set to the NAME (Little Endian). If the + * NAME provided, does not match the j1939.name provided to bind(2), EPROTO + * will be returned. + */ + sendto(sock, dat, sizeof(dat), 0, (const struct sockaddr *)&saddr, sizeof(saddr)); + +If no-one else contests the address claim within 250ms after transmission, the +kernel marks the NAME-SA assignment as valid. The valid assignment will be kept +among other valid NAME-SA assignments. From that point, any socket bound to the +NAME can send packets. + +If another ECU claims the address, the kernel will mark the NAME-SA expired. +No socket bound to the NAME can send packets (other than address claims). To +claim another address, some socket bound to NAME, must bind(2) again, but with +only j1939.addr changed to the new SA, and must then send a valid address claim +packet. This restarts the state machine in the kernel (and any other +participant on the bus) for this NAME. + +can-utils also include the jacd tool, so it can be used as code example or as +default Address Claiming daemon. + +Send Examples +------------- + +Static Addressing +^^^^^^^^^^^^^^^^^ + +This example will send a PGN (0x12300) from SA 0x20 to DA 0x30. + +Bind: + +.. code-block:: C + + struct sockaddr_can baddr = { + .can_family = AF_CAN, + .can_addr.j1939 = { + .name = J1939_NO_NAME, + .addr = 0x20, + .pgn = J1939_NO_PGN, + }, + .can_ifindex = if_nametoindex("can0"), + }; + + bind(sock, (struct sockaddr *)&baddr, sizeof(baddr)); + +Now, the socket 'sock' is bound to the SA 0x20. Since no connect(2) was called, +at this point we can use only sendto(2) or sendmsg(2). + +Send: + +.. 
code-block:: C + + const struct sockaddr_can saddr = { + .can_family = AF_CAN, + .can_addr.j1939 = { + .name = J1939_NO_NAME; + .pgn = 0x30, + .addr = 0x12300, + }, + }; + + sendto(sock, dat, sizeof(dat), 0, (const struct sockaddr *)&saddr, sizeof(saddr)); diff --git a/MAINTAINERS b/MAINTAINERS index a081c477d1d1..844f416437c4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3669,6 +3669,16 @@ F: include/uapi/linux/can/bcm.h F: include/uapi/linux/can/raw.h F: include/uapi/linux/can/gw.h +CAN-J1939 NETWORK LAYER +M: Robin van der Gracht +M: Oleksij Rempel +R: Pengutronix Kernel Team +L: linux-can@vger.kernel.org +S: Maintained +F: Documentation/networking/j1939.txt +F: net/can/j1939/ +F: include/uapi/linux/can/j1939.h + CAPABILITIES M: Serge Hallyn L: linux-security-module@vger.kernel.org diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h index 79ccf6bfa232..2f5d731ae251 100644 --- a/include/linux/can/can-ml.h +++ b/include/linux/can/can-ml.h @@ -60,6 +60,9 @@ struct can_dev_rcv_lists { struct can_ml_priv { struct can_dev_rcv_lists dev_rcv_lists; +#ifdef CAN_J1939 + struct j1939_priv *j1939_priv; +#endif }; #endif /* CAN_ML_H */ diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h new file mode 100644 index 000000000000..c32325342d30 --- /dev/null +++ b/include/uapi/linux/can/j1939.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * j1939.h + * + * Copyright (c) 2010-2011 EIA Electronics + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _UAPI_CAN_J1939_H_ +#define _UAPI_CAN_J1939_H_ + +#include +#include +#include + +#define J1939_MAX_UNICAST_ADDR 0xfd +#define J1939_IDLE_ADDR 0xfe +#define J1939_NO_ADDR 0xff /* == broadcast or no addr */ +#define J1939_NO_NAME 0 +#define J1939_PGN_REQUEST 0x0ea00 /* Request PG */ +#define J1939_PGN_ADDRESS_CLAIMED 0x0ee00 /* Address Claimed */ +#define J1939_PGN_ADDRESS_COMMANDED 0x0fed8 /* Commanded Address */ +#define J1939_PGN_PDU1_MAX 0x3ff00 +#define J1939_PGN_MAX 0x3ffff +#define J1939_NO_PGN 0x40000 + +/* J1939 Parameter Group Number + * + * bit 0-7 : PDU Specific (PS) + * bit 8-15 : PDU Format (PF) + * bit 16 : Data Page (DP) + * bit 17 : Reserved (R) + * bit 19-31 : set to zero + */ +typedef __u32 pgn_t; + +/* J1939 Priority + * + * bit 0-2 : Priority (P) + * bit 3-7 : set to zero + */ +typedef __u8 priority_t; + +/* J1939 NAME + * + * bit 0-20 : Identity Number + * bit 21-31 : Manufacturer Code + * bit 32-34 : ECU Instance + * bit 35-39 : Function Instance + * bit 40-47 : Function + * bit 48 : Reserved + * bit 49-55 : Vehicle System + * bit 56-59 : Vehicle System Instance + * bit 60-62 : Industry Group + * bit 63 : Arbitrary Address Capable + */ +typedef __u64 name_t; + +/* J1939 socket options */ +#define SOL_CAN_J1939 (SOL_CAN_BASE + CAN_J1939) +enum { + SO_J1939_FILTER = 1, /* set filters */ + SO_J1939_PROMISC = 2, /* set/clr promiscuous mode */ + SO_J1939_SEND_PRIO = 3, + SO_J1939_ERRQUEUE = 4, +}; + +enum { + SCM_J1939_DEST_ADDR = 1, + SCM_J1939_DEST_NAME = 2, + SCM_J1939_PRIO = 3, + SCM_J1939_ERRQUEUE = 4, +}; + +enum { + J1939_NLA_PAD, + J1939_NLA_BYTES_ACKED, +}; + +enum { + J1939_EE_INFO_NONE, + J1939_EE_INFO_TX_ABORT, +}; + +struct j1939_filter { + name_t name; + name_t name_mask; + pgn_t pgn; + pgn_t pgn_mask; + __u8 addr; + __u8 addr_mask; +}; + +#define J1939_FILTER_MAX 512 /* maximum number of 
j1939_filter set via setsockopt() */ + +#endif /* !_UAPI_CAN_J1939_H_ */ diff --git a/net/can/Kconfig b/net/can/Kconfig index d4319aa3e1b1..d77042752457 100644 --- a/net/can/Kconfig +++ b/net/can/Kconfig @@ -53,6 +53,8 @@ config CAN_GW They can be modified with AND/OR/XOR/SET operations as configured by the netlink configuration interface known e.g. from iptables. +source "net/can/j1939/Kconfig" + source "drivers/net/can/Kconfig" endif diff --git a/net/can/Makefile b/net/can/Makefile index 1242bbbfe57f..08bd217fc051 100644 --- a/net/can/Makefile +++ b/net/can/Makefile @@ -15,3 +15,5 @@ can-bcm-y := bcm.o obj-$(CONFIG_CAN_GW) += can-gw.o can-gw-y := gw.o + +obj-$(CONFIG_CAN_J1939) += j1939/ diff --git a/net/can/j1939/Kconfig b/net/can/j1939/Kconfig new file mode 100644 index 000000000000..2998298b71ec --- /dev/null +++ b/net/can/j1939/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# SAE J1939 network layer core configuration +# + +config CAN_J1939 + tristate "SAE J1939" + depends on CAN + help + SAE J1939 + Say Y to have in-kernel support for j1939 socket type. This + allows communication according to SAE j1939. + The relevant parts in kernel are + SAE j1939-21 (datalink & transport protocol) + & SAE j1939-81 (network management). diff --git a/net/can/j1939/Makefile b/net/can/j1939/Makefile new file mode 100644 index 000000000000..19181bdae173 --- /dev/null +++ b/net/can/j1939/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_J1939) += can-j1939.o + +can-j1939-objs := \ + address-claim.o \ + bus.o \ + main.o \ + socket.o \ + transport.o diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c new file mode 100644 index 000000000000..f33c47327927 --- /dev/null +++ b/net/can/j1939/address-claim.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2010-2011 EIA Electronics, +// Pieter Beyens +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +/* J1939 Address Claiming. + * Address Claiming in the kernel + * - keeps track of the AC states of ECU's, + * - resolves NAME<=>SA taking into account the AC states of ECU's. + * + * All Address Claim msgs (including host-originated msg) are processed + * at the receive path (a sent msg is always received again via CAN echo). + * As such, the processing of AC msgs is done in the order on which msgs + * are sent on the bus. + * + * This module doesn't send msgs itself (e.g. replies on Address Claims), + * this is the responsibility of a user space application or daemon. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include "j1939-priv.h" + +static inline name_t j1939_skb_to_name(const struct sk_buff *skb) +{ + return le64_to_cpup((__le64 *)skb->data); +} + +static inline bool j1939_ac_msg_is_request(struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + int req_pgn; + + if (skb->len < 3 || skcb->addr.pgn != J1939_PGN_REQUEST) + return false; + + req_pgn = skb->data[0] | (skb->data[1] << 8) | (skb->data[2] << 16); + + return req_pgn == J1939_PGN_ADDRESS_CLAIMED; +} + +static int j1939_ac_verify_outgoing(struct j1939_priv *priv, + struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + + if (skb->len != 8) { + netdev_notice(priv->ndev, "tx address claim with dlc %i\n", + skb->len); + return -EPROTO; + } + + if (skcb->addr.src_name != j1939_skb_to_name(skb)) { + netdev_notice(priv->ndev, "tx address claim with different name\n"); + return -EPROTO; + } + + if (skcb->addr.sa == J1939_NO_ADDR) { + netdev_notice(priv->ndev, "tx address claim with broadcast sa\n"); + return -EPROTO; + } + + /* ac must always be a broadcast */ + if (skcb->addr.dst_name || skcb->addr.da != J1939_NO_ADDR) { + netdev_notice(priv->ndev, "tx address claim with dest, not broadcast\n"); + return -EPROTO; + } + return 0; +} + +int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + int ret; + u8 addr; + + /* network mgmt: address claiming msgs */ + if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) { + struct j1939_ecu *ecu; + + ret = j1939_ac_verify_outgoing(priv, skb); + /* return both when failure & when successful */ + if (ret < 0) + return ret; + ecu = j1939_ecu_get_by_name(priv, skcb->addr.src_name); + if (!ecu) + return -ENODEV; + + if (ecu->addr != skcb->addr.sa) + /* hold further traffic for ecu, remove from parent */ + j1939_ecu_unmap(ecu); + j1939_ecu_put(ecu); + } else if (skcb->addr.src_name) { + /* assign source address */ + addr = j1939_name_to_addr(priv, skcb->addr.src_name); + if (!j1939_address_is_unicast(addr) && + !j1939_ac_msg_is_request(skb)) { + netdev_notice(priv->ndev, "tx drop: invalid sa for name 0x%016llx\n", + skcb->addr.src_name); + return -EADDRNOTAVAIL; + } + skcb->addr.sa = addr; + } + + /* assign destination address */ + if (skcb->addr.dst_name) { + addr = j1939_name_to_addr(priv, skcb->addr.dst_name); + if (!j1939_address_is_unicast(addr)) { + netdev_notice(priv->ndev, "tx drop: invalid da for name 0x%016llx\n", + skcb->addr.dst_name); + return -EADDRNOTAVAIL; + } + skcb->addr.da = addr; + } + return 0; +} + +static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_ecu *ecu, *prev; + name_t name; + + if (skb->len != 8) { + netdev_notice(priv->ndev, "rx address claim with wrong dlc %i\n", + skb->len); + return; + } + + name = j1939_skb_to_name(skb); + skcb->addr.src_name = name; + if (!name) { + netdev_notice(priv->ndev, "rx address claim without name\n"); + return; + } + + if (!j1939_address_is_valid(skcb->addr.sa)) { + netdev_notice(priv->ndev, "rx address claim with broadcast sa\n"); + return; + } + + write_lock_bh(&priv->lock); + + /* Few words on the ECU ref counting: + * + * First we get an ECU handle, either with + * j1939_ecu_get_by_name_locked() (increments the ref counter) + * or j1939_ecu_create_locked() (initializes an ECU object + * with a ref counter of 1). 
+ * + * j1939_ecu_unmap_locked() will decrement the ref counter, + * but only if the ECU was mapped before. So "ecu" still + * belongs to us. + * + * j1939_ecu_timer_start() will increment the ref counter + * before it starts the timer, so we can put the ecu when + * leaving this function. + */ + ecu = j1939_ecu_get_by_name_locked(priv, name); + if (!ecu && j1939_address_is_unicast(skcb->addr.sa)) + ecu = j1939_ecu_create_locked(priv, name); + + if (IS_ERR_OR_NULL(ecu)) + goto out_unlock_bh; + + /* cancel pending (previous) address claim */ + j1939_ecu_timer_cancel(ecu); + + if (j1939_address_is_idle(skcb->addr.sa)) { + j1939_ecu_unmap_locked(ecu); + goto out_ecu_put; + } + + /* save new addr */ + if (ecu->addr != skcb->addr.sa) + j1939_ecu_unmap_locked(ecu); + ecu->addr = skcb->addr.sa; + + prev = j1939_ecu_get_by_addr_locked(priv, skcb->addr.sa); + if (prev) { + if (ecu->name > prev->name) { + j1939_ecu_unmap_locked(ecu); + j1939_ecu_put(prev); + goto out_ecu_put; + } else { + /* kick prev if less or equal */ + j1939_ecu_unmap_locked(prev); + j1939_ecu_put(prev); + } + } + + j1939_ecu_timer_start(ecu); + out_ecu_put: + j1939_ecu_put(ecu); + out_unlock_bh: + write_unlock_bh(&priv->lock); +} + +void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_ecu *ecu; + + /* network mgmt */ + if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) { + j1939_ac_process(priv, skb); + } else if (j1939_address_is_unicast(skcb->addr.sa)) { + /* assign source name */ + ecu = j1939_ecu_get_by_addr(priv, skcb->addr.sa); + if (ecu) { + skcb->addr.src_name = ecu->name; + j1939_ecu_put(ecu); + } + } + + /* assign destination name */ + ecu = j1939_ecu_get_by_addr(priv, skcb->addr.da); + if (ecu) { + skcb->addr.dst_name = ecu->name; + j1939_ecu_put(ecu); + } +} diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c new file mode 100644 index 000000000000..486687901602 --- /dev/null +++ b/net/can/j1939/bus.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +/* bus for j1939 remote devices + * Since rtnetlink, no real bus is used. + */ + +#include + +#include "j1939-priv.h" + +static void __j1939_ecu_release(struct kref *kref) +{ + struct j1939_ecu *ecu = container_of(kref, struct j1939_ecu, kref); + struct j1939_priv *priv = ecu->priv; + + list_del(&ecu->list); + kfree(ecu); + j1939_priv_put(priv); +} + +void j1939_ecu_put(struct j1939_ecu *ecu) +{ + kref_put(&ecu->kref, __j1939_ecu_release); +} + +static void j1939_ecu_get(struct j1939_ecu *ecu) +{ + kref_get(&ecu->kref); +} + +static bool j1939_ecu_is_mapped_locked(struct j1939_ecu *ecu) +{ + struct j1939_priv *priv = ecu->priv; + + lockdep_assert_held(&priv->lock); + + return j1939_ecu_find_by_addr_locked(priv, ecu->addr) == ecu; +} + +/* ECU device interface */ +/* map ECU to a bus address space */ +static void j1939_ecu_map_locked(struct j1939_ecu *ecu) +{ + struct j1939_priv *priv = ecu->priv; + struct j1939_addr_ent *ent; + + lockdep_assert_held(&priv->lock); + + if (!j1939_address_is_unicast(ecu->addr)) + return; + + ent = &priv->ents[ecu->addr]; + + if (ent->ecu) { + netdev_warn(priv->ndev, "Trying to map already mapped ECU, addr: 0x%02x, name: 0x%016llx. 
Skip it.\n", + ecu->addr, ecu->name); + return; + } + + j1939_ecu_get(ecu); + ent->ecu = ecu; + ent->nusers += ecu->nusers; +} + +/* unmap ECU from a bus address space */ +void j1939_ecu_unmap_locked(struct j1939_ecu *ecu) +{ + struct j1939_priv *priv = ecu->priv; + struct j1939_addr_ent *ent; + + lockdep_assert_held(&priv->lock); + + if (!j1939_address_is_unicast(ecu->addr)) + return; + + if (!j1939_ecu_is_mapped_locked(ecu)) + return; + + ent = &priv->ents[ecu->addr]; + ent->ecu = NULL; + ent->nusers -= ecu->nusers; + j1939_ecu_put(ecu); +} + +void j1939_ecu_unmap(struct j1939_ecu *ecu) +{ + write_lock_bh(&ecu->priv->lock); + j1939_ecu_unmap_locked(ecu); + write_unlock_bh(&ecu->priv->lock); +} + +void j1939_ecu_unmap_all(struct j1939_priv *priv) +{ + int i; + + write_lock_bh(&priv->lock); + for (i = 0; i < ARRAY_SIZE(priv->ents); i++) + if (priv->ents[i].ecu) + j1939_ecu_unmap_locked(priv->ents[i].ecu); + write_unlock_bh(&priv->lock); +} + +void j1939_ecu_timer_start(struct j1939_ecu *ecu) +{ + /* The ECU is held here and released in the + * j1939_ecu_timer_handler() or j1939_ecu_timer_cancel(). + */ + j1939_ecu_get(ecu); + + /* Schedule timer in 250 msec to commit address change. */ + hrtimer_start(&ecu->ac_timer, ms_to_ktime(250), + HRTIMER_MODE_REL_SOFT); +} + +void j1939_ecu_timer_cancel(struct j1939_ecu *ecu) +{ + if (hrtimer_cancel(&ecu->ac_timer)) + j1939_ecu_put(ecu); +} + +static enum hrtimer_restart j1939_ecu_timer_handler(struct hrtimer *hrtimer) +{ + struct j1939_ecu *ecu = + container_of(hrtimer, struct j1939_ecu, ac_timer); + struct j1939_priv *priv = ecu->priv; + + write_lock_bh(&priv->lock); + /* TODO: can we test if ecu->addr is unicast before starting + * the timer? + */ + j1939_ecu_map_locked(ecu); + + /* The corresponding j1939_ecu_get() is in + * j1939_ecu_timer_start(). 
+ */ + j1939_ecu_put(ecu); + write_unlock_bh(&priv->lock); + + return HRTIMER_NORESTART; +} + +struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name) +{ + struct j1939_ecu *ecu; + + lockdep_assert_held(&priv->lock); + + ecu = kzalloc(sizeof(*ecu), gfp_any()); + if (!ecu) + return ERR_PTR(-ENOMEM); + kref_init(&ecu->kref); + ecu->addr = J1939_IDLE_ADDR; + ecu->name = name; + + hrtimer_init(&ecu->ac_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); + ecu->ac_timer.function = j1939_ecu_timer_handler; + INIT_LIST_HEAD(&ecu->list); + + j1939_priv_get(priv); + ecu->priv = priv; + list_add_tail(&ecu->list, &priv->ecus); + + return ecu; +} + +struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv, + u8 addr) +{ + lockdep_assert_held(&priv->lock); + + return priv->ents[addr].ecu; +} + +struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv, u8 addr) +{ + struct j1939_ecu *ecu; + + lockdep_assert_held(&priv->lock); + + if (!j1939_address_is_unicast(addr)) + return NULL; + + ecu = j1939_ecu_find_by_addr_locked(priv, addr); + if (ecu) + j1939_ecu_get(ecu); + + return ecu; +} + +struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr) +{ + struct j1939_ecu *ecu; + + read_lock_bh(&priv->lock); + ecu = j1939_ecu_get_by_addr_locked(priv, addr); + read_unlock_bh(&priv->lock); + + return ecu; +} + +/* get pointer to ecu without increasing ref counter */ +static struct j1939_ecu *j1939_ecu_find_by_name_locked(struct j1939_priv *priv, + name_t name) +{ + struct j1939_ecu *ecu; + + lockdep_assert_held(&priv->lock); + + list_for_each_entry(ecu, &priv->ecus, list) { + if (ecu->name == name) + return ecu; + } + + return NULL; +} + +struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv, + name_t name) +{ + struct j1939_ecu *ecu; + + lockdep_assert_held(&priv->lock); + + if (!name) + return NULL; + + ecu = j1939_ecu_find_by_name_locked(priv, name); + if (ecu) + j1939_ecu_get(ecu); + + return ecu; +} + +struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name) +{ + struct j1939_ecu *ecu; + + read_lock_bh(&priv->lock); + ecu = j1939_ecu_get_by_name_locked(priv, name); + read_unlock_bh(&priv->lock); + + return ecu; +} + +u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name) +{ + struct j1939_ecu *ecu; + int addr = J1939_IDLE_ADDR; + + if (!name) + return J1939_NO_ADDR; + + read_lock_bh(&priv->lock); + ecu = j1939_ecu_find_by_name_locked(priv, name); + if (ecu && j1939_ecu_is_mapped_locked(ecu)) + /* ecu's SA is registered */ + addr = ecu->addr; + + read_unlock_bh(&priv->lock); + + return addr; +} + +/* TX addr/name accounting + * Transport protocol needs to know if a SA is local or not + * These functions originate from userspace manipulating sockets, + * so locking is straigforward + */ + +int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa) +{ + struct j1939_ecu *ecu; + int err = 0; + + write_lock_bh(&priv->lock); + + if (j1939_address_is_unicast(sa)) + priv->ents[sa].nusers++; + + if (!name) + goto done; + + ecu = j1939_ecu_get_by_name_locked(priv, name); + if (!ecu) + ecu = j1939_ecu_create_locked(priv, name); + err = PTR_ERR_OR_ZERO(ecu); + if (err) + goto done; + + ecu->nusers++; + /* TODO: do we care if ecu->addr != sa? 
*/ + if (j1939_ecu_is_mapped_locked(ecu)) + /* ecu's sa is active already */ + priv->ents[ecu->addr].nusers++; + + done: + write_unlock_bh(&priv->lock); + + return err; +} + +void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa) +{ + struct j1939_ecu *ecu; + + write_lock_bh(&priv->lock); + + if (j1939_address_is_unicast(sa)) + priv->ents[sa].nusers--; + + if (!name) + goto done; + + ecu = j1939_ecu_find_by_name_locked(priv, name); + if (WARN_ON_ONCE(!ecu)) + goto done; + + ecu->nusers--; + /* TODO: do we care if ecu->addr != sa? */ + if (j1939_ecu_is_mapped_locked(ecu)) + /* ecu's sa is active already */ + priv->ents[ecu->addr].nusers--; + j1939_ecu_put(ecu); + + done: + write_unlock_bh(&priv->lock); +} diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h new file mode 100644 index 000000000000..12369b604ce9 --- /dev/null +++ b/net/can/j1939/j1939-priv.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +#ifndef _J1939_PRIV_H_ +#define _J1939_PRIV_H_ + +#include +#include + +/* Timeout to receive the abort signal over loop back. In case CAN + * bus is open, the timeout should be triggered. + */ +#define J1939_XTP_ABORT_TIMEOUT_MS 500 +#define J1939_SIMPLE_ECHO_TIMEOUT_MS (10 * 1000) + +struct j1939_session; +enum j1939_sk_errqueue_type { + J1939_ERRQUEUE_ACK, + J1939_ERRQUEUE_SCHED, + J1939_ERRQUEUE_ABORT, +}; + +/* j1939 devices */ +struct j1939_ecu { + struct list_head list; + name_t name; + u8 addr; + + /* indicates that this ecu successfully claimed @sa as its address */ + struct hrtimer ac_timer; + struct kref kref; + struct j1939_priv *priv; + + /* count users, to help transport protocol decide for interaction */ + int nusers; +}; + +struct j1939_priv { + struct list_head ecus; + /* local list entry in priv + * These allow irq (& softirq) context lookups on j1939 devices + * This approach (separate lists) is done as the other 2 alternatives + * are not easier or even wrong + * 1) using the pure kobject methods involves mutexes, which are not + * allowed in irq context. + * 2) duplicating data structures would require a lot of synchronization + * code + * usage: + */ + + /* segments need a lock to protect the above list */ + rwlock_t lock; + + struct net_device *ndev; + + /* list of 256 ecu ptrs, that cache the claimed addresses. + * also protected by the above lock + */ + struct j1939_addr_ent { + struct j1939_ecu *ecu; + /* count users, to help transport protocol */ + int nusers; + } ents[256]; + + struct kref kref; + + /* List of active sessions to prevent start of conflicting + * one. + * + * Do not start two sessions of same type, addresses and + * direction. 
+ */ + struct list_head active_session_list; + + /* protects active_session_list */ + spinlock_t active_session_list_lock; + + unsigned int tp_max_packet_size; + + /* lock for j1939_socks list */ + spinlock_t j1939_socks_lock; + struct list_head j1939_socks; + + struct kref rx_kref; +}; + +void j1939_ecu_put(struct j1939_ecu *ecu); + +/* keep the cache of what is local */ +int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa); +void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa); + +static inline bool j1939_address_is_unicast(u8 addr) +{ + return addr <= J1939_MAX_UNICAST_ADDR; +} + +static inline bool j1939_address_is_idle(u8 addr) +{ + return addr == J1939_IDLE_ADDR; +} + +static inline bool j1939_address_is_valid(u8 addr) +{ + return addr != J1939_NO_ADDR; +} + +static inline bool j1939_pgn_is_pdu1(pgn_t pgn) +{ + /* ignore dp & res bits for this */ + return (pgn & 0xff00) < 0xf000; +} + +/* utility to correctly unmap an ECU */ +void j1939_ecu_unmap_locked(struct j1939_ecu *ecu); +void j1939_ecu_unmap(struct j1939_ecu *ecu); + +u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name); +struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv, + u8 addr); +struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr); +struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv, + u8 addr); +struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name); +struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv, + name_t name); + +enum j1939_transfer_type { + J1939_TP, + J1939_ETP, + J1939_SIMPLE, +}; + +struct j1939_addr { + name_t src_name; + name_t dst_name; + pgn_t pgn; + + u8 sa; + u8 da; + + u8 type; +}; + +/* control buffer of the sk_buff */ +struct j1939_sk_buff_cb { + /* Offset in bytes within one ETP session */ + u32 offset; + + /* for tx, MSG_SYN will be used to sync on sockets */ + u32 msg_flags; + u32 tskey; + + struct j1939_addr addr; + + /* Flags for quick lookups during skb processing. + * These are set in the receive path only. 
+ */ +#define J1939_ECU_LOCAL_SRC BIT(0) +#define J1939_ECU_LOCAL_DST BIT(1) + u8 flags; + + priority_t priority; +}; + +static inline +struct j1939_sk_buff_cb *j1939_skb_to_cb(const struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct j1939_sk_buff_cb) > sizeof(skb->cb)); + + return (struct j1939_sk_buff_cb *)skb->cb; +} + +int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb); +void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb); +bool j1939_sk_recv_match(struct j1939_priv *priv, + struct j1939_sk_buff_cb *skcb); +void j1939_sk_send_loop_abort(struct sock *sk, int err); +void j1939_sk_errqueue(struct j1939_session *session, + enum j1939_sk_errqueue_type type); +void j1939_sk_queue_activate_next(struct j1939_session *session); + +/* stack entries */ +struct j1939_session *j1939_tp_send(struct j1939_priv *priv, + struct sk_buff *skb, size_t size); +int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb); +int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb); +void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb); +void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb); + +/* network management */ +struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name); + +void j1939_ecu_timer_start(struct j1939_ecu *ecu); +void j1939_ecu_timer_cancel(struct j1939_ecu *ecu); +void j1939_ecu_unmap_all(struct j1939_priv *priv); + +struct j1939_priv *j1939_netdev_start(struct net_device *ndev); +void j1939_netdev_stop(struct j1939_priv *priv); + +void j1939_priv_put(struct j1939_priv *priv); +void j1939_priv_get(struct j1939_priv *priv); + +/* notify/alert all j1939 sockets bound to ifindex */ +void j1939_sk_netdev_event_netdown(struct j1939_priv *priv); +int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk); +void j1939_tp_init(struct j1939_priv *priv); + +/* decrement pending skb for a j1939 socket */ +void j1939_sock_pending_del(struct sock *sk); + +enum j1939_session_state { + J1939_SESSION_NEW, + J1939_SESSION_ACTIVE, + /* waiting for abort signal on the bus */ + J1939_SESSION_WAITING_ABORT, + J1939_SESSION_ACTIVE_MAX, + J1939_SESSION_DONE, +}; + +struct j1939_session { + struct j1939_priv *priv; + struct list_head active_session_list_entry; + struct list_head sk_session_queue_entry; + struct kref kref; + struct sock *sk; + + /* ifindex, src, dst, pgn define the session block + * the are _never_ modified after insertion in the list + * this decreases locking problems a _lot_ + */ + struct j1939_sk_buff_cb skcb; + struct sk_buff_head skb_queue; + + /* all tx related stuff (last_txcmd, pkt.tx) + * is protected (modified only) with the txtimer hrtimer + * 'total' & 'block' are never changed, + * last_cmd, last & block are protected by ->lock + * this means that the tx may run after cts is received that should + * have stopped tx, but this time discrepancy is never avoided anyhow + */ + u8 last_cmd, last_txcmd; + bool transmission; + bool extd; + /* Total message size, number of bytes */ + unsigned int total_message_size; + /* Total number of bytes queue from socket to the session */ + unsigned int total_queued_size; + unsigned int tx_retry; + + int err; + u32 tskey; + enum j1939_session_state state; + + /* Packets counters for a (extended) transfer session. The packet is + * maximal of 7 bytes. 
+ */ + struct { + /* total - total number of packets for this session */ + unsigned int total; + /* last - last packet of a transfer block after which + * responder should send ETP.CM_CTS and originator + * ETP.CM_DPO + */ + unsigned int last; + /* tx - number of packets send by originator node. + * this counter can be set back if responder node + * didn't received all packets send by originator. + */ + unsigned int tx; + unsigned int tx_acked; + /* rx - number of packets received */ + unsigned int rx; + /* block - amount of packets expected in one block */ + unsigned int block; + /* dpo - ETP.CM_DPO, Data Packet Offset */ + unsigned int dpo; + } pkt; + struct hrtimer txtimer, rxtimer; +}; + +struct j1939_sock { + struct sock sk; /* must be first to skip with memset */ + struct j1939_priv *priv; + struct list_head list; + +#define J1939_SOCK_BOUND BIT(0) +#define J1939_SOCK_CONNECTED BIT(1) +#define J1939_SOCK_PROMISC BIT(2) +#define J1939_SOCK_ERRQUEUE BIT(3) + int state; + + int ifindex; + struct j1939_addr addr; + struct j1939_filter *filters; + int nfilters; + pgn_t pgn_rx_filter; + + /* j1939 may emit equal PGN (!= equal CAN-id's) out of order + * when transport protocol comes in. + * To allow emitting in order, keep a 'pending' nr. of packets + */ + atomic_t skb_pending; + wait_queue_head_t waitq; + + /* lock for the sk_session_queue list */ + spinlock_t sk_session_queue_lock; + struct list_head sk_session_queue; +}; + +static inline struct j1939_sock *j1939_sk(const struct sock *sk) +{ + return container_of(sk, struct j1939_sock, sk); +} + +void j1939_session_get(struct j1939_session *session); +void j1939_session_put(struct j1939_session *session); +void j1939_session_skb_queue(struct j1939_session *session, + struct sk_buff *skb); +int j1939_session_activate(struct j1939_session *session); +void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec); +void j1939_session_timers_cancel(struct j1939_session *session); + +#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff) +#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff) + +#define J1939_REGULAR 0 +#define J1939_EXTENDED 1 + +/* CAN protocol */ +extern const struct can_proto j1939_can_proto; + +#endif /* _J1939_PRIV_H_ */ diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c new file mode 100644 index 000000000000..def2f813ffce --- /dev/null +++ b/net/can/j1939/main.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Pieter Beyens +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2018 Protonic, +// Robin van der Gracht +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +/* Core of can-j1939 that links j1939 to CAN. 
*/ + +#include +#include +#include +#include +#include + +#include "j1939-priv.h" + +MODULE_DESCRIPTION("PF_CAN SAE J1939"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("EIA Electronics (Kurt Van Dijck & Pieter Beyens)"); +MODULE_ALIAS("can-proto-" __stringify(CAN_J1939)); + +/* LOWLEVEL CAN interface */ + +/* CAN_HDR: #bytes before can_frame data part */ +#define J1939_CAN_HDR (offsetof(struct can_frame, data)) + +/* CAN_FTR: #bytes beyond data part */ +#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \ + sizeof(((struct can_frame *)0)->data)) + +/* lowest layer */ +static void j1939_can_recv(struct sk_buff *iskb, void *data) +{ + struct j1939_priv *priv = data; + struct sk_buff *skb; + struct j1939_sk_buff_cb *skcb, *iskcb; + struct can_frame *cf; + + /* create a copy of the skb + * j1939 only delivers the real data bytes, + * the header goes into sockaddr. + * j1939 may not touch the incoming skb in such way + */ + skb = skb_clone(iskb, GFP_ATOMIC); + if (!skb) + return; + + can_skb_set_owner(skb, iskb->sk); + + /* get a pointer to the header of the skb + * the skb payload (pointer) is moved, so that the next skb_data + * returns the actual payload + */ + cf = (void *)skb->data; + skb_pull(skb, J1939_CAN_HDR); + + /* fix length, set to dlc, with 8 maximum */ + skb_trim(skb, min_t(uint8_t, cf->can_dlc, 8)); + + /* set addr */ + skcb = j1939_skb_to_cb(skb); + memset(skcb, 0, sizeof(*skcb)); + + iskcb = j1939_skb_to_cb(iskb); + skcb->tskey = iskcb->tskey; + skcb->priority = (cf->can_id >> 26) & 0x7; + skcb->addr.sa = cf->can_id; + skcb->addr.pgn = (cf->can_id >> 8) & J1939_PGN_MAX; + /* set default message type */ + skcb->addr.type = J1939_TP; + if (j1939_pgn_is_pdu1(skcb->addr.pgn)) { + /* Type 1: with destination address */ + skcb->addr.da = skcb->addr.pgn; + /* normalize pgn: strip dst address */ + skcb->addr.pgn &= 0x3ff00; + } else { + /* set broadcast address */ + skcb->addr.da = J1939_NO_ADDR; + } + + /* update localflags */ + read_lock_bh(&priv->lock); + if (j1939_address_is_unicast(skcb->addr.sa) && + priv->ents[skcb->addr.sa].nusers) + skcb->flags |= J1939_ECU_LOCAL_SRC; + if (j1939_address_is_unicast(skcb->addr.da) && + priv->ents[skcb->addr.da].nusers) + skcb->flags |= J1939_ECU_LOCAL_DST; + read_unlock_bh(&priv->lock); + + /* deliver into the j1939 stack ... 
*/ + j1939_ac_recv(priv, skb); + + if (j1939_tp_recv(priv, skb)) + /* this means the transport layer processed the message */ + goto done; + + j1939_simple_recv(priv, skb); + j1939_sk_recv(priv, skb); + done: + kfree_skb(skb); +} + +/* NETDEV MANAGEMENT */ + +/* values for can_rx_(un)register */ +#define J1939_CAN_ID CAN_EFF_FLAG +#define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG) + +static DEFINE_SPINLOCK(j1939_netdev_lock); + +static struct j1939_priv *j1939_priv_create(struct net_device *ndev) +{ + struct j1939_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + rwlock_init(&priv->lock); + INIT_LIST_HEAD(&priv->ecus); + priv->ndev = ndev; + kref_init(&priv->kref); + kref_init(&priv->rx_kref); + dev_hold(ndev); + + netdev_dbg(priv->ndev, "%s : 0x%p\n", __func__, priv); + + return priv; +} + +static inline void j1939_priv_set(struct net_device *ndev, + struct j1939_priv *priv) +{ + struct can_ml_priv *can_ml_priv = ndev->ml_priv; + + can_ml_priv->j1939_priv = priv; +} + +static void __j1939_priv_release(struct kref *kref) +{ + struct j1939_priv *priv = container_of(kref, struct j1939_priv, kref); + struct net_device *ndev = priv->ndev; + + netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv); + + dev_put(ndev); + kfree(priv); +} + +void j1939_priv_put(struct j1939_priv *priv) +{ + kref_put(&priv->kref, __j1939_priv_release); +} + +void j1939_priv_get(struct j1939_priv *priv) +{ + kref_get(&priv->kref); +} + +static int j1939_can_rx_register(struct j1939_priv *priv) +{ + struct net_device *ndev = priv->ndev; + int ret; + + j1939_priv_get(priv); + ret = can_rx_register(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK, + j1939_can_recv, priv, "j1939", NULL); + if (ret < 0) { + j1939_priv_put(priv); + return ret; + } + + return 0; +} + +static void j1939_can_rx_unregister(struct j1939_priv *priv) +{ + struct net_device *ndev = priv->ndev; + + can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK, + j1939_can_recv, priv); + + j1939_priv_put(priv); +} + +static void __j1939_rx_release(struct kref *kref) + __releases(&j1939_netdev_lock) +{ + struct j1939_priv *priv = container_of(kref, struct j1939_priv, + rx_kref); + + j1939_can_rx_unregister(priv); + j1939_ecu_unmap_all(priv); + j1939_priv_set(priv->ndev, NULL); + spin_unlock(&j1939_netdev_lock); +} + +/* get pointer to priv without increasing ref counter */ +static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev) +{ + struct can_ml_priv *can_ml_priv = ndev->ml_priv; + + return can_ml_priv->j1939_priv; +} + +static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev) +{ + struct j1939_priv *priv; + + lockdep_assert_held(&j1939_netdev_lock); + + if (ndev->type != ARPHRD_CAN) + return NULL; + + priv = j1939_ndev_to_priv(ndev); + if (priv) + j1939_priv_get(priv); + + return priv; +} + +static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev) +{ + struct j1939_priv *priv; + + spin_lock(&j1939_netdev_lock); + priv = j1939_priv_get_by_ndev_locked(ndev); + spin_unlock(&j1939_netdev_lock); + + return priv; +} + +struct j1939_priv *j1939_netdev_start(struct net_device *ndev) +{ + struct j1939_priv *priv, *priv_new; + int ret; + + priv = j1939_priv_get_by_ndev(ndev); + if (priv) { + kref_get(&priv->rx_kref); + return priv; + } + + priv = j1939_priv_create(ndev); + if (!priv) + return ERR_PTR(-ENOMEM); + + j1939_tp_init(priv); + spin_lock_init(&priv->j1939_socks_lock); + INIT_LIST_HEAD(&priv->j1939_socks); + + spin_lock(&j1939_netdev_lock); 
+ priv_new = j1939_priv_get_by_ndev_locked(ndev); + if (priv_new) { + /* Someone was faster than us, use their priv and roll + * back our's. + */ + spin_unlock(&j1939_netdev_lock); + dev_put(ndev); + kfree(priv); + kref_get(&priv_new->rx_kref); + return priv_new; + } + j1939_priv_set(ndev, priv); + spin_unlock(&j1939_netdev_lock); + + ret = j1939_can_rx_register(priv); + if (ret < 0) + goto out_priv_put; + + return priv; + + out_priv_put: + j1939_priv_set(ndev, NULL); + dev_put(ndev); + kfree(priv); + + return ERR_PTR(ret); +} + +void j1939_netdev_stop(struct j1939_priv *priv) +{ + kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock); + j1939_priv_put(priv); +} + +int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb) +{ + int ret, dlc; + canid_t canid; + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct can_frame *cf; + + /* apply sanity checks */ + if (j1939_pgn_is_pdu1(skcb->addr.pgn)) + skcb->addr.pgn &= J1939_PGN_PDU1_MAX; + else + skcb->addr.pgn &= J1939_PGN_MAX; + + if (skcb->priority > 7) + skcb->priority = 6; + + ret = j1939_ac_fixup(priv, skb); + if (unlikely(ret)) + goto failed; + dlc = skb->len; + + /* re-claim the CAN_HDR from the SKB */ + cf = skb_push(skb, J1939_CAN_HDR); + + /* make it a full can frame again */ + skb_put(skb, J1939_CAN_FTR + (8 - dlc)); + + canid = CAN_EFF_FLAG | + (skcb->priority << 26) | + (skcb->addr.pgn << 8) | + skcb->addr.sa; + if (j1939_pgn_is_pdu1(skcb->addr.pgn)) + canid |= skcb->addr.da << 8; + + cf->can_id = canid; + cf->can_dlc = dlc; + + return can_send(skb, 1); + + failed: + kfree_skb(skb); + return ret; +} + +static int j1939_netdev_notify(struct notifier_block *nb, + unsigned long msg, void *data) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(data); + struct j1939_priv *priv; + + priv = j1939_priv_get_by_ndev(ndev); + if (!priv) + goto notify_done; + + if (ndev->type != ARPHRD_CAN) + goto notify_put; + + switch (msg) { + case NETDEV_DOWN: + j1939_cancel_active_session(priv, NULL); + j1939_sk_netdev_event_netdown(priv); + j1939_ecu_unmap_all(priv); + break; + } + +notify_put: + j1939_priv_put(priv); + +notify_done: + return NOTIFY_DONE; +} + +static struct notifier_block j1939_netdev_notifier = { + .notifier_call = j1939_netdev_notify, +}; + +/* MODULE interface */ +static __init int j1939_module_init(void) +{ + int ret; + + pr_info("can: SAE J1939\n"); + + ret = register_netdevice_notifier(&j1939_netdev_notifier); + if (ret) + goto fail_notifier; + + ret = can_proto_register(&j1939_can_proto); + if (ret < 0) { + pr_err("can: registration of j1939 protocol failed\n"); + goto fail_sk; + } + + return 0; + + fail_sk: + unregister_netdevice_notifier(&j1939_netdev_notifier); + fail_notifier: + return ret; +} + +static __exit void j1939_module_exit(void) +{ + can_proto_unregister(&j1939_can_proto); + + unregister_netdevice_notifier(&j1939_netdev_notifier); +} + +module_init(j1939_module_init); +module_exit(j1939_module_exit); diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c new file mode 100644 index 000000000000..37c1040bcb9c --- /dev/null +++ b/net/can/j1939/socket.c @@ -0,0 +1,1160 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Pieter Beyens +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2018 Protonic, +// Robin van der Gracht +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include 
+#include +#include +#include + +#include "j1939-priv.h" + +#define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939) + +/* conversion function between struct sock::sk_priority from linux and + * j1939 priority field + */ +static inline priority_t j1939_prio(u32 sk_priority) +{ + sk_priority = min(sk_priority, 7U); + + return 7 - sk_priority; +} + +static inline u32 j1939_to_sk_priority(priority_t prio) +{ + return 7 - prio; +} + +/* function to see if pgn is to be evaluated */ +static inline bool j1939_pgn_is_valid(pgn_t pgn) +{ + return pgn <= J1939_PGN_MAX; +} + +/* test function to avoid non-zero DA placeholder for pdu1 pgn's */ +static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn) +{ + if (j1939_pgn_is_pdu1(pgn)) + return !(pgn & 0xff); + else + return true; +} + +static inline void j1939_sock_pending_add(struct sock *sk) +{ + struct j1939_sock *jsk = j1939_sk(sk); + + atomic_inc(&jsk->skb_pending); +} + +static int j1939_sock_pending_get(struct sock *sk) +{ + struct j1939_sock *jsk = j1939_sk(sk); + + return atomic_read(&jsk->skb_pending); +} + +void j1939_sock_pending_del(struct sock *sk) +{ + struct j1939_sock *jsk = j1939_sk(sk); + + /* atomic_dec_return returns the new value */ + if (!atomic_dec_return(&jsk->skb_pending)) + wake_up(&jsk->waitq); /* no pending SKB's */ +} + +static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk) +{ + jsk->state |= J1939_SOCK_BOUND; + j1939_priv_get(priv); + jsk->priv = priv; + + spin_lock_bh(&priv->j1939_socks_lock); + list_add_tail(&jsk->list, &priv->j1939_socks); + spin_unlock_bh(&priv->j1939_socks_lock); +} + +static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk) +{ + spin_lock_bh(&priv->j1939_socks_lock); + list_del_init(&jsk->list); + spin_unlock_bh(&priv->j1939_socks_lock); + + jsk->priv = NULL; + j1939_priv_put(priv); + jsk->state &= ~J1939_SOCK_BOUND; +} + +static bool j1939_sk_queue_session(struct j1939_session *session) +{ + struct j1939_sock *jsk = j1939_sk(session->sk); + bool empty; + + spin_lock_bh(&jsk->sk_session_queue_lock); + empty = list_empty(&jsk->sk_session_queue); + j1939_session_get(session); + list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue); + spin_unlock_bh(&jsk->sk_session_queue_lock); + j1939_sock_pending_add(&jsk->sk); + + return empty; +} + +static struct +j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk) +{ + struct j1939_session *session = NULL; + + spin_lock_bh(&jsk->sk_session_queue_lock); + if (!list_empty(&jsk->sk_session_queue)) { + session = list_last_entry(&jsk->sk_session_queue, + struct j1939_session, + sk_session_queue_entry); + if (session->total_queued_size == session->total_message_size) + session = NULL; + else + j1939_session_get(session); + } + spin_unlock_bh(&jsk->sk_session_queue_lock); + + return session; +} + +static void j1939_sk_queue_drop_all(struct j1939_priv *priv, + struct j1939_sock *jsk, int err) +{ + struct j1939_session *session, *tmp; + + netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err); + spin_lock_bh(&jsk->sk_session_queue_lock); + list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue, + sk_session_queue_entry) { + list_del_init(&session->sk_session_queue_entry); + session->err = err; + j1939_session_put(session); + } + spin_unlock_bh(&jsk->sk_session_queue_lock); +} + +static void j1939_sk_queue_activate_next_locked(struct j1939_session *session) +{ + struct j1939_sock *jsk; + struct j1939_session *first; + int err; + + /* RX-Session don't have a socket (yet) */ 
+ if (!session->sk) + return; + + jsk = j1939_sk(session->sk); + lockdep_assert_held(&jsk->sk_session_queue_lock); + + err = session->err; + + first = list_first_entry_or_null(&jsk->sk_session_queue, + struct j1939_session, + sk_session_queue_entry); + + /* Some else has already activated the next session */ + if (first != session) + return; + +activate_next: + list_del_init(&first->sk_session_queue_entry); + j1939_session_put(first); + first = list_first_entry_or_null(&jsk->sk_session_queue, + struct j1939_session, + sk_session_queue_entry); + if (!first) + return; + + if (WARN_ON_ONCE(j1939_session_activate(first))) { + first->err = -EBUSY; + goto activate_next; + } else { + /* Give receiver some time (arbitrary chosen) to recover */ + int time_ms = 0; + + if (err) + time_ms = 10 + prandom_u32_max(16); + + j1939_tp_schedule_txtimer(first, time_ms); + } +} + +void j1939_sk_queue_activate_next(struct j1939_session *session) +{ + struct j1939_sock *jsk; + + if (!session->sk) + return; + + jsk = j1939_sk(session->sk); + + spin_lock_bh(&jsk->sk_session_queue_lock); + j1939_sk_queue_activate_next_locked(session); + spin_unlock_bh(&jsk->sk_session_queue_lock); +} + +static bool j1939_sk_match_dst(struct j1939_sock *jsk, + const struct j1939_sk_buff_cb *skcb) +{ + if ((jsk->state & J1939_SOCK_PROMISC)) + return true; + + /* Destination address filter */ + if (jsk->addr.src_name && skcb->addr.dst_name) { + if (jsk->addr.src_name != skcb->addr.dst_name) + return false; + } else { + /* receive (all sockets) if + * - all packages that match our bind() address + * - all broadcast on a socket if SO_BROADCAST + * is set + */ + if (j1939_address_is_unicast(skcb->addr.da)) { + if (jsk->addr.sa != skcb->addr.da) + return false; + } else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) { + /* receiving broadcast without SO_BROADCAST + * flag is not allowed + */ + return false; + } + } + + /* Source address filter */ + if (jsk->state & J1939_SOCK_CONNECTED) { + /* receive (all sockets) if + * - all packages that match our connect() name or address + */ + if (jsk->addr.dst_name && skcb->addr.src_name) { + if (jsk->addr.dst_name != skcb->addr.src_name) + return false; + } else { + if (jsk->addr.da != skcb->addr.sa) + return false; + } + } + + /* PGN filter */ + if (j1939_pgn_is_valid(jsk->pgn_rx_filter) && + jsk->pgn_rx_filter != skcb->addr.pgn) + return false; + + return true; +} + +/* matches skb control buffer (addr) with a j1939 filter */ +static bool j1939_sk_match_filter(struct j1939_sock *jsk, + const struct j1939_sk_buff_cb *skcb) +{ + const struct j1939_filter *f = jsk->filters; + int nfilter = jsk->nfilters; + + if (!nfilter) + /* receive all when no filters are assigned */ + return true; + + for (; nfilter; ++f, --nfilter) { + if ((skcb->addr.pgn & f->pgn_mask) != f->pgn) + continue; + if ((skcb->addr.sa & f->addr_mask) != f->addr) + continue; + if ((skcb->addr.src_name & f->name_mask) != f->name) + continue; + return true; + } + return false; +} + +static bool j1939_sk_recv_match_one(struct j1939_sock *jsk, + const struct j1939_sk_buff_cb *skcb) +{ + if (!(jsk->state & J1939_SOCK_BOUND)) + return false; + + if (!j1939_sk_match_dst(jsk, skcb)) + return false; + + if (!j1939_sk_match_filter(jsk, skcb)) + return false; + + return true; +} + +static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb) +{ + const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb); + struct j1939_sk_buff_cb *skcb; + struct sk_buff *skb; + + if (oskb->sk == &jsk->sk) + return; + + if 
(!j1939_sk_recv_match_one(jsk, oskcb)) + return; + + skb = skb_clone(oskb, GFP_ATOMIC); + if (!skb) { + pr_warn("skb clone failed\n"); + return; + } + can_skb_set_owner(skb, oskb->sk); + + skcb = j1939_skb_to_cb(skb); + skcb->msg_flags &= ~(MSG_DONTROUTE); + if (skb->sk) + skcb->msg_flags |= MSG_DONTROUTE; + + if (sock_queue_rcv_skb(&jsk->sk, skb) < 0) + kfree_skb(skb); +} + +bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb) +{ + struct j1939_sock *jsk; + bool match = false; + + spin_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + match = j1939_sk_recv_match_one(jsk, skcb); + if (match) + break; + } + spin_unlock_bh(&priv->j1939_socks_lock); + + return match; +} + +void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sock *jsk; + + spin_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + j1939_sk_recv_one(jsk, skb); + } + spin_unlock_bh(&priv->j1939_socks_lock); +} + +static int j1939_sk_init(struct sock *sk) +{ + struct j1939_sock *jsk = j1939_sk(sk); + + /* Ensure that "sk" is first member in "struct j1939_sock", so that we + * can skip it during memset(). + */ + BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0); + memset((void *)jsk + sizeof(jsk->sk), 0x0, + sizeof(*jsk) - sizeof(jsk->sk)); + + INIT_LIST_HEAD(&jsk->list); + init_waitqueue_head(&jsk->waitq); + jsk->sk.sk_priority = j1939_to_sk_priority(6); + jsk->sk.sk_reuse = 1; /* per default */ + jsk->addr.sa = J1939_NO_ADDR; + jsk->addr.da = J1939_NO_ADDR; + jsk->addr.pgn = J1939_NO_PGN; + jsk->pgn_rx_filter = J1939_NO_PGN; + atomic_set(&jsk->skb_pending, 0); + spin_lock_init(&jsk->sk_session_queue_lock); + INIT_LIST_HEAD(&jsk->sk_session_queue); + + return 0; +} + +static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len) +{ + if (!addr) + return -EDESTADDRREQ; + if (len < J1939_MIN_NAMELEN) + return -EINVAL; + if (addr->can_family != AF_CAN) + return -EINVAL; + if (!addr->can_ifindex) + return -ENODEV; + if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) && + !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) + return -EINVAL; + + return 0; +} + +static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) +{ + struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; + struct j1939_sock *jsk = j1939_sk(sock->sk); + struct j1939_priv *priv = jsk->priv; + struct sock *sk = sock->sk; + struct net *net = sock_net(sk); + int ret = 0; + + ret = j1939_sk_sanity_check(addr, len); + if (ret) + return ret; + + lock_sock(sock->sk); + + /* Already bound to an interface? */ + if (jsk->state & J1939_SOCK_BOUND) { + /* A re-bind() to a different interface is not + * supported. 
+ */ + if (jsk->ifindex != addr->can_ifindex) { + ret = -EINVAL; + goto out_release_sock; + } + + /* drop old references */ + j1939_jsk_del(priv, jsk); + j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa); + } else { + struct net_device *ndev; + + ndev = dev_get_by_index(net, addr->can_ifindex); + if (!ndev) { + ret = -ENODEV; + goto out_release_sock; + } + + if (ndev->type != ARPHRD_CAN) { + dev_put(ndev); + ret = -ENODEV; + goto out_release_sock; + } + + priv = j1939_netdev_start(ndev); + dev_put(ndev); + if (IS_ERR(priv)) { + ret = PTR_ERR(priv); + goto out_release_sock; + } + + jsk->ifindex = addr->can_ifindex; + } + + /* set default transmit pgn */ + if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn)) + jsk->pgn_rx_filter = addr->can_addr.j1939.pgn; + jsk->addr.src_name = addr->can_addr.j1939.name; + jsk->addr.sa = addr->can_addr.j1939.addr; + + /* get new references */ + ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa); + if (ret) { + j1939_netdev_stop(priv); + goto out_release_sock; + } + + j1939_jsk_add(priv, jsk); + + out_release_sock: /* fall through */ + release_sock(sock->sk); + + return ret; +} + +static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr, + int len, int flags) +{ + struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; + struct j1939_sock *jsk = j1939_sk(sock->sk); + int ret = 0; + + ret = j1939_sk_sanity_check(addr, len); + if (ret) + return ret; + + lock_sock(sock->sk); + + /* bind() before connect() is mandatory */ + if (!(jsk->state & J1939_SOCK_BOUND)) { + ret = -EINVAL; + goto out_release_sock; + } + + /* A connect() to a different interface is not supported. */ + if (jsk->ifindex != addr->can_ifindex) { + ret = -EINVAL; + goto out_release_sock; + } + + if (!addr->can_addr.j1939.name && + addr->can_addr.j1939.addr == J1939_NO_ADDR && + !sock_flag(&jsk->sk, SOCK_BROADCAST)) { + /* broadcast, but SO_BROADCAST not set */ + ret = -EACCES; + goto out_release_sock; + } + + jsk->addr.dst_name = addr->can_addr.j1939.name; + jsk->addr.da = addr->can_addr.j1939.addr; + + if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn)) + jsk->addr.pgn = addr->can_addr.j1939.pgn; + + jsk->state |= J1939_SOCK_CONNECTED; + + out_release_sock: /* fall through */ + release_sock(sock->sk); + + return ret; +} + +static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr, + const struct j1939_sock *jsk, int peer) +{ + addr->can_family = AF_CAN; + addr->can_ifindex = jsk->ifindex; + addr->can_addr.j1939.pgn = jsk->addr.pgn; + if (peer) { + addr->can_addr.j1939.name = jsk->addr.dst_name; + addr->can_addr.j1939.addr = jsk->addr.da; + } else { + addr->can_addr.j1939.name = jsk->addr.src_name; + addr->can_addr.j1939.addr = jsk->addr.sa; + } +} + +static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr, + int peer) +{ + struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; + struct sock *sk = sock->sk; + struct j1939_sock *jsk = j1939_sk(sk); + int ret = 0; + + lock_sock(sk); + + if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) { + ret = -EADDRNOTAVAIL; + goto failure; + } + + j1939_sk_sock2sockaddr_can(addr, jsk, peer); + ret = J1939_MIN_NAMELEN; + + failure: + release_sock(sk); + + return ret; +} + +static int j1939_sk_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct j1939_sock *jsk; + + if (!sk) + return 0; + + jsk = j1939_sk(sk); + lock_sock(sk); + + if (jsk->state & J1939_SOCK_BOUND) { + struct j1939_priv *priv = jsk->priv; + + if (wait_event_interruptible(jsk->waitq, + 
!j1939_sock_pending_get(&jsk->sk))) { + j1939_cancel_active_session(priv, sk); + j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN); + } + + j1939_jsk_del(priv, jsk); + + j1939_local_ecu_put(priv, jsk->addr.src_name, + jsk->addr.sa); + + j1939_netdev_stop(priv); + } + + sock_orphan(sk); + sock->sk = NULL; + + release_sock(sk); + sock_put(sk); + + return 0; +} + +static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval, + unsigned int optlen, int flag) +{ + int tmp; + + if (optlen != sizeof(tmp)) + return -EINVAL; + if (copy_from_user(&tmp, optval, optlen)) + return -EFAULT; + lock_sock(&jsk->sk); + if (tmp) + jsk->state |= flag; + else + jsk->state &= ~flag; + release_sock(&jsk->sk); + return tmp; +} + +static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, + char __user *optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct j1939_sock *jsk = j1939_sk(sk); + int tmp, count = 0, ret = 0; + struct j1939_filter *filters = NULL, *ofilters; + + if (level != SOL_CAN_J1939) + return -EINVAL; + + switch (optname) { + case SO_J1939_FILTER: + if (optval) { + struct j1939_filter *f; + int c; + + if (optlen % sizeof(*filters) != 0) + return -EINVAL; + + if (optlen > J1939_FILTER_MAX * + sizeof(struct j1939_filter)) + return -EINVAL; + + count = optlen / sizeof(*filters); + filters = memdup_user(optval, optlen); + if (IS_ERR(filters)) + return PTR_ERR(filters); + + for (f = filters, c = count; c; f++, c--) { + f->name &= f->name_mask; + f->pgn &= f->pgn_mask; + f->addr &= f->addr_mask; + } + } + + lock_sock(&jsk->sk); + ofilters = jsk->filters; + jsk->filters = filters; + jsk->nfilters = count; + release_sock(&jsk->sk); + kfree(ofilters); + return 0; + case SO_J1939_PROMISC: + return j1939_sk_setsockopt_flag(jsk, optval, optlen, + J1939_SOCK_PROMISC); + case SO_J1939_ERRQUEUE: + ret = j1939_sk_setsockopt_flag(jsk, optval, optlen, + J1939_SOCK_ERRQUEUE); + if (ret < 0) + return ret; + + if (!(jsk->state & J1939_SOCK_ERRQUEUE)) + skb_queue_purge(&sk->sk_error_queue); + return ret; + case SO_J1939_SEND_PRIO: + if (optlen != sizeof(tmp)) + return -EINVAL; + if (copy_from_user(&tmp, optval, optlen)) + return -EFAULT; + if (tmp < 0 || tmp > 7) + return -EDOM; + if (tmp < 2 && !capable(CAP_NET_ADMIN)) + return -EPERM; + lock_sock(&jsk->sk); + jsk->sk.sk_priority = j1939_to_sk_priority(tmp); + release_sock(&jsk->sk); + return 0; + default: + return -ENOPROTOOPT; + } +} + +static int j1939_sk_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct j1939_sock *jsk = j1939_sk(sk); + int ret, ulen; + /* set defaults for using 'int' properties */ + int tmp = 0; + int len = sizeof(tmp); + void *val = &tmp; + + if (level != SOL_CAN_J1939) + return -EINVAL; + if (get_user(ulen, optlen)) + return -EFAULT; + if (ulen < 0) + return -EINVAL; + + lock_sock(&jsk->sk); + switch (optname) { + case SO_J1939_PROMISC: + tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0; + break; + case SO_J1939_ERRQUEUE: + tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 
1 : 0; + break; + case SO_J1939_SEND_PRIO: + tmp = j1939_prio(jsk->sk.sk_priority); + break; + default: + ret = -ENOPROTOOPT; + goto no_copy; + } + + /* copy to user, based on 'len' & 'val' + * but most sockopt's are 'int' properties, and have 'len' & 'val' + * left unchanged, but instead modified 'tmp' + */ + if (len > ulen) + ret = -EFAULT; + else if (put_user(len, optlen)) + ret = -EFAULT; + else if (copy_to_user(optval, val, len)) + ret = -EFAULT; + else + ret = 0; + no_copy: + release_sock(&jsk->sk); + return ret; +} + +static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb; + struct j1939_sk_buff_cb *skcb; + int ret = 0; + + if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE)) + return -EINVAL; + + if (flags & MSG_ERRQUEUE) + return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939, + SCM_J1939_ERRQUEUE); + + skb = skb_recv_datagram(sk, flags, 0, &ret); + if (!skb) + return ret; + + if (size < skb->len) + msg->msg_flags |= MSG_TRUNC; + else + size = skb->len; + + ret = memcpy_to_msg(msg, skb->data, size); + if (ret < 0) { + skb_free_datagram(sk, skb); + return ret; + } + + skcb = j1939_skb_to_cb(skb); + if (j1939_address_is_valid(skcb->addr.da)) + put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR, + sizeof(skcb->addr.da), &skcb->addr.da); + + if (skcb->addr.dst_name) + put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME, + sizeof(skcb->addr.dst_name), &skcb->addr.dst_name); + + put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO, + sizeof(skcb->priority), &skcb->priority); + + if (msg->msg_name) { + struct sockaddr_can *paddr = msg->msg_name; + + msg->msg_namelen = J1939_MIN_NAMELEN; + memset(msg->msg_name, 0, msg->msg_namelen); + paddr->can_family = AF_CAN; + paddr->can_ifindex = skb->skb_iif; + paddr->can_addr.j1939.name = skcb->addr.src_name; + paddr->can_addr.j1939.addr = skcb->addr.sa; + paddr->can_addr.j1939.pgn = skcb->addr.pgn; + } + + sock_recv_ts_and_drops(msg, sk, skb); + msg->msg_flags |= skcb->msg_flags; + skb_free_datagram(sk, skb); + + return size; +} + +static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev, + struct sock *sk, + struct msghdr *msg, size_t size, + int *errcode) +{ + struct j1939_sock *jsk = j1939_sk(sk); + struct j1939_sk_buff_cb *skcb; + struct sk_buff *skb; + int ret; + + skb = sock_alloc_send_skb(sk, + size + + sizeof(struct can_frame) - + sizeof(((struct can_frame *)NULL)->data) + + sizeof(struct can_skb_priv), + msg->msg_flags & MSG_DONTWAIT, &ret); + if (!skb) + goto failure; + + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = ndev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + skb_reserve(skb, offsetof(struct can_frame, data)); + + ret = memcpy_from_msg(skb_put(skb, size), msg, size); + if (ret < 0) + goto free_skb; + + skb->dev = ndev; + + skcb = j1939_skb_to_cb(skb); + memset(skcb, 0, sizeof(*skcb)); + skcb->addr = jsk->addr; + skcb->priority = j1939_prio(sk->sk_priority); + + if (msg->msg_name) { + struct sockaddr_can *addr = msg->msg_name; + + if (addr->can_addr.j1939.name || + addr->can_addr.j1939.addr != J1939_NO_ADDR) { + skcb->addr.dst_name = addr->can_addr.j1939.name; + skcb->addr.da = addr->can_addr.j1939.addr; + } + if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn)) + skcb->addr.pgn = addr->can_addr.j1939.pgn; + } + + *errcode = ret; + return skb; + +free_skb: + kfree_skb(skb); +failure: + *errcode = ret; + return NULL; +} + +static size_t j1939_sk_opt_stats_get_size(void) +{ + return + nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */ + 
0; +} + +static struct sk_buff * +j1939_sk_get_timestamping_opt_stats(struct j1939_session *session) +{ + struct sk_buff *stats; + u32 size; + + stats = alloc_skb(j1939_sk_opt_stats_get_size(), GFP_ATOMIC); + if (!stats) + return NULL; + + if (session->skcb.addr.type == J1939_SIMPLE) + size = session->total_message_size; + else + size = min(session->pkt.tx_acked * 7, + session->total_message_size); + + nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size); + + return stats; +} + +void j1939_sk_errqueue(struct j1939_session *session, + enum j1939_sk_errqueue_type type) +{ + struct j1939_priv *priv = session->priv; + struct sock *sk = session->sk; + struct j1939_sock *jsk; + struct sock_exterr_skb *serr; + struct sk_buff *skb; + char *state = "UNK"; + int err; + + /* currently we have no sk for the RX session */ + if (!sk) + return; + + jsk = j1939_sk(sk); + + if (!(jsk->state & J1939_SOCK_ERRQUEUE)) + return; + + skb = j1939_sk_get_timestamping_opt_stats(session); + if (!skb) + return; + + skb->tstamp = ktime_get_real(); + + BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + switch (type) { + case J1939_ERRQUEUE_ACK: + if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) + return; + + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + serr->ee.ee_info = SCM_TSTAMP_ACK; + state = "ACK"; + break; + case J1939_ERRQUEUE_SCHED: + if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) + return; + + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + serr->ee.ee_info = SCM_TSTAMP_SCHED; + state = "SCH"; + break; + case J1939_ERRQUEUE_ABORT: + serr->ee.ee_errno = session->err; + serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL; + serr->ee.ee_info = J1939_EE_INFO_TX_ABORT; + state = "ABT"; + break; + default: + netdev_err(priv->ndev, "Unknown errqueue type %i\n", type); + } + + serr->opt_stats = true; + if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) + serr->ee.ee_data = session->tskey; + + netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n", + __func__, session, session->tskey, state); + err = sock_queue_err_skb(sk, skb); + + if (err) + kfree_skb(skb); +}; + +void j1939_sk_send_loop_abort(struct sock *sk, int err) +{ + sk->sk_err = err; + + sk->sk_error_report(sk); +} + +static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk, + struct msghdr *msg, size_t size) + +{ + struct j1939_sock *jsk = j1939_sk(sk); + struct j1939_session *session = j1939_sk_get_incomplete_session(jsk); + struct sk_buff *skb; + size_t segment_size, todo_size; + int ret = 0; + + if (session && + session->total_message_size != session->total_queued_size + size) { + j1939_session_put(session); + return -EIO; + } + + todo_size = size; + + while (todo_size) { + struct j1939_sk_buff_cb *skcb; + + segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE, + todo_size); + + /* Allocate skb for one segment */ + skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size, + &ret); + if (ret) + break; + + skcb = j1939_skb_to_cb(skb); + + if (!session) { + /* at this point the size should be full size + * of the session + */ + skcb->offset = 0; + session = j1939_tp_send(priv, skb, size); + if (IS_ERR(session)) { + ret = PTR_ERR(session); + goto kfree_skb; + } + if (j1939_sk_queue_session(session)) { + /* try to activate session if we a + * fist in the queue + */ + if (!j1939_session_activate(session)) { + j1939_tp_schedule_txtimer(session, 0); + } else { + ret = -EBUSY; + session->err = ret; + 
j1939_sk_queue_drop_all(priv, jsk, + EBUSY); + break; + } + } + } else { + skcb->offset = session->total_queued_size; + j1939_session_skb_queue(session, skb); + } + + todo_size -= segment_size; + session->total_queued_size += segment_size; + } + + switch (ret) { + case 0: /* OK */ + if (todo_size) + netdev_warn(priv->ndev, + "no error found and not completely queued?! %zu\n", + todo_size); + ret = size; + break; + case -ERESTARTSYS: + ret = -EINTR; + /* fall through */ + case -EAGAIN: /* OK */ + if (todo_size != size) + ret = size - todo_size; + break; + default: /* ERROR */ + break; + } + + if (session) + j1939_session_put(session); + + return ret; + + kfree_skb: + kfree_skb(skb); + return ret; +} + +static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg, + size_t size) +{ + struct sock *sk = sock->sk; + struct j1939_sock *jsk = j1939_sk(sk); + struct j1939_priv *priv = jsk->priv; + int ifindex; + int ret; + + /* various socket state tests */ + if (!(jsk->state & J1939_SOCK_BOUND)) + return -EBADFD; + + ifindex = jsk->ifindex; + + if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) + /* no source address assigned yet */ + return -EBADFD; + + /* deal with provided destination address info */ + if (msg->msg_name) { + struct sockaddr_can *addr = msg->msg_name; + + if (msg->msg_namelen < J1939_MIN_NAMELEN) + return -EINVAL; + + if (addr->can_family != AF_CAN) + return -EINVAL; + + if (addr->can_ifindex && addr->can_ifindex != ifindex) + return -EBADFD; + + if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) && + !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) + return -EINVAL; + + if (!addr->can_addr.j1939.name && + addr->can_addr.j1939.addr == J1939_NO_ADDR && + !sock_flag(sk, SOCK_BROADCAST)) + /* broadcast, but SO_BROADCAST not set */ + return -EACCES; + } else { + if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR && + !sock_flag(sk, SOCK_BROADCAST)) + /* broadcast, but SO_BROADCAST not set */ + return -EACCES; + } + + ret = j1939_sk_send_loop(priv, sk, msg, size); + + return ret; +} + +void j1939_sk_netdev_event_netdown(struct j1939_priv *priv) +{ + struct j1939_sock *jsk; + int error_code = ENETDOWN; + + spin_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + jsk->sk.sk_err = error_code; + if (!sock_flag(&jsk->sk, SOCK_DEAD)) + jsk->sk.sk_error_report(&jsk->sk); + + j1939_sk_queue_drop_all(priv, jsk, error_code); + } + spin_unlock_bh(&priv->j1939_socks_lock); +} + +static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd, + unsigned long arg) +{ + /* no ioctls for socket layer -> hand it down to NIC layer */ + return -ENOIOCTLCMD; +} + +static const struct proto_ops j1939_ops = { + .family = PF_CAN, + .release = j1939_sk_release, + .bind = j1939_sk_bind, + .connect = j1939_sk_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = j1939_sk_getname, + .poll = datagram_poll, + .ioctl = j1939_sk_no_ioctlcmd, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = j1939_sk_setsockopt, + .getsockopt = j1939_sk_getsockopt, + .sendmsg = j1939_sk_sendmsg, + .recvmsg = j1939_sk_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, +}; + +static struct proto j1939_proto __read_mostly = { + .name = "CAN_J1939", + .owner = THIS_MODULE, + .obj_size = sizeof(struct j1939_sock), + .init = j1939_sk_init, +}; + +const struct can_proto j1939_can_proto = { + .type = SOCK_DGRAM, + .protocol = CAN_J1939, + .ops = &j1939_ops, + .prot = &j1939_proto, +}; diff --git 
a/net/can/j1939/transport.c b/net/can/j1939/transport.c new file mode 100644 index 000000000000..fe000ea757ea --- /dev/null +++ b/net/can/j1939/transport.c @@ -0,0 +1,2027 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2018 Protonic, +// Robin van der Gracht +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +#include + +#include "j1939-priv.h" + +#define J1939_XTP_TX_RETRY_LIMIT 100 + +#define J1939_ETP_PGN_CTL 0xc800 +#define J1939_ETP_PGN_DAT 0xc700 +#define J1939_TP_PGN_CTL 0xec00 +#define J1939_TP_PGN_DAT 0xeb00 + +#define J1939_TP_CMD_RTS 0x10 +#define J1939_TP_CMD_CTS 0x11 +#define J1939_TP_CMD_EOMA 0x13 +#define J1939_TP_CMD_BAM 0x20 +#define J1939_TP_CMD_ABORT 0xff + +#define J1939_ETP_CMD_RTS 0x14 +#define J1939_ETP_CMD_CTS 0x15 +#define J1939_ETP_CMD_DPO 0x16 +#define J1939_ETP_CMD_EOMA 0x17 +#define J1939_ETP_CMD_ABORT 0xff + +enum j1939_xtp_abort { + J1939_XTP_NO_ABORT = 0, + J1939_XTP_ABORT_BUSY = 1, + /* Already in one or more connection managed sessions and + * cannot support another. + * + * EALREADY: + * Operation already in progress + */ + + J1939_XTP_ABORT_RESOURCE = 2, + /* System resources were needed for another task so this + * connection managed session was terminated. + * + * EMSGSIZE: + * The socket type requires that message be sent atomically, + * and the size of the message to be sent made this + * impossible. + */ + + J1939_XTP_ABORT_TIMEOUT = 3, + /* A timeout occurred and this is the connection abort to + * close the session. + * + * EHOSTUNREACH: + * The destination host cannot be reached (probably because + * the host is down or a remote router cannot reach it). + */ + + J1939_XTP_ABORT_GENERIC = 4, + /* CTS messages received when data transfer is in progress + * + * EBADMSG: + * Not a data message + */ + + J1939_XTP_ABORT_FAULT = 5, + /* Maximal retransmit request limit reached + * + * ENOTRECOVERABLE: + * State not recoverable + */ + + J1939_XTP_ABORT_UNEXPECTED_DATA = 6, + /* Unexpected data transfer packet + * + * ENOTCONN: + * Transport endpoint is not connected + */ + + J1939_XTP_ABORT_BAD_SEQ = 7, + /* Bad sequence number (and software is not able to recover) + * + * EILSEQ: + * Illegal byte sequence + */ + + J1939_XTP_ABORT_DUP_SEQ = 8, + /* Duplicate sequence number (and software is not able to + * recover) + */ + + J1939_XTP_ABORT_EDPO_UNEXPECTED = 9, + /* Unexpected EDPO packet (ETP) or Message size > 1785 bytes + * (TP) + */ + + J1939_XTP_ABORT_BAD_EDPO_PGN = 10, + /* Unexpected EDPO PGN (PGN in EDPO is bad) */ + + J1939_XTP_ABORT_EDPO_OUTOF_CTS = 11, + /* EDPO number of packets is greater than CTS */ + + J1939_XTP_ABORT_BAD_EDPO_OFFSET = 12, + /* Bad EDPO offset */ + + J1939_XTP_ABORT_OTHER_DEPRECATED = 13, + /* Deprecated. 
Use 250 instead (Any other reason) */ + + J1939_XTP_ABORT_ECTS_UNXPECTED_PGN = 14, + /* Unexpected ECTS PGN (PGN in ECTS is bad) */ + + J1939_XTP_ABORT_ECTS_TOO_BIG = 15, + /* ECTS requested packets exceeds message size */ + + J1939_XTP_ABORT_OTHER = 250, + /* Any other reason (if a Connection Abort reason is + * identified that is not listed in the table use code 250) + */ +}; + +static unsigned int j1939_tp_block = 255; +static unsigned int j1939_tp_packet_delay; +static unsigned int j1939_tp_padding = 1; + +/* helpers */ +static const char *j1939_xtp_abort_to_str(enum j1939_xtp_abort abort) +{ + switch (abort) { + case J1939_XTP_ABORT_BUSY: + return "Already in one or more connection managed sessions and cannot support another."; + case J1939_XTP_ABORT_RESOURCE: + return "System resources were needed for another task so this connection managed session was terminated."; + case J1939_XTP_ABORT_TIMEOUT: + return "A timeout occurred and this is the connection abort to close the session."; + case J1939_XTP_ABORT_GENERIC: + return "CTS messages received when data transfer is in progress"; + case J1939_XTP_ABORT_FAULT: + return "Maximal retransmit request limit reached"; + case J1939_XTP_ABORT_UNEXPECTED_DATA: + return "Unexpected data transfer packet"; + case J1939_XTP_ABORT_BAD_SEQ: + return "Bad sequence number (and software is not able to recover)"; + case J1939_XTP_ABORT_DUP_SEQ: + return "Duplicate sequence number (and software is not able to recover)"; + case J1939_XTP_ABORT_EDPO_UNEXPECTED: + return "Unexpected EDPO packet (ETP) or Message size > 1785 bytes (TP)"; + case J1939_XTP_ABORT_BAD_EDPO_PGN: + return "Unexpected EDPO PGN (PGN in EDPO is bad)"; + case J1939_XTP_ABORT_EDPO_OUTOF_CTS: + return "EDPO number of packets is greater than CTS"; + case J1939_XTP_ABORT_BAD_EDPO_OFFSET: + return "Bad EDPO offset"; + case J1939_XTP_ABORT_OTHER_DEPRECATED: + return "Deprecated. 
Use 250 instead (Any other reason)"; + case J1939_XTP_ABORT_ECTS_UNXPECTED_PGN: + return "Unexpected ECTS PGN (PGN in ECTS is bad)"; + case J1939_XTP_ABORT_ECTS_TOO_BIG: + return "ECTS requested packets exceeds message size"; + case J1939_XTP_ABORT_OTHER: + return "Any other reason (if a Connection Abort reason is identified that is not listed in the table use code 250)"; + default: + return ""; + } +} + +static int j1939_xtp_abort_to_errno(struct j1939_priv *priv, + enum j1939_xtp_abort abort) +{ + int err; + + switch (abort) { + case J1939_XTP_NO_ABORT: + WARN_ON_ONCE(abort == J1939_XTP_NO_ABORT); + err = 0; + break; + case J1939_XTP_ABORT_BUSY: + err = EALREADY; + break; + case J1939_XTP_ABORT_RESOURCE: + err = EMSGSIZE; + break; + case J1939_XTP_ABORT_TIMEOUT: + err = EHOSTUNREACH; + break; + case J1939_XTP_ABORT_GENERIC: + err = EBADMSG; + break; + case J1939_XTP_ABORT_FAULT: + err = ENOTRECOVERABLE; + break; + case J1939_XTP_ABORT_UNEXPECTED_DATA: + err = ENOTCONN; + break; + case J1939_XTP_ABORT_BAD_SEQ: + err = EILSEQ; + break; + case J1939_XTP_ABORT_DUP_SEQ: + err = EPROTO; + break; + case J1939_XTP_ABORT_EDPO_UNEXPECTED: + err = EPROTO; + break; + case J1939_XTP_ABORT_BAD_EDPO_PGN: + err = EPROTO; + break; + case J1939_XTP_ABORT_EDPO_OUTOF_CTS: + err = EPROTO; + break; + case J1939_XTP_ABORT_BAD_EDPO_OFFSET: + err = EPROTO; + break; + case J1939_XTP_ABORT_OTHER_DEPRECATED: + err = EPROTO; + break; + case J1939_XTP_ABORT_ECTS_UNXPECTED_PGN: + err = EPROTO; + break; + case J1939_XTP_ABORT_ECTS_TOO_BIG: + err = EPROTO; + break; + case J1939_XTP_ABORT_OTHER: + err = EPROTO; + break; + default: + netdev_warn(priv->ndev, "Unknown abort code %i", abort); + err = EPROTO; + } + + return err; +} + +static inline void j1939_session_list_lock(struct j1939_priv *priv) +{ + spin_lock_bh(&priv->active_session_list_lock); +} + +static inline void j1939_session_list_unlock(struct j1939_priv *priv) +{ + spin_unlock_bh(&priv->active_session_list_lock); +} + +void j1939_session_get(struct j1939_session *session) +{ + kref_get(&session->kref); +} + +/* session completion functions */ +static void __j1939_session_drop(struct j1939_session *session) +{ + if (!session->transmission) + return; + + j1939_sock_pending_del(session->sk); +} + +static void j1939_session_destroy(struct j1939_session *session) +{ + if (session->err) + j1939_sk_errqueue(session, J1939_ERRQUEUE_ABORT); + else + j1939_sk_errqueue(session, J1939_ERRQUEUE_ACK); + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + skb_queue_purge(&session->skb_queue); + __j1939_session_drop(session); + j1939_priv_put(session->priv); + kfree(session); +} + +static void __j1939_session_release(struct kref *kref) +{ + struct j1939_session *session = container_of(kref, struct j1939_session, + kref); + + j1939_session_destroy(session); +} + +void j1939_session_put(struct j1939_session *session) +{ + kref_put(&session->kref, __j1939_session_release); +} + +static void j1939_session_txtimer_cancel(struct j1939_session *session) +{ + if (hrtimer_cancel(&session->txtimer)) + j1939_session_put(session); +} + +static void j1939_session_rxtimer_cancel(struct j1939_session *session) +{ + if (hrtimer_cancel(&session->rxtimer)) + j1939_session_put(session); +} + +void j1939_session_timers_cancel(struct j1939_session *session) +{ + j1939_session_txtimer_cancel(session); + j1939_session_rxtimer_cancel(session); +} + +static inline bool j1939_cb_is_broadcast(const struct j1939_sk_buff_cb *skcb) +{ + return (!skcb->addr.dst_name && (skcb->addr.da 
== 0xff)); +} + +static void j1939_session_skb_drop_old(struct j1939_session *session) +{ + struct sk_buff *do_skb; + struct j1939_sk_buff_cb *do_skcb; + unsigned int offset_start; + unsigned long flags; + + if (skb_queue_len(&session->skb_queue) < 2) + return; + + offset_start = session->pkt.tx_acked * 7; + + spin_lock_irqsave(&session->skb_queue.lock, flags); + do_skb = skb_peek(&session->skb_queue); + do_skcb = j1939_skb_to_cb(do_skb); + + if ((do_skcb->offset + do_skb->len) < offset_start) { + __skb_unlink(do_skb, &session->skb_queue); + kfree_skb(do_skb); + } + spin_unlock_irqrestore(&session->skb_queue.lock, flags); +} + +void j1939_session_skb_queue(struct j1939_session *session, + struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_priv *priv = session->priv; + + j1939_ac_fixup(priv, skb); + + if (j1939_address_is_unicast(skcb->addr.da) && + priv->ents[skcb->addr.da].nusers) + skcb->flags |= J1939_ECU_LOCAL_DST; + + skcb->flags |= J1939_ECU_LOCAL_SRC; + + skb_queue_tail(&session->skb_queue, skb); +} + +static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + struct sk_buff *skb = NULL; + struct sk_buff *do_skb; + struct j1939_sk_buff_cb *do_skcb; + unsigned int offset_start; + unsigned long flags; + + offset_start = session->pkt.dpo * 7; + + spin_lock_irqsave(&session->skb_queue.lock, flags); + skb_queue_walk(&session->skb_queue, do_skb) { + do_skcb = j1939_skb_to_cb(do_skb); + + if (offset_start >= do_skcb->offset && + offset_start < (do_skcb->offset + do_skb->len)) { + skb = do_skb; + } + } + spin_unlock_irqrestore(&session->skb_queue.lock, flags); + + if (!skb) + netdev_dbg(priv->ndev, "%s: 0x%p: no skb found for start: %i, queue size: %i\n", + __func__, session, offset_start, + skb_queue_len(&session->skb_queue)); + + return skb; +} + +/* see if we are receiver + * returns 0 for broadcasts, although we will receive them + */ +static inline int j1939_tp_im_receiver(const struct j1939_sk_buff_cb *skcb) +{ + return skcb->flags & J1939_ECU_LOCAL_DST; +} + +/* see if we are sender */ +static inline int j1939_tp_im_transmitter(const struct j1939_sk_buff_cb *skcb) +{ + return skcb->flags & J1939_ECU_LOCAL_SRC; +} + +/* see if we are involved as either receiver or transmitter */ +static int j1939_tp_im_involved(const struct j1939_sk_buff_cb *skcb, bool swap) +{ + if (swap) + return j1939_tp_im_receiver(skcb); + else + return j1939_tp_im_transmitter(skcb); +} + +static int j1939_tp_im_involved_anydir(struct j1939_sk_buff_cb *skcb) +{ + return skcb->flags & (J1939_ECU_LOCAL_SRC | J1939_ECU_LOCAL_DST); +} + +/* extract pgn from flow-ctl message */ +static inline pgn_t j1939_xtp_ctl_to_pgn(const u8 *dat) +{ + pgn_t pgn; + + pgn = (dat[7] << 16) | (dat[6] << 8) | (dat[5] << 0); + if (j1939_pgn_is_pdu1(pgn)) + pgn &= 0xffff00; + return pgn; +} + +static inline unsigned int j1939_tp_ctl_to_size(const u8 *dat) +{ + return (dat[2] << 8) + (dat[1] << 0); +} + +static inline unsigned int j1939_etp_ctl_to_packet(const u8 *dat) +{ + return (dat[4] << 16) | (dat[3] << 8) | (dat[2] << 0); +} + +static inline unsigned int j1939_etp_ctl_to_size(const u8 *dat) +{ + return (dat[4] << 24) | (dat[3] << 16) | + (dat[2] << 8) | (dat[1] << 0); +} + +/* find existing session: + * reverse: swap cb's src & dst + * there is no problem with matching broadcasts, since + * broadcasts (no dst, no da) would never call this + * with reverse == true + */ +static bool j1939_session_match(struct j1939_addr 
*se_addr, + struct j1939_addr *sk_addr, bool reverse) +{ + if (se_addr->type != sk_addr->type) + return false; + + if (reverse) { + if (se_addr->src_name) { + if (se_addr->src_name != sk_addr->dst_name) + return false; + } else if (se_addr->sa != sk_addr->da) { + return false; + } + + if (se_addr->dst_name) { + if (se_addr->dst_name != sk_addr->src_name) + return false; + } else if (se_addr->da != sk_addr->sa) { + return false; + } + } else { + if (se_addr->src_name) { + if (se_addr->src_name != sk_addr->src_name) + return false; + } else if (se_addr->sa != sk_addr->sa) { + return false; + } + + if (se_addr->dst_name) { + if (se_addr->dst_name != sk_addr->dst_name) + return false; + } else if (se_addr->da != sk_addr->da) { + return false; + } + } + + return true; +} + +static struct +j1939_session *j1939_session_get_by_addr_locked(struct j1939_priv *priv, + struct list_head *root, + struct j1939_addr *addr, + bool reverse, bool transmitter) +{ + struct j1939_session *session; + + lockdep_assert_held(&priv->active_session_list_lock); + + list_for_each_entry(session, root, active_session_list_entry) { + j1939_session_get(session); + if (j1939_session_match(&session->skcb.addr, addr, reverse) && + session->transmission == transmitter) + return session; + j1939_session_put(session); + } + + return NULL; +} + +static struct +j1939_session *j1939_session_get_simple(struct j1939_priv *priv, + struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + + lockdep_assert_held(&priv->active_session_list_lock); + + list_for_each_entry(session, &priv->active_session_list, + active_session_list_entry) { + j1939_session_get(session); + if (session->skcb.addr.type == J1939_SIMPLE && + session->tskey == skcb->tskey && session->sk == skb->sk) + return session; + j1939_session_put(session); + } + + return NULL; +} + +static struct +j1939_session *j1939_session_get_by_addr(struct j1939_priv *priv, + struct j1939_addr *addr, + bool reverse, bool transmitter) +{ + struct j1939_session *session; + + j1939_session_list_lock(priv); + session = j1939_session_get_by_addr_locked(priv, + &priv->active_session_list, + addr, reverse, transmitter); + j1939_session_list_unlock(priv); + + return session; +} + +static void j1939_skbcb_swap(struct j1939_sk_buff_cb *skcb) +{ + u8 tmp = 0; + + swap(skcb->addr.dst_name, skcb->addr.src_name); + swap(skcb->addr.da, skcb->addr.sa); + + /* swap SRC and DST flags, leave other untouched */ + if (skcb->flags & J1939_ECU_LOCAL_SRC) + tmp |= J1939_ECU_LOCAL_DST; + if (skcb->flags & J1939_ECU_LOCAL_DST) + tmp |= J1939_ECU_LOCAL_SRC; + skcb->flags &= ~(J1939_ECU_LOCAL_SRC | J1939_ECU_LOCAL_DST); + skcb->flags |= tmp; +} + +static struct +sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv, + const struct j1939_sk_buff_cb *re_skcb, + bool ctl, + bool swap_src_dst) +{ + struct sk_buff *skb; + struct j1939_sk_buff_cb *skcb; + + skb = alloc_skb(sizeof(struct can_frame) + sizeof(struct can_skb_priv), + GFP_ATOMIC); + if (unlikely(!skb)) + return ERR_PTR(-ENOMEM); + + skb->dev = priv->ndev; + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = priv->ndev->ifindex; + /* reserve CAN header */ + skb_reserve(skb, offsetof(struct can_frame, data)); + + memcpy(skb->cb, re_skcb, sizeof(skb->cb)); + skcb = j1939_skb_to_cb(skb); + if (swap_src_dst) + j1939_skbcb_swap(skcb); + + if (ctl) { + if (skcb->addr.type == J1939_ETP) + skcb->addr.pgn = J1939_ETP_PGN_CTL; + else + skcb->addr.pgn = J1939_TP_PGN_CTL; + } else { + if (skcb->addr.type == 
J1939_ETP) + skcb->addr.pgn = J1939_ETP_PGN_DAT; + else + skcb->addr.pgn = J1939_TP_PGN_DAT; + } + + return skb; +} + +/* TP transmit packet functions */ +static int j1939_tp_tx_dat(struct j1939_session *session, + const u8 *dat, int len) +{ + struct j1939_priv *priv = session->priv; + struct sk_buff *skb; + + skb = j1939_tp_tx_dat_new(priv, &session->skcb, + false, false); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + skb_put_data(skb, dat, len); + if (j1939_tp_padding && len < 8) + memset(skb_put(skb, 8 - len), 0xff, 8 - len); + + return j1939_send_one(priv, skb); +} + +static int j1939_xtp_do_tx_ctl(struct j1939_priv *priv, + const struct j1939_sk_buff_cb *re_skcb, + bool swap_src_dst, pgn_t pgn, const u8 *dat) +{ + struct sk_buff *skb; + u8 *skdat; + + if (!j1939_tp_im_involved(re_skcb, swap_src_dst)) + return 0; + + skb = j1939_tp_tx_dat_new(priv, re_skcb, true, swap_src_dst); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + skdat = skb_put(skb, 8); + memcpy(skdat, dat, 5); + skdat[5] = (pgn >> 0); + skdat[6] = (pgn >> 8); + skdat[7] = (pgn >> 16); + + return j1939_send_one(priv, skb); +} + +static inline int j1939_tp_tx_ctl(struct j1939_session *session, + bool swap_src_dst, const u8 *dat) +{ + struct j1939_priv *priv = session->priv; + + return j1939_xtp_do_tx_ctl(priv, &session->skcb, + swap_src_dst, + session->skcb.addr.pgn, dat); +} + +static int j1939_xtp_tx_abort(struct j1939_priv *priv, + const struct j1939_sk_buff_cb *re_skcb, + bool swap_src_dst, + enum j1939_xtp_abort err, + pgn_t pgn) +{ + u8 dat[5]; + + if (!j1939_tp_im_involved(re_skcb, swap_src_dst)) + return 0; + + memset(dat, 0xff, sizeof(dat)); + dat[0] = J1939_TP_CMD_ABORT; + dat[1] = err; + return j1939_xtp_do_tx_ctl(priv, re_skcb, swap_src_dst, pgn, dat); +} + +void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec) +{ + j1939_session_get(session); + hrtimer_start(&session->txtimer, ms_to_ktime(msec), + HRTIMER_MODE_REL_SOFT); +} + +static inline void j1939_tp_set_rxtimeout(struct j1939_session *session, + int msec) +{ + j1939_session_rxtimer_cancel(session); + j1939_session_get(session); + hrtimer_start(&session->rxtimer, ms_to_ktime(msec), + HRTIMER_MODE_REL_SOFT); +} + +static int j1939_session_tx_rts(struct j1939_session *session) +{ + u8 dat[8]; + int ret; + + memset(dat, 0xff, sizeof(dat)); + + dat[1] = (session->total_message_size >> 0); + dat[2] = (session->total_message_size >> 8); + dat[3] = session->pkt.total; + + if (session->skcb.addr.type == J1939_ETP) { + dat[0] = J1939_ETP_CMD_RTS; + dat[1] = (session->total_message_size >> 0); + dat[2] = (session->total_message_size >> 8); + dat[3] = (session->total_message_size >> 16); + dat[4] = (session->total_message_size >> 24); + } else if (j1939_cb_is_broadcast(&session->skcb)) { + dat[0] = J1939_TP_CMD_BAM; + /* fake cts for broadcast */ + session->pkt.tx = 0; + } else { + dat[0] = J1939_TP_CMD_RTS; + dat[4] = dat[3]; + } + + if (dat[0] == session->last_txcmd) + /* done already */ + return 0; + + ret = j1939_tp_tx_ctl(session, false, dat); + if (ret < 0) + return ret; + + session->last_txcmd = dat[0]; + if (dat[0] == J1939_TP_CMD_BAM) + j1939_tp_schedule_txtimer(session, 50); + + j1939_tp_set_rxtimeout(session, 1250); + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + return 0; +} + +static int j1939_session_tx_dpo(struct j1939_session *session) +{ + unsigned int pkt; + u8 dat[8]; + int ret; + + memset(dat, 0xff, sizeof(dat)); + + dat[0] = J1939_ETP_CMD_DPO; + session->pkt.dpo = session->pkt.tx_acked; + pkt = 
session->pkt.dpo; + dat[1] = session->pkt.last - session->pkt.tx_acked; + dat[2] = (pkt >> 0); + dat[3] = (pkt >> 8); + dat[4] = (pkt >> 16); + + ret = j1939_tp_tx_ctl(session, false, dat); + if (ret < 0) + return ret; + + session->last_txcmd = dat[0]; + j1939_tp_set_rxtimeout(session, 1250); + session->pkt.tx = session->pkt.tx_acked; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + return 0; +} + +static int j1939_session_tx_dat(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + struct j1939_sk_buff_cb *skcb; + int offset, pkt_done, pkt_end; + unsigned int len, pdelay; + struct sk_buff *se_skb; + const u8 *tpdat; + int ret = 0; + u8 dat[8]; + + se_skb = j1939_session_skb_find(session); + if (!se_skb) + return -ENOBUFS; + + skcb = j1939_skb_to_cb(se_skb); + tpdat = se_skb->data; + ret = 0; + pkt_done = 0; + if (session->skcb.addr.type != J1939_ETP && + j1939_cb_is_broadcast(&session->skcb)) + pkt_end = session->pkt.total; + else + pkt_end = session->pkt.last; + + while (session->pkt.tx < pkt_end) { + dat[0] = session->pkt.tx - session->pkt.dpo + 1; + offset = (session->pkt.tx * 7) - skcb->offset; + len = se_skb->len - offset; + if (len > 7) + len = 7; + + memcpy(&dat[1], &tpdat[offset], len); + ret = j1939_tp_tx_dat(session, dat, len + 1); + if (ret < 0) { + /* ENOBUS == CAN interface TX queue is full */ + if (ret != -ENOBUFS) + netdev_alert(priv->ndev, + "%s: 0x%p: queue data error: %i\n", + __func__, session, ret); + break; + } + + session->last_txcmd = 0xff; + pkt_done++; + session->pkt.tx++; + pdelay = j1939_cb_is_broadcast(&session->skcb) ? 50 : + j1939_tp_packet_delay; + + if (session->pkt.tx < session->pkt.total && pdelay) { + j1939_tp_schedule_txtimer(session, pdelay); + break; + } + } + + if (pkt_done) + j1939_tp_set_rxtimeout(session, 250); + + return ret; +} + +static int j1939_xtp_txnext_transmiter(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + int ret = 0; + + if (!j1939_tp_im_transmitter(&session->skcb)) { + netdev_alert(priv->ndev, "%s: 0x%p: called by not transmitter!\n", + __func__, session); + return -EINVAL; + } + + switch (session->last_cmd) { + case 0: + ret = j1939_session_tx_rts(session); + break; + + case J1939_ETP_CMD_CTS: + if (session->last_txcmd != J1939_ETP_CMD_DPO) { + ret = j1939_session_tx_dpo(session); + if (ret) + return ret; + } + + /* fall through */ + case J1939_TP_CMD_CTS: + case 0xff: /* did some data */ + case J1939_ETP_CMD_DPO: + case J1939_TP_CMD_BAM: + ret = j1939_session_tx_dat(session); + + break; + default: + netdev_alert(priv->ndev, "%s: 0x%p: unexpected last_cmd: %x\n", + __func__, session, session->last_cmd); + } + + return ret; +} + +static int j1939_session_tx_cts(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + unsigned int pkt, len; + int ret; + u8 dat[8]; + + if (!j1939_sk_recv_match(priv, &session->skcb)) + return -ENOENT; + + len = session->pkt.total - session->pkt.rx; + len = min3(len, session->pkt.block, j1939_tp_block ?: 255); + memset(dat, 0xff, sizeof(dat)); + + if (session->skcb.addr.type == J1939_ETP) { + pkt = session->pkt.rx + 1; + dat[0] = J1939_ETP_CMD_CTS; + dat[1] = len; + dat[2] = (pkt >> 0); + dat[3] = (pkt >> 8); + dat[4] = (pkt >> 16); + } else { + dat[0] = J1939_TP_CMD_CTS; + dat[1] = len; + dat[2] = session->pkt.rx + 1; + } + + if (dat[0] == session->last_txcmd) + /* done already */ + return 0; + + ret = j1939_tp_tx_ctl(session, true, dat); + if (ret < 0) + return ret; + + if (len) + /* only mark cts 
done when len is set */ + session->last_txcmd = dat[0]; + j1939_tp_set_rxtimeout(session, 1250); + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + return 0; +} + +static int j1939_session_tx_eoma(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + u8 dat[8]; + int ret; + + if (!j1939_sk_recv_match(priv, &session->skcb)) + return -ENOENT; + + memset(dat, 0xff, sizeof(dat)); + + if (session->skcb.addr.type == J1939_ETP) { + dat[0] = J1939_ETP_CMD_EOMA; + dat[1] = session->total_message_size >> 0; + dat[2] = session->total_message_size >> 8; + dat[3] = session->total_message_size >> 16; + dat[4] = session->total_message_size >> 24; + } else { + dat[0] = J1939_TP_CMD_EOMA; + dat[1] = session->total_message_size; + dat[2] = session->total_message_size >> 8; + dat[3] = session->pkt.total; + } + + if (dat[0] == session->last_txcmd) + /* done already */ + return 0; + + ret = j1939_tp_tx_ctl(session, true, dat); + if (ret < 0) + return ret; + + session->last_txcmd = dat[0]; + + /* wait for the EOMA packet to come in */ + j1939_tp_set_rxtimeout(session, 1250); + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + return 0; +} + +static int j1939_xtp_txnext_receiver(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + int ret = 0; + + if (!j1939_tp_im_receiver(&session->skcb)) { + netdev_alert(priv->ndev, "%s: 0x%p: called by not receiver!\n", + __func__, session); + return -EINVAL; + } + + switch (session->last_cmd) { + case J1939_TP_CMD_RTS: + case J1939_ETP_CMD_RTS: + ret = j1939_session_tx_cts(session); + break; + + case J1939_ETP_CMD_CTS: + case J1939_TP_CMD_CTS: + case 0xff: /* did some data */ + case J1939_ETP_CMD_DPO: + if ((session->skcb.addr.type == J1939_TP && + j1939_cb_is_broadcast(&session->skcb))) + break; + + if (session->pkt.rx >= session->pkt.total) { + ret = j1939_session_tx_eoma(session); + } else if (session->pkt.rx >= session->pkt.last) { + session->last_txcmd = 0; + ret = j1939_session_tx_cts(session); + } + break; + default: + netdev_alert(priv->ndev, "%s: 0x%p: unexpected last_cmd: %x\n", + __func__, session, session->last_cmd); + } + + return ret; +} + +static int j1939_simple_txnext(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + struct sk_buff *se_skb = j1939_session_skb_find(session); + struct sk_buff *skb; + int ret; + + if (!se_skb) + return 0; + + skb = skb_clone(se_skb, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + can_skb_set_owner(skb, se_skb->sk); + + j1939_tp_set_rxtimeout(session, J1939_SIMPLE_ECHO_TIMEOUT_MS); + + ret = j1939_send_one(priv, skb); + if (ret) + return ret; + + j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED); + j1939_sk_queue_activate_next(session); + + return 0; +} + +static bool j1939_session_deactivate_locked(struct j1939_session *session) +{ + bool active = false; + + lockdep_assert_held(&session->priv->active_session_list_lock); + + if (session->state >= J1939_SESSION_ACTIVE && + session->state < J1939_SESSION_ACTIVE_MAX) { + active = true; + + list_del_init(&session->active_session_list_entry); + session->state = J1939_SESSION_DONE; + j1939_session_put(session); + } + + return active; +} + +static bool j1939_session_deactivate(struct j1939_session *session) +{ + bool active; + + j1939_session_list_lock(session->priv); + active = j1939_session_deactivate_locked(session); + j1939_session_list_unlock(session->priv); + + return active; +} + +static void +j1939_session_deactivate_activate_next(struct j1939_session
*session) +{ + if (j1939_session_deactivate(session)) + j1939_sk_queue_activate_next(session); +} + +static void j1939_session_cancel(struct j1939_session *session, + enum j1939_xtp_abort err) +{ + struct j1939_priv *priv = session->priv; + + WARN_ON_ONCE(!err); + + session->err = j1939_xtp_abort_to_errno(priv, err); + /* do not send aborts on incoming broadcasts */ + if (!j1939_cb_is_broadcast(&session->skcb)) { + session->state = J1939_SESSION_WAITING_ABORT; + j1939_xtp_tx_abort(priv, &session->skcb, + !session->transmission, + err, session->skcb.addr.pgn); + } + + if (session->sk) + j1939_sk_send_loop_abort(session->sk, session->err); +} + +static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer) +{ + struct j1939_session *session = + container_of(hrtimer, struct j1939_session, txtimer); + struct j1939_priv *priv = session->priv; + int ret = 0; + + if (session->skcb.addr.type == J1939_SIMPLE) { + ret = j1939_simple_txnext(session); + } else { + if (session->transmission) + ret = j1939_xtp_txnext_transmiter(session); + else + ret = j1939_xtp_txnext_receiver(session); + } + + switch (ret) { + case -ENOBUFS: + /* Retry limit is currently arbitrary chosen */ + if (session->tx_retry < J1939_XTP_TX_RETRY_LIMIT) { + session->tx_retry++; + j1939_tp_schedule_txtimer(session, + 10 + prandom_u32_max(16)); + } else { + netdev_alert(priv->ndev, "%s: 0x%p: tx retry count reached\n", + __func__, session); + session->err = -ENETUNREACH; + j1939_session_rxtimer_cancel(session); + j1939_session_deactivate_activate_next(session); + } + break; + case -ENETDOWN: + /* In this case we should get a netdev_event(), all active + * sessions will be cleared by + * j1939_cancel_all_active_sessions(). So handle this as an + * error, but let j1939_cancel_all_active_sessions() do the + * cleanup including propagation of the error to user space. + */ + break; + case 0: + session->tx_retry = 0; + break; + default: + netdev_alert(priv->ndev, "%s: 0x%p: tx aborted with unknown reason: %i\n", + __func__, session, ret); + if (session->skcb.addr.type != J1939_SIMPLE) { + j1939_tp_set_rxtimeout(session, + J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, J1939_XTP_ABORT_OTHER); + } else { + session->err = ret; + j1939_session_rxtimer_cancel(session); + j1939_session_deactivate_activate_next(session); + } + } + + j1939_session_put(session); + + return HRTIMER_NORESTART; +} + +static void j1939_session_completed(struct j1939_session *session) +{ + struct sk_buff *skb; + + if (!session->transmission) { + skb = j1939_session_skb_find(session); + /* distribute among j1939 receivers */ + j1939_sk_recv(session->priv, skb); + } + + j1939_session_deactivate_activate_next(session); +} + +static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer) +{ + struct j1939_session *session = container_of(hrtimer, + struct j1939_session, + rxtimer); + struct j1939_priv *priv = session->priv; + + if (session->state == J1939_SESSION_WAITING_ABORT) { + netdev_alert(priv->ndev, "%s: 0x%p: abort rx timeout. Force session deactivation\n", + __func__, session); + + j1939_session_deactivate_activate_next(session); + + } else if (session->skcb.addr.type == J1939_SIMPLE) { + netdev_alert(priv->ndev, "%s: 0x%p: Timeout. Failed to send simple message.\n", + __func__, session); + + /* The message is probably stuck in the CAN controller and can + * be send as soon as CAN bus is in working state again. 
+ */ + session->err = -ETIME; + j1939_session_deactivate(session); + } else { + netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n", + __func__, session); + + j1939_session_list_lock(session->priv); + if (session->state >= J1939_SESSION_ACTIVE && + session->state < J1939_SESSION_ACTIVE_MAX) { + j1939_session_get(session); + hrtimer_start(&session->rxtimer, + ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS), + HRTIMER_MODE_REL_SOFT); + j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT); + } + j1939_session_list_unlock(session->priv); + } + + j1939_session_put(session); + + return HRTIMER_NORESTART; +} + +static bool j1939_xtp_rx_cmd_bad_pgn(struct j1939_session *session, + const struct sk_buff *skb) +{ + const struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + pgn_t pgn = j1939_xtp_ctl_to_pgn(skb->data); + struct j1939_priv *priv = session->priv; + enum j1939_xtp_abort abort = J1939_XTP_NO_ABORT; + u8 cmd = skb->data[0]; + + if (session->skcb.addr.pgn == pgn) + return false; + + switch (cmd) { + case J1939_TP_CMD_BAM: + abort = J1939_XTP_NO_ABORT; + break; + + case J1939_ETP_CMD_RTS: + case J1939_TP_CMD_RTS: /* fall through */ + abort = J1939_XTP_ABORT_BUSY; + break; + + case J1939_ETP_CMD_CTS: + case J1939_TP_CMD_CTS: /* fall through */ + abort = J1939_XTP_ABORT_ECTS_UNXPECTED_PGN; + break; + + case J1939_ETP_CMD_DPO: + abort = J1939_XTP_ABORT_BAD_EDPO_PGN; + break; + + case J1939_ETP_CMD_EOMA: + case J1939_TP_CMD_EOMA: /* fall through */ + abort = J1939_XTP_ABORT_OTHER; + break; + + case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */ + abort = J1939_XTP_NO_ABORT; + break; + + default: + WARN_ON_ONCE(1); + break; + } + + netdev_warn(priv->ndev, "%s: 0x%p: CMD 0x%02x with PGN 0x%05x for running session with different PGN 0x%05x.\n", + __func__, session, cmd, pgn, session->skcb.addr.pgn); + if (abort != J1939_XTP_NO_ABORT) + j1939_xtp_tx_abort(priv, skcb, true, abort, pgn); + + return true; +} + +static void j1939_xtp_rx_abort_one(struct j1939_priv *priv, struct sk_buff *skb, + bool reverse, bool transmitter) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + u8 abort = skb->data[1]; + + session = j1939_session_get_by_addr(priv, &skcb->addr, reverse, + transmitter); + if (!session) + return; + + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + goto abort_put; + + netdev_info(priv->ndev, "%s: 0x%p: 0x%05x: (%u) %s\n", __func__, + session, j1939_xtp_ctl_to_pgn(skb->data), abort, + j1939_xtp_abort_to_str(abort)); + + j1939_session_timers_cancel(session); + session->err = j1939_xtp_abort_to_errno(priv, abort); + if (session->sk) + j1939_sk_send_loop_abort(session->sk, session->err); + j1939_session_deactivate_activate_next(session); + +abort_put: + j1939_session_put(session); +} + +/* abort packets may come in 2 directions */ +static void +j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb, + bool transmitter) +{ + j1939_xtp_rx_abort_one(priv, skb, false, transmitter); + j1939_xtp_rx_abort_one(priv, skb, true, transmitter); +} + +static void +j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb) +{ + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + return; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + session->pkt.tx_acked = session->pkt.total; + j1939_session_timers_cancel(session); + /* transmitted without problems */ + j1939_session_completed(session); +} + +static void +j1939_xtp_rx_eoma(struct j1939_priv *priv, struct sk_buff *skb, + bool transmitter) +{ + struct j1939_sk_buff_cb 
*skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + + session = j1939_session_get_by_addr(priv, &skcb->addr, true, + transmitter); + if (!session) + return; + + j1939_xtp_rx_eoma_one(session, skb); + j1939_session_put(session); +} + +static void +j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb) +{ + enum j1939_xtp_abort err = J1939_XTP_ABORT_FAULT; + unsigned int pkt; + const u8 *dat; + + dat = skb->data; + + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + return; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + if (session->last_cmd == dat[0]) { + err = J1939_XTP_ABORT_DUP_SEQ; + goto out_session_cancel; + } + + if (session->skcb.addr.type == J1939_ETP) + pkt = j1939_etp_ctl_to_packet(dat); + else + pkt = dat[2]; + + if (!pkt) + goto out_session_cancel; + else if (dat[1] > session->pkt.block /* 0xff for etp */) + goto out_session_cancel; + + /* set packet counters only when not CTS(0) */ + session->pkt.tx_acked = pkt - 1; + j1939_session_skb_drop_old(session); + session->pkt.last = session->pkt.tx_acked + dat[1]; + if (session->pkt.last > session->pkt.total) + /* safety measure */ + session->pkt.last = session->pkt.total; + /* TODO: do not set tx here, do it in txtimer */ + session->pkt.tx = session->pkt.tx_acked; + + session->last_cmd = dat[0]; + if (dat[1]) { + j1939_tp_set_rxtimeout(session, 1250); + if (session->transmission) { + if (session->pkt.tx_acked) + j1939_sk_errqueue(session, + J1939_ERRQUEUE_SCHED); + j1939_session_txtimer_cancel(session); + j1939_tp_schedule_txtimer(session, 0); + } + } else { + /* CTS(0) */ + j1939_tp_set_rxtimeout(session, 550); + } + return; + + out_session_cancel: + j1939_session_timers_cancel(session); + j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, err); +} + +static void +j1939_xtp_rx_cts(struct j1939_priv *priv, struct sk_buff *skb, bool transmitter) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + + session = j1939_session_get_by_addr(priv, &skcb->addr, true, + transmitter); + if (!session) + return; + j1939_xtp_rx_cts_one(session, skb); + j1939_session_put(session); +} + +static struct j1939_session *j1939_session_new(struct j1939_priv *priv, + struct sk_buff *skb, size_t size) +{ + struct j1939_session *session; + struct j1939_sk_buff_cb *skcb; + + session = kzalloc(sizeof(*session), gfp_any()); + if (!session) + return NULL; + + INIT_LIST_HEAD(&session->active_session_list_entry); + INIT_LIST_HEAD(&session->sk_session_queue_entry); + kref_init(&session->kref); + + j1939_priv_get(priv); + session->priv = priv; + session->total_message_size = size; + session->state = J1939_SESSION_NEW; + + skb_queue_head_init(&session->skb_queue); + skb_queue_tail(&session->skb_queue, skb); + + skcb = j1939_skb_to_cb(skb); + memcpy(&session->skcb, skcb, sizeof(session->skcb)); + + hrtimer_init(&session->txtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_SOFT); + session->txtimer.function = j1939_tp_txtimer; + hrtimer_init(&session->rxtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_SOFT); + session->rxtimer.function = j1939_tp_rxtimer; + + netdev_dbg(priv->ndev, "%s: 0x%p: sa: %02x, da: %02x\n", + __func__, session, skcb->addr.sa, skcb->addr.da); + + return session; +} + +static struct +j1939_session *j1939_session_fresh_new(struct j1939_priv *priv, + int size, + const struct j1939_sk_buff_cb *rel_skcb) +{ + struct sk_buff *skb; + struct j1939_sk_buff_cb *skcb; + struct j1939_session *session; + + skb = alloc_skb(size + 
sizeof(struct can_skb_priv), GFP_ATOMIC); + if (unlikely(!skb)) + return NULL; + + skb->dev = priv->ndev; + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = priv->ndev->ifindex; + skcb = j1939_skb_to_cb(skb); + memcpy(skcb, rel_skcb, sizeof(*skcb)); + + session = j1939_session_new(priv, skb, skb->len); + if (!session) { + kfree_skb(skb); + return NULL; + } + + /* alloc data area */ + skb_put(skb, size); + /* skb is recounted in j1939_session_new() */ + return session; +} + +int j1939_session_activate(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + struct j1939_session *active = NULL; + int ret = 0; + + j1939_session_list_lock(priv); + if (session->skcb.addr.type != J1939_SIMPLE) + active = j1939_session_get_by_addr_locked(priv, + &priv->active_session_list, + &session->skcb.addr, false, + session->transmission); + if (active) { + j1939_session_put(active); + ret = -EAGAIN; + } else { + WARN_ON_ONCE(session->state != J1939_SESSION_NEW); + list_add_tail(&session->active_session_list_entry, + &priv->active_session_list); + j1939_session_get(session); + session->state = J1939_SESSION_ACTIVE; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", + __func__, session); + } + j1939_session_list_unlock(priv); + + return ret; +} + +static struct +j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv, + struct sk_buff *skb) +{ + enum j1939_xtp_abort abort = J1939_XTP_NO_ABORT; + struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb); + struct j1939_session *session; + const u8 *dat; + pgn_t pgn; + int len; + + netdev_dbg(priv->ndev, "%s\n", __func__); + + dat = skb->data; + pgn = j1939_xtp_ctl_to_pgn(dat); + skcb.addr.pgn = pgn; + + if (!j1939_sk_recv_match(priv, &skcb)) + return NULL; + + if (skcb.addr.type == J1939_ETP) { + len = j1939_etp_ctl_to_size(dat); + if (len > J1939_MAX_ETP_PACKET_SIZE) + abort = J1939_XTP_ABORT_FAULT; + else if (len > priv->tp_max_packet_size) + abort = J1939_XTP_ABORT_RESOURCE; + else if (len <= J1939_MAX_TP_PACKET_SIZE) + abort = J1939_XTP_ABORT_FAULT; + } else { + len = j1939_tp_ctl_to_size(dat); + if (len > J1939_MAX_TP_PACKET_SIZE) + abort = J1939_XTP_ABORT_FAULT; + else if (len > priv->tp_max_packet_size) + abort = J1939_XTP_ABORT_RESOURCE; + } + + if (abort != J1939_XTP_NO_ABORT) { + j1939_xtp_tx_abort(priv, &skcb, true, abort, pgn); + return NULL; + } + + session = j1939_session_fresh_new(priv, len, &skcb); + if (!session) { + j1939_xtp_tx_abort(priv, &skcb, true, + J1939_XTP_ABORT_RESOURCE, pgn); + return NULL; + } + + /* initialize the control buffer: plain copy */ + session->pkt.total = (len + 6) / 7; + session->pkt.block = 0xff; + if (skcb.addr.type != J1939_ETP) { + if (dat[3] != session->pkt.total) + netdev_alert(priv->ndev, "%s: 0x%p: strange total, %u != %u\n", + __func__, session, session->pkt.total, + dat[3]); + session->pkt.total = dat[3]; + session->pkt.block = min(dat[3], dat[4]); + } + + session->pkt.rx = 0; + session->pkt.tx = 0; + + WARN_ON_ONCE(j1939_session_activate(session)); + + return session; +} + +static int j1939_xtp_rx_rts_session_active(struct j1939_session *session, + struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_priv *priv = session->priv; + + if (!session->transmission) { + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + return -EBUSY; + + /* RTS on active session */ + j1939_session_timers_cancel(session); + j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, J1939_XTP_ABORT_BUSY); + } + + if 
(session->last_cmd != 0) { + /* we received a second rts on the same connection */ + netdev_alert(priv->ndev, "%s: 0x%p: connection exists (%02x %02x). last cmd: %x\n", + __func__, session, skcb->addr.sa, skcb->addr.da, + session->last_cmd); + + j1939_session_timers_cancel(session); + j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, J1939_XTP_ABORT_BUSY); + + return -EBUSY; + } + + if (session->skcb.addr.sa != skcb->addr.sa || + session->skcb.addr.da != skcb->addr.da) + netdev_warn(priv->ndev, "%s: 0x%p: session->skcb.addr.sa=0x%02x skcb->addr.sa=0x%02x session->skcb.addr.da=0x%02x skcb->addr.da=0x%02x\n", + __func__, session, + session->skcb.addr.sa, skcb->addr.sa, + session->skcb.addr.da, skcb->addr.da); + /* make sure 'sa' & 'da' are correct ! + * They may be 'not filled in yet' for sending + * skb's, since they did not pass the Address Claim ever. + */ + session->skcb.addr.sa = skcb->addr.sa; + session->skcb.addr.da = skcb->addr.da; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + return 0; +} + +static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb, + bool transmitter) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + u8 cmd = skb->data[0]; + + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + transmitter); + + if (!session) { + if (transmitter) { + /* If we're the transmitter and this function is called, + * we received our own RTS. A session has already been + * created. + * + * For some reasons however it might have been destroyed + * already. So don't create a new one here (using + * "j1939_xtp_rx_rts_session_new()") as this will be a + * receiver session. + * + * The reasons the session is already destroyed might + * be: + * - user space closed socket was and the session was + * aborted + * - session was aborted due to external abort message + */ + return; + } + session = j1939_xtp_rx_rts_session_new(priv, skb); + if (!session) + return; + } else { + if (j1939_xtp_rx_rts_session_active(session, skb)) { + j1939_session_put(session); + return; + } + } + session->last_cmd = cmd; + + j1939_tp_set_rxtimeout(session, 1250); + + if (cmd != J1939_TP_CMD_BAM && !session->transmission) { + j1939_session_txtimer_cancel(session); + j1939_tp_schedule_txtimer(session, 0); + } + + j1939_session_put(session); +} + +static void j1939_xtp_rx_dpo_one(struct j1939_session *session, + struct sk_buff *skb) +{ + const u8 *dat = skb->data; + + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + return; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + /* transmitted without problems */ + session->pkt.dpo = j1939_etp_ctl_to_packet(skb->data); + session->last_cmd = dat[0]; + j1939_tp_set_rxtimeout(session, 750); +} + +static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb, + bool transmitter) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + transmitter); + if (!session) { + netdev_info(priv->ndev, + "%s: no connection found\n", __func__); + return; + } + + j1939_xtp_rx_dpo_one(session, skb); + j1939_session_put(session); +} + +static void j1939_xtp_rx_dat_one(struct j1939_session *session, + struct sk_buff *skb) +{ + struct j1939_priv *priv = session->priv; + struct j1939_sk_buff_cb *skcb; + struct sk_buff *se_skb; + const u8 *dat; + u8 *tpdat; + int offset; + int nbytes; + bool final = false; + bool do_cts_eoma = 
false; + int packet; + + skcb = j1939_skb_to_cb(skb); + dat = skb->data; + if (skb->len <= 1) + /* makes no sense */ + goto out_session_cancel; + + switch (session->last_cmd) { + case 0xff: + break; + case J1939_ETP_CMD_DPO: + if (skcb->addr.type == J1939_ETP) + break; + /* fall through */ + case J1939_TP_CMD_BAM: /* fall through */ + case J1939_TP_CMD_CTS: /* fall through */ + if (skcb->addr.type != J1939_ETP) + break; + /* fall through */ + default: + netdev_info(priv->ndev, "%s: 0x%p: last %02x\n", __func__, + session, session->last_cmd); + goto out_session_cancel; + } + + packet = (dat[0] - 1 + session->pkt.dpo); + if (packet > session->pkt.total || + (session->pkt.rx + 1) > session->pkt.total) { + netdev_info(priv->ndev, "%s: 0x%p: should have been completed\n", + __func__, session); + goto out_session_cancel; + } + se_skb = j1939_session_skb_find(session); + if (!se_skb) { + netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__, + session); + goto out_session_cancel; + } + + skcb = j1939_skb_to_cb(se_skb); + offset = packet * 7 - skcb->offset; + nbytes = se_skb->len - offset; + if (nbytes > 7) + nbytes = 7; + if (nbytes <= 0 || (nbytes + 1) > skb->len) { + netdev_info(priv->ndev, "%s: 0x%p: nbytes %i, len %i\n", + __func__, session, nbytes, skb->len); + goto out_session_cancel; + } + + tpdat = se_skb->data; + memcpy(&tpdat[offset], &dat[1], nbytes); + if (packet == session->pkt.rx) + session->pkt.rx++; + + if (skcb->addr.type != J1939_ETP && + j1939_cb_is_broadcast(&session->skcb)) { + if (session->pkt.rx >= session->pkt.total) + final = true; + } else { + /* never final, an EOMA must follow */ + if (session->pkt.rx >= session->pkt.last) + do_cts_eoma = true; + } + + if (final) { + j1939_session_completed(session); + } else if (do_cts_eoma) { + j1939_tp_set_rxtimeout(session, 1250); + if (!session->transmission) + j1939_tp_schedule_txtimer(session, 0); + } else { + j1939_tp_set_rxtimeout(session, 250); + } + session->last_cmd = 0xff; + j1939_session_put(session); + + return; + + out_session_cancel: + j1939_session_timers_cancel(session); + j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, J1939_XTP_ABORT_FAULT); + j1939_session_put(session); +} + +static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb; + struct j1939_session *session; + + skcb = j1939_skb_to_cb(skb); + + if (j1939_tp_im_transmitter(skcb)) { + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + true); + if (!session) + netdev_info(priv->ndev, "%s: no tx connection found\n", + __func__); + else + j1939_xtp_rx_dat_one(session, skb); + } + + if (j1939_tp_im_receiver(skcb)) { + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + false); + if (!session) + netdev_info(priv->ndev, "%s: no rx connection found\n", + __func__); + else + j1939_xtp_rx_dat_one(session, skb); + } +} + +/* j1939 main intf */ +struct j1939_session *j1939_tp_send(struct j1939_priv *priv, + struct sk_buff *skb, size_t size) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + int ret; + + if (skcb->addr.pgn == J1939_TP_PGN_DAT || + skcb->addr.pgn == J1939_TP_PGN_CTL || + skcb->addr.pgn == J1939_ETP_PGN_DAT || + skcb->addr.pgn == J1939_ETP_PGN_CTL) + /* avoid conflict */ + return ERR_PTR(-EDOM); + + if (size > priv->tp_max_packet_size) + return ERR_PTR(-EMSGSIZE); + + if (size <= 8) + skcb->addr.type = J1939_SIMPLE; + else if (size > J1939_MAX_TP_PACKET_SIZE) + skcb->addr.type = 
J1939_ETP; + else + skcb->addr.type = J1939_TP; + + if (skcb->addr.type == J1939_ETP && + j1939_cb_is_broadcast(skcb)) + return ERR_PTR(-EDESTADDRREQ); + + /* fill in addresses from names */ + ret = j1939_ac_fixup(priv, skb); + if (unlikely(ret)) + return ERR_PTR(ret); + + /* fix DST flags, it may be used there soon */ + if (j1939_address_is_unicast(skcb->addr.da) && + priv->ents[skcb->addr.da].nusers) + skcb->flags |= J1939_ECU_LOCAL_DST; + + /* src is always local, I'm sending ... */ + skcb->flags |= J1939_ECU_LOCAL_SRC; + + /* prepare new session */ + session = j1939_session_new(priv, skb, size); + if (!session) + return ERR_PTR(-ENOMEM); + + /* skb is recounted in j1939_session_new() */ + session->sk = skb->sk; + session->transmission = true; + session->pkt.total = (size + 6) / 7; + session->pkt.block = skcb->addr.type == J1939_ETP ? 255 : + min(j1939_tp_block ?: 255, session->pkt.total); + + if (j1939_cb_is_broadcast(&session->skcb)) + /* set the end-packet for broadcast */ + session->pkt.last = session->pkt.total; + + skcb->tskey = session->sk->sk_tskey++; + session->tskey = skcb->tskey; + + return session; +} + +static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + int extd = J1939_TP; + u8 cmd = skb->data[0]; + + switch (cmd) { + case J1939_ETP_CMD_RTS: + extd = J1939_ETP; + /* fall through */ + case J1939_TP_CMD_BAM: /* fall through */ + case J1939_TP_CMD_RTS: /* fall through */ + if (skcb->addr.type != extd) + return; + + if (cmd == J1939_TP_CMD_RTS && j1939_cb_is_broadcast(skcb)) { + netdev_alert(priv->ndev, "%s: rts without destination (%02x)\n", + __func__, skcb->addr.sa); + return; + } + + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_rts(priv, skb, true); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_rts(priv, skb, false); + + break; + + case J1939_ETP_CMD_CTS: + extd = J1939_ETP; + /* fall through */ + case J1939_TP_CMD_CTS: + if (skcb->addr.type != extd) + return; + + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_cts(priv, skb, false); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_cts(priv, skb, true); + + break; + + case J1939_ETP_CMD_DPO: + if (skcb->addr.type != J1939_ETP) + return; + + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_dpo(priv, skb, true); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_dpo(priv, skb, false); + + break; + + case J1939_ETP_CMD_EOMA: + extd = J1939_ETP; + /* fall through */ + case J1939_TP_CMD_EOMA: + if (skcb->addr.type != extd) + return; + + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_eoma(priv, skb, false); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_eoma(priv, skb, true); + + break; + + case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */ + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_abort(priv, skb, true); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_abort(priv, skb, false); + + break; + default: + return; + } +} + +int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + + if (!j1939_tp_im_involved_anydir(skcb)) + return 0; + + switch (skcb->addr.pgn) { + case J1939_ETP_PGN_DAT: + skcb->addr.type = J1939_ETP; + /* fall through */ + case J1939_TP_PGN_DAT: + j1939_xtp_rx_dat(priv, skb); + break; + + case J1939_ETP_PGN_CTL: + skcb->addr.type = J1939_ETP; + /* fall through */ + case J1939_TP_PGN_CTL: + if (skb->len < 8) + return 0; /* Don't care. 
Nothing to extract here */ + + j1939_tp_cmd_recv(priv, skb); + break; + default: + return 0; /* no problem */ + } + return 1; /* "I processed the message" */ +} + +void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_session *session; + + if (!skb->sk) + return; + + j1939_session_list_lock(priv); + session = j1939_session_get_simple(priv, skb); + j1939_session_list_unlock(priv); + if (!session) { + netdev_warn(priv->ndev, + "%s: Received already invalidated message\n", + __func__); + return; + } + + j1939_session_timers_cancel(session); + j1939_session_deactivate(session); + j1939_session_put(session); +} + +int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk) +{ + struct j1939_session *session, *saved; + + netdev_dbg(priv->ndev, "%s, sk: %p\n", __func__, sk); + j1939_session_list_lock(priv); + list_for_each_entry_safe(session, saved, + &priv->active_session_list, + active_session_list_entry) { + if (!sk || sk == session->sk) { + j1939_session_timers_cancel(session); + session->err = ESHUTDOWN; + j1939_session_deactivate_locked(session); + } + } + j1939_session_list_unlock(priv); + return NOTIFY_DONE; +} + +void j1939_tp_init(struct j1939_priv *priv) +{ + spin_lock_init(&priv->active_session_list_lock); + INIT_LIST_HEAD(&priv->active_session_list); + priv->tp_max_packet_size = J1939_MAX_ETP_PACKET_SIZE; +} -- cgit v1.2.3-59-g8ed1b From df69ba43217d3cf4215c83c0627ce98a26e56e7c Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 3 Sep 2019 15:28:04 -0700 Subject: ionic: Add basic framework for IONIC Network device driver This patch adds a basic driver framework for the Pensando IONIC network device. There is no functionality right now other than the ability to load and unload. Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- Documentation/networking/device_drivers/index.rst | 1 + .../networking/device_drivers/pensando/ionic.rst | 43 ++++++++++++++++ MAINTAINERS | 8 +++ drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/pensando/Kconfig | 32 ++++++++++++ drivers/net/ethernet/pensando/Makefile | 6 +++ drivers/net/ethernet/pensando/ionic/Makefile | 6 +++ drivers/net/ethernet/pensando/ionic/ionic.h | 27 ++++++++++ drivers/net/ethernet/pensando/ionic/ionic_bus.h | 10 ++++ .../net/ethernet/pensando/ionic/ionic_bus_pci.c | 58 ++++++++++++++++++++++ .../net/ethernet/pensando/ionic/ionic_devlink.c | 35 +++++++++++++ .../net/ethernet/pensando/ionic/ionic_devlink.h | 12 +++++ drivers/net/ethernet/pensando/ionic/ionic_main.c | 32 ++++++++++++ 14 files changed, 272 insertions(+) create mode 100644 Documentation/networking/device_drivers/pensando/ionic.rst create mode 100644 drivers/net/ethernet/pensando/Kconfig create mode 100644 drivers/net/ethernet/pensando/Makefile create mode 100644 drivers/net/ethernet/pensando/ionic/Makefile create mode 100644 drivers/net/ethernet/pensando/ionic/ionic.h create mode 100644 drivers/net/ethernet/pensando/ionic/ionic_bus.h create mode 100644 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c create mode 100644 drivers/net/ethernet/pensando/ionic/ionic_devlink.c create mode 100644 drivers/net/ethernet/pensando/ionic/ionic_devlink.h create mode 100644 drivers/net/ethernet/pensando/ionic/ionic_main.c (limited to 'MAINTAINERS') diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst index 2b7fefe72351..57fec66c5419 100644 --- a/Documentation/networking/device_drivers/index.rst +++ b/Documentation/networking/device_drivers/index.rst @@ -23,6 +23,7 @@ Contents: intel/ice google/gve mellanox/mlx5 + pensando/ionic .. only:: subproject diff --git a/Documentation/networking/device_drivers/pensando/ionic.rst b/Documentation/networking/device_drivers/pensando/ionic.rst new file mode 100644 index 000000000000..67b6839d516b --- /dev/null +++ b/Documentation/networking/device_drivers/pensando/ionic.rst @@ -0,0 +1,43 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +========================================================== +Linux* Driver for the Pensando(R) Ethernet adapter family +========================================================== + +Pensando Linux Ethernet driver. +Copyright(c) 2019 Pensando Systems, Inc + +Contents +======== + +- Identifying the Adapter +- Support + +Identifying the Adapter +======================= + +To find if one or more Pensando PCI Ethernet devices are installed on the +host, check for the PCI devices:: + + $ lspci -d 1dd8: + b5:00.0 Ethernet controller: Device 1dd8:1002 + b6:00.0 Ethernet controller: Device 1dd8:1002 + +If such devices are listed as above, then the ionic.ko driver should find +and configure them for use. 
There should be log entries in the kernel +messages such as these:: + + $ dmesg | grep ionic + ionic Pensando Ethernet NIC Driver, ver 0.15.0-k + ionic 0000:b5:00.0 enp181s0: renamed from eth0 + ionic 0000:b6:00.0 enp182s0: renamed from eth0 + +Support +======= +For general Linux networking support, please use the netdev mailing +list, which is monitored by Pensando personnel:: + netdev@vger.kernel.org + +For more specific support needs, please use the Pensando driver support +email:: + drivers@pensando.io diff --git a/MAINTAINERS b/MAINTAINERS index a081c477d1d1..c1fb8fb3b2ee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12608,6 +12608,14 @@ L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/peaq-wmi.c +PENSANDO ETHERNET DRIVERS +M: Shannon Nelson +M: Pensando Drivers +L: netdev@vger.kernel.org +S: Supported +F: Documentation/networking/device_drivers/pensando/ionic.rst +F: drivers/net/ethernet/pensando/ + PER-CPU MEMORY ALLOCATOR M: Dennis Zhou M: Tejun Heo diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 93a2d4deb27c..2830dc283ce6 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -168,6 +168,7 @@ config ETHOC source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" +source "drivers/net/ethernet/pensando/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index fb9155cffcff..061edd22f507 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -97,3 +97,4 @@ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ +obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig new file mode 100644 index 000000000000..5ea570be8379 --- /dev/null +++ b/drivers/net/ethernet/pensando/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019 Pensando Systems, Inc +# +# Pensando device configuration +# + +config NET_VENDOR_PENSANDO + bool "Pensando devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Pensando cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_PENSANDO + +config IONIC + tristate "Pensando Ethernet IONIC Support" + depends on 64BIT && PCI + help + This enables the support for the Pensando family of Ethernet + adapters. More specific information on this driver can be + found in + . + + To compile this driver as a module, choose M here. The module + will be called ionic. + +endif # NET_VENDOR_PENSANDO diff --git a/drivers/net/ethernet/pensando/Makefile b/drivers/net/ethernet/pensando/Makefile new file mode 100644 index 000000000000..21ce7499c122 --- /dev/null +++ b/drivers/net/ethernet/pensando/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Pensando network device drivers. 
+# + +obj-$(CONFIG_IONIC) += ionic/ diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile new file mode 100644 index 000000000000..f174e8f7bce1 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2017 - 2019 Pensando Systems, Inc + +obj-$(CONFIG_IONIC) := ionic.o + +ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h new file mode 100644 index 000000000000..56ccf27b7571 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_H_ +#define _IONIC_H_ + +#include "ionic_devlink.h" + +#define IONIC_DRV_NAME "ionic" +#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver" +#define IONIC_DRV_VERSION "0.15.0-k" + +#define PCI_VENDOR_ID_PENSANDO 0x1dd8 + +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 + +#define IONIC_SUBDEV_ID_NAPLES_25 0x4000 +#define IONIC_SUBDEV_ID_NAPLES_100_4 0x4001 +#define IONIC_SUBDEV_ID_NAPLES_100_8 0x4002 + +struct ionic { + struct pci_dev *pdev; + struct device *dev; +}; + +#endif /* _IONIC_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus.h b/drivers/net/ethernet/pensando/ionic/ionic_bus.h new file mode 100644 index 000000000000..94ba0afc6f38 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_BUS_H_ +#define _IONIC_BUS_H_ + +int ionic_bus_register_driver(void); +void ionic_bus_unregister_driver(void); + +#endif /* _IONIC_BUS_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c new file mode 100644 index 000000000000..2946ce37b06c --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include +#include +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" + +/* Supported devices */ +static const struct pci_device_id ionic_id_table[] = { + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) }, + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) }, + { 0, } /* end of table */ +}; +MODULE_DEVICE_TABLE(pci, ionic_id_table); + +static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct ionic *ionic; + + ionic = ionic_devlink_alloc(dev); + if (!ionic) + return -ENOMEM; + + ionic->pdev = pdev; + ionic->dev = dev; + pci_set_drvdata(pdev, ionic); + + return 0; +} + +static void ionic_remove(struct pci_dev *pdev) +{ + struct ionic *ionic = pci_get_drvdata(pdev); + + ionic_devlink_free(ionic); +} + +static struct pci_driver ionic_driver = { + .name = IONIC_DRV_NAME, + .id_table = ionic_id_table, + .probe = ionic_probe, + .remove = ionic_remove, +}; + +int ionic_bus_register_driver(void) +{ + return pci_register_driver(&ionic_driver); +} + +void ionic_bus_unregister_driver(void) +{ + pci_unregister_driver(&ionic_driver); +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c new file mode 100644 index 000000000000..d2665cee517a --- 
/dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_devlink.h" + +static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + return devlink_info_driver_name_put(req, IONIC_DRV_NAME); +} + +static const struct devlink_ops ionic_dl_ops = { + .info_get = ionic_dl_info_get, +}; + +struct ionic *ionic_devlink_alloc(struct device *dev) +{ + struct devlink *dl; + + dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic)); + + return devlink_priv(dl); +} + +void ionic_devlink_free(struct ionic *ionic) +{ + struct devlink *dl = priv_to_devlink(ionic); + + devlink_free(dl); +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h new file mode 100644 index 000000000000..1df50874260a --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_DEVLINK_H_ +#define _IONIC_DEVLINK_H_ + +#include + +struct ionic *ionic_devlink_alloc(struct device *dev); +void ionic_devlink_free(struct ionic *ionic); + +#endif /* _IONIC_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c new file mode 100644 index 000000000000..332b528ce921 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include +#include +#include +#include + +#include "ionic.h" +#include "ionic_bus.h" + +MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION); +MODULE_AUTHOR("Pensando Systems, Inc"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IONIC_DRV_VERSION); + +static int __init ionic_init_module(void) +{ + pr_info("%s %s, ver %s\n", + IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION); + return ionic_bus_register_driver(); +} + +static void __exit ionic_cleanup_module(void) +{ + ionic_bus_unregister_driver(); + + pr_info("%s removed\n", IONIC_DRV_NAME); +} + +module_init(ionic_init_module); +module_exit(ionic_cleanup_module); -- cgit v1.2.3-59-g8ed1b From 655e023ed49d4f8a193945c3302784773b0ded95 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Fri, 13 Sep 2019 13:47:27 +0100 Subject: MAINTAINERS: xen-netback: update my email address My Citrix email address will expire shortly. Signed-off-by: Paul Durrant Acked-by: Wei Liu Acked-by: Wei Liu Signed-off-by: David S. Miller --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a50e97a63bc8..63ab65dbc966 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17646,7 +17646,7 @@ F: Documentation/ABI/testing/sysfs-hypervisor-xen XEN NETWORK BACKEND DRIVER M: Wei Liu -M: Paul Durrant +M: Paul Durrant L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: netdev@vger.kernel.org S: Supported -- cgit v1.2.3-59-g8ed1b From 28c9eb9042a954d4e9fbec91484bddce280f1beb Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 13 Sep 2019 16:28:17 +0300 Subject: net/wan: dscc4: remove broken dscc4 driver Using static analysis, I discovered that the "dpriv->pci_priv->pdev" pointer is always NULL. 
This pointer was supposed to be initialized during probe and is essential for the driver to work. It would be easy to add a "ppriv->pdev = pdev;" to dscc4_found1() but this driver has been broken since before we started using git and no one has complained so probably we should just remove it. Signed-off-by: Dan Carpenter Acked-by: Francois Romieu Signed-off-by: David S. Miller --- MAINTAINERS | 6 - drivers/net/wan/Kconfig | 14 - drivers/net/wan/Makefile | 1 - drivers/net/wan/dscc4.c | 2057 ---------------------------------------------- 4 files changed, 2078 deletions(-) delete mode 100644 drivers/net/wan/dscc4.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 16fc09defd39..32f4f05fb3da 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5582,12 +5582,6 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/radio/dsbr100.c -DSCC4 DRIVER -M: Francois Romieu -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/wan/dscc4.c - DT3155 MEDIA DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 09fdd619ac67..dd1a147f2971 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -267,20 +267,6 @@ config FARSYNC To compile this driver as a module, choose M here: the module will be called farsync. -config DSCC4 - tristate "Etinc PCISYNC serial board support" - depends on HDLC && PCI && m - help - Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens) - DSCC4 chipset. - - This is supposed to work with the four port card. Take a look at - for further information about the - driver. - - To compile this driver as a module, choose M here: the - module will be called dscc4. - config FSL_UCC_HDLC tristate "Freescale QUICC Engine HDLC support" depends on HDLC diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 9532e69fda87..701f5d2fe3b6 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -18,7 +18,6 @@ obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o obj-$(CONFIG_COSA) += cosa.o obj-$(CONFIG_FARSYNC) += farsync.o -obj-$(CONFIG_DSCC4) += dscc4.o obj-$(CONFIG_X25_ASY) += x25_asy.o obj-$(CONFIG_LANMEDIA) += lmc/ diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c deleted file mode 100644 index fa78d2b14136..000000000000 --- a/drivers/net/wan/dscc4.c +++ /dev/null @@ -1,2057 +0,0 @@ -/* - * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux - * - * This software may be used and distributed according to the terms of the - * GNU General Public License. - * - * The author may be reached as romieu@cogenit.fr. - * Specific bug reports/asian food will be welcome. - * - * Special thanks to the nice people at CS-Telecom for the hardware and the - * access to the test/measure tools. - * - * - * Theory of Operation - * - * I. Board Compatibility - * - * This device driver is designed for the Siemens PEB20534 4 ports serial - * controller as found on Etinc PCISYNC cards. The documentation for the - * chipset is available at http://www.infineon.com: - * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with - * 4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1"; - * - Application Hint "Management of DSCC4 on-chip FIFO resources". - * - Errata sheet DS5 (courtesy of Michael Skerritt). - * Jens David has built an adapter based on the same chipset. Take a look - * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific - * driver. 
- * Sample code (2 revisions) is available at Infineon. - * - * II. Board-specific settings - * - * Pcisync can transmit some clock signal to the outside world on the - * *first two* ports provided you put a quartz and a line driver on it and - * remove the jumpers. The operation is described on Etinc web site. If you - * go DCE on these ports, don't forget to use an adequate cable. - * - * Sharing of the PCI interrupt line for this board is possible. - * - * III. Driver operation - * - * The rx/tx operations are based on a linked list of descriptors. The driver - * doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more - * I tried to fix it, the more it started to look like (convoluted) software - * mutation of LxDA method. Errata sheet DS5 suggests to use LxDA: consider - * this a rfc2119 MUST. - * - * Tx direction - * When the tx ring is full, the xmit routine issues a call to netdev_stop. - * The device is supposed to be enabled again during an ALLS irq (we could - * use HI but as it's easy to lose events, it's fscked). - * - * Rx direction - * The received frames aren't supposed to span over multiple receiving areas. - * I may implement it some day but it isn't the highest ranked item. - * - * IV. Notes - * The current error (XDU, RFO) recovery code is untested. - * So far, RDO takes his RX channel down and the right sequence to enable it - * again is still a mystery. If RDO happens, plan a reboot. More details - * in the code (NB: as this happens, TX still works). - * Don't mess the cables during operation, especially on DTE ports. I don't - * suggest it for DCE either but at least one can get some messages instead - * of a complete instant freeze. - * Tests are done on Rev. 20 of the silicium. The RDO handling changes with - * the documentation/chipset releases. - * - * TODO: - * - test X25. - * - use polling at high irq/s, - * - performance analysis, - * - endianness. - * - * 2001/12/10 Daniela Squassoni - * - Contribution to support the new generic HDLC layer. - * - * 2002/01 Ueimor - * - old style interface removal - * - dscc4_release_ring fix (related to DMA mapping) - * - hard_start_xmit fix (hint: TxSizeMax) - * - misc crapectomy. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -/* Version */ -static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n"; -static int debug; -static int quartz; - -#ifdef CONFIG_DSCC4_PCI_RST -static DEFINE_MUTEX(dscc4_mutex); -static u32 dscc4_pci_config_store[16]; -#endif - -#define DRV_NAME "dscc4" - -#undef DSCC4_POLLING - -/* Module parameters */ - -MODULE_AUTHOR("Maintainer: Francois Romieu "); -MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller"); -MODULE_LICENSE("GPL"); -module_param(debug, int, 0); -MODULE_PARM_DESC(debug,"Enable/disable extra messages"); -module_param(quartz, int, 0); -MODULE_PARM_DESC(quartz,"If present, on-board quartz frequency (Hz)"); - -/* Structures */ - -struct thingie { - int define; - u32 bits; -}; - -struct TxFD { - __le32 state; - __le32 next; - __le32 data; - __le32 complete; - u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */ - /* FWIW, datasheet calls that "dummy" and says that card - * never looks at it; neither does the driver */ -}; - -struct RxFD { - __le32 state1; - __le32 next; - __le32 data; - __le32 state2; - __le32 end; -}; - -#define DUMMY_SKB_SIZE 64 -#define TX_LOW 8 -#define TX_RING_SIZE 32 -#define RX_RING_SIZE 32 -#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct TxFD) -#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct RxFD) -#define IRQ_RING_SIZE 64 /* Keep it a multiple of 32 */ -#define TX_TIMEOUT (HZ/10) -#define DSCC4_HZ_MAX 33000000 -#define BRR_DIVIDER_MAX 64*0x00004000 /* Cf errata DS5 p.10 */ -#define dev_per_card 4 -#define SCC_REGISTERS_MAX 23 /* Cf errata DS5 p.4 */ - -#define SOURCE_ID(flags) (((flags) >> 28) & 0x03) -#define TO_SIZE(state) (((state) >> 16) & 0x1fff) - -/* - * Given the operating range of Linux HDLC, the 2 defines below could be - * made simpler. However they are a fine reminder for the limitations of - * the driver: it's better to stay < TxSizeMax and < RxSizeMax. 
- */ -#define TO_STATE_TX(len) cpu_to_le32(((len) & TxSizeMax) << 16) -#define TO_STATE_RX(len) cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16) -#define RX_MAX(len) ((((len) >> 5) + 1) << 5) /* Cf RLCR */ -#define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET) - -struct dscc4_pci_priv { - __le32 *iqcfg; - int cfg_cur; - spinlock_t lock; - struct pci_dev *pdev; - - struct dscc4_dev_priv *root; - dma_addr_t iqcfg_dma; - u32 xtal_hz; -}; - -struct dscc4_dev_priv { - struct sk_buff *rx_skbuff[RX_RING_SIZE]; - struct sk_buff *tx_skbuff[TX_RING_SIZE]; - - struct RxFD *rx_fd; - struct TxFD *tx_fd; - __le32 *iqrx; - __le32 *iqtx; - - /* FIXME: check all the volatile are required */ - volatile u32 tx_current; - u32 rx_current; - u32 iqtx_current; - u32 iqrx_current; - - volatile u32 tx_dirty; - volatile u32 ltda; - u32 rx_dirty; - u32 lrda; - - dma_addr_t tx_fd_dma; - dma_addr_t rx_fd_dma; - dma_addr_t iqtx_dma; - dma_addr_t iqrx_dma; - - u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */ - - struct dscc4_pci_priv *pci_priv; - spinlock_t lock; - - int dev_id; - volatile u32 flags; - u32 timer_help; - - unsigned short encoding; - unsigned short parity; - struct net_device *dev; - sync_serial_settings settings; - void __iomem *base_addr; - u32 __pad __attribute__ ((aligned (4))); -}; - -/* GLOBAL registers definitions */ -#define GCMDR 0x00 -#define GSTAR 0x04 -#define GMODE 0x08 -#define IQLENR0 0x0C -#define IQLENR1 0x10 -#define IQRX0 0x14 -#define IQTX0 0x24 -#define IQCFG 0x3c -#define FIFOCR1 0x44 -#define FIFOCR2 0x48 -#define FIFOCR3 0x4c -#define FIFOCR4 0x34 -#define CH0CFG 0x50 -#define CH0BRDA 0x54 -#define CH0BTDA 0x58 -#define CH0FRDA 0x98 -#define CH0FTDA 0xb0 -#define CH0LRDA 0xc8 -#define CH0LTDA 0xe0 - -/* SCC registers definitions */ -#define SCC_START 0x0100 -#define SCC_OFFSET 0x80 -#define CMDR 0x00 -#define STAR 0x04 -#define CCR0 0x08 -#define CCR1 0x0c -#define CCR2 0x10 -#define BRR 0x2C -#define RLCR 0x40 -#define IMR 0x54 -#define ISR 0x58 - -#define GPDIR 0x0400 -#define GPDATA 0x0404 -#define GPIM 0x0408 - -/* Bit masks */ -#define EncodingMask 0x00700000 -#define CrcMask 0x00000003 - -#define IntRxScc0 0x10000000 -#define IntTxScc0 0x01000000 - -#define TxPollCmd 0x00000400 -#define RxActivate 0x08000000 -#define MTFi 0x04000000 -#define Rdr 0x00400000 -#define Rdt 0x00200000 -#define Idr 0x00100000 -#define Idt 0x00080000 -#define TxSccRes 0x01000000 -#define RxSccRes 0x00010000 -#define TxSizeMax 0x1fff /* Datasheet DS1 - 11.1.1.1 */ -#define RxSizeMax 0x1ffc /* Datasheet DS1 - 11.1.2.1 */ - -#define Ccr0ClockMask 0x0000003f -#define Ccr1LoopMask 0x00000200 -#define IsrMask 0x000fffff -#define BrrExpMask 0x00000f00 -#define BrrMultMask 0x0000003f -#define EncodingMask 0x00700000 -#define Hold cpu_to_le32(0x40000000) -#define SccBusy 0x10000000 -#define PowerUp 0x80000000 -#define Vis 0x00001000 -#define FrameOk (FrameVfr | FrameCrc) -#define FrameVfr 0x80 -#define FrameRdo 0x40 -#define FrameCrc 0x20 -#define FrameRab 0x10 -#define FrameAborted cpu_to_le32(0x00000200) -#define FrameEnd cpu_to_le32(0x80000000) -#define DataComplete cpu_to_le32(0x40000000) -#define LengthCheck 0x00008000 -#define SccEvt 0x02000000 -#define NoAck 0x00000200 -#define Action 0x00000001 -#define HiDesc cpu_to_le32(0x20000000) - -/* SCC events */ -#define RxEvt 0xf0000000 -#define TxEvt 0x0f000000 -#define Alls 0x00040000 -#define Xdu 0x00010000 -#define Cts 0x00004000 -#define Xmr 0x00002000 -#define Xpr 0x00001000 -#define Rdo 0x00000080 -#define Rfs 0x00000040 -#define Cd 
0x00000004 -#define Rfo 0x00000002 -#define Flex 0x00000001 - -/* DMA core events */ -#define Cfg 0x00200000 -#define Hi 0x00040000 -#define Fi 0x00020000 -#define Err 0x00010000 -#define Arf 0x00000002 -#define ArAck 0x00000001 - -/* State flags */ -#define Ready 0x00000000 -#define NeedIDR 0x00000001 -#define NeedIDT 0x00000002 -#define RdoSet 0x00000004 -#define FakeReset 0x00000008 - -/* Don't mask RDO. Ever. */ -#ifdef DSCC4_POLLING -#define EventsMask 0xfffeef7f -#else -#define EventsMask 0xfffa8f7a -#endif - -/* Functions prototypes */ -static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *); -static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *); -static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr); -static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent); -static int dscc4_open(struct net_device *); -static netdev_tx_t dscc4_start_xmit(struct sk_buff *, - struct net_device *); -static int dscc4_close(struct net_device *); -static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static int dscc4_init_ring(struct net_device *); -static void dscc4_release_ring(struct dscc4_dev_priv *); -static void dscc4_tx_timeout(struct net_device *); -static irqreturn_t dscc4_irq(int irq, void *dev_id); -static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short); -static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *); -#ifdef DSCC4_POLLING -static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *); -#endif - -static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev) -{ - return dev_to_hdlc(dev)->priv; -} - -static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p) -{ - return p->dev; -} - -static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv, - struct net_device *dev, int offset) -{ - u32 state; - - /* Cf scc_writel for concern regarding thread-safety */ - state = dpriv->scc_regs[offset >> 2]; - state &= ~mask; - state |= value; - dpriv->scc_regs[offset >> 2] = state; - writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset); -} - -static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv, - struct net_device *dev, int offset) -{ - /* - * Thread-UNsafe. - * As of 2002/02/16, there are no thread racing for access. 
- */ - dpriv->scc_regs[offset >> 2] = bits; - writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset); -} - -static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset) -{ - return dpriv->scc_regs[offset >> 2]; -} - -static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - /* Cf errata DS5 p.4 */ - readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR); - return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR); -} - -static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - dpriv->ltda = dpriv->tx_fd_dma + - ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD); - writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4); - /* Flush posted writes *NOW* */ - readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4); -} - -static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - dpriv->lrda = dpriv->rx_fd_dma + - ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD); - writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); -} - -static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv) -{ - return dpriv->tx_current == dpriv->tx_dirty; -} - -static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda; -} - -static int state_check(u32 state, struct dscc4_dev_priv *dpriv, - struct net_device *dev, const char *msg) -{ - int ret = 0; - - if (debug > 1) { - if (SOURCE_ID(state) != dpriv->dev_id) { - printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n", - dev->name, msg, SOURCE_ID(state), state); - ret = -1; - } - if (state & 0x0df80c00) { - printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n", - dev->name, msg, state); - ret = -1; - } - } - return ret; -} - -static void dscc4_tx_print(struct net_device *dev, - struct dscc4_dev_priv *dpriv, - char *msg) -{ - printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n", - dev->name, dpriv->tx_current, dpriv->tx_dirty, msg); -} - -static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) -{ - struct device *d = &dpriv->pci_priv->pdev->dev; - struct TxFD *tx_fd = dpriv->tx_fd; - struct RxFD *rx_fd = dpriv->rx_fd; - struct sk_buff **skbuff; - int i; - - dma_free_coherent(d, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma); - dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); - - skbuff = dpriv->tx_skbuff; - for (i = 0; i < TX_RING_SIZE; i++) { - if (*skbuff) { - dma_unmap_single(d, le32_to_cpu(tx_fd->data), - (*skbuff)->len, DMA_TO_DEVICE); - dev_kfree_skb(*skbuff); - } - skbuff++; - tx_fd++; - } - - skbuff = dpriv->rx_skbuff; - for (i = 0; i < RX_RING_SIZE; i++) { - if (*skbuff) { - dma_unmap_single(d, le32_to_cpu(rx_fd->data), - RX_MAX(HDLC_MAX_MRU), - DMA_FROM_DEVICE); - dev_kfree_skb(*skbuff); - } - skbuff++; - rx_fd++; - } -} - -static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; - struct device *d = &dpriv->pci_priv->pdev->dev; - struct RxFD *rx_fd = dpriv->rx_fd + dirty; - const int len = RX_MAX(HDLC_MAX_MRU); - struct sk_buff *skb; - dma_addr_t addr; - - skb = dev_alloc_skb(len); - if (!skb) - goto err_out; - - skb->protocol = hdlc_type_trans(skb, dev); - addr = dma_map_single(d, skb->data, len, DMA_FROM_DEVICE); - if (dma_mapping_error(d, addr)) - goto err_free_skb; - - dpriv->rx_skbuff[dirty] = skb; - rx_fd->data = cpu_to_le32(addr); - return 0; - -err_free_skb: - 
dev_kfree_skb_any(skb); -err_out: - rx_fd->data = 0; - return -1; -} - -/* - * IRQ/thread/whatever safe - */ -static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv, - struct net_device *dev, char *msg) -{ - s8 i = 0; - - do { - if (!(scc_readl_star(dpriv, dev) & SccBusy)) { - printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name, - msg, i); - goto done; - } - schedule_timeout_uninterruptible(msecs_to_jiffies(100)); - rmb(); - } while (++i > 0); - netdev_err(dev, "%s timeout\n", msg); -done: - return (i >= 0) ? i : -EAGAIN; -} - -static int dscc4_do_action(struct net_device *dev, char *msg) -{ - void __iomem *ioaddr = dscc4_priv(dev)->base_addr; - s16 i = 0; - - writel(Action, ioaddr + GCMDR); - ioaddr += GSTAR; - do { - u32 state = readl(ioaddr); - - if (state & ArAck) { - netdev_dbg(dev, "%s ack\n", msg); - writel(ArAck, ioaddr); - goto done; - } else if (state & Arf) { - netdev_err(dev, "%s failed\n", msg); - writel(Arf, ioaddr); - i = -1; - goto done; - } - rmb(); - } while (++i > 0); - netdev_err(dev, "%s timeout\n", msg); -done: - return i; -} - -static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv) -{ - int cur = dpriv->iqtx_current%IRQ_RING_SIZE; - s8 i = 0; - - do { - if (!(dpriv->flags & (NeedIDR | NeedIDT)) || - (dpriv->iqtx[cur] & cpu_to_le32(Xpr))) - break; - smp_rmb(); - schedule_timeout_uninterruptible(msecs_to_jiffies(100)); - } while (++i > 0); - - return (i >= 0 ) ? i : -EAGAIN; -} - -#if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweak needed */ -static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - unsigned long flags; - - spin_lock_irqsave(&dpriv->pci_priv->lock, flags); - /* Cf errata DS5 p.6 */ - writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); - scc_patchl(PowerUp, 0, dpriv, dev, CCR0); - readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4); - writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG); - writel(Action, dpriv->base_addr + GCMDR); - spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags); -} - -#endif - -#if 0 -static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - u16 i = 0; - - /* Cf errata DS5 p.7 */ - scc_patchl(PowerUp, 0, dpriv, dev, CCR0); - scc_writel(0x00050000, dpriv, dev, CCR2); - /* - * Must be longer than the time required to fill the fifo. - */ - while (!dscc4_tx_quiescent(dpriv, dev) && ++i) { - udelay(1); - wmb(); - } - - writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG); - if (dscc4_do_action(dev, "Rdt") < 0) - netdev_err(dev, "Tx reset failed\n"); -} -#endif - -/* TODO: (ab)use this function to refill a completely depleted RX ring. 
*/ -static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE; - struct device *d = &dpriv->pci_priv->pdev->dev; - struct sk_buff *skb; - int pkt_len; - - skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE]; - if (!skb) { - printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__); - goto refill; - } - pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2)); - dma_unmap_single(d, le32_to_cpu(rx_fd->data), - RX_MAX(HDLC_MAX_MRU), DMA_FROM_DEVICE); - if ((skb->data[--pkt_len] & FrameOk) == FrameOk) { - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; - skb_put(skb, pkt_len); - if (netif_running(dev)) - skb->protocol = hdlc_type_trans(skb, dev); - netif_rx(skb); - } else { - if (skb->data[pkt_len] & FrameRdo) - dev->stats.rx_fifo_errors++; - else if (!(skb->data[pkt_len] & FrameCrc)) - dev->stats.rx_crc_errors++; - else if ((skb->data[pkt_len] & (FrameVfr | FrameRab)) != - (FrameVfr | FrameRab)) - dev->stats.rx_length_errors++; - dev->stats.rx_errors++; - dev_kfree_skb_irq(skb); - } -refill: - while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) { - if (try_get_rx_skb(dpriv, dev) < 0) - break; - dpriv->rx_dirty++; - } - dscc4_rx_update(dpriv, dev); - rx_fd->state2 = 0x00000000; - rx_fd->end = cpu_to_le32(0xbabeface); -} - -static void dscc4_free1(struct pci_dev *pdev) -{ - struct dscc4_pci_priv *ppriv; - struct dscc4_dev_priv *root; - int i; - - ppriv = pci_get_drvdata(pdev); - root = ppriv->root; - - for (i = 0; i < dev_per_card; i++) - unregister_hdlc_device(dscc4_to_dev(root + i)); - - for (i = 0; i < dev_per_card; i++) - free_netdev(root[i].dev); - kfree(root); - kfree(ppriv); -} - -static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct dscc4_pci_priv *priv; - struct dscc4_dev_priv *dpriv; - void __iomem *ioaddr; - int i, rc; - - printk(KERN_DEBUG "%s", version); - - rc = pci_enable_device(pdev); - if (rc < 0) - goto out; - - rc = pci_request_region(pdev, 0, "registers"); - if (rc < 0) { - pr_err("can't reserve MMIO region (regs)\n"); - goto err_disable_0; - } - rc = pci_request_region(pdev, 1, "LBI interface"); - if (rc < 0) { - pr_err("can't reserve MMIO region (lbi)\n"); - goto err_free_mmio_region_1; - } - - ioaddr = pci_ioremap_bar(pdev, 0); - if (!ioaddr) { - pr_err("cannot remap MMIO region %llx @ %llx\n", - (unsigned long long)pci_resource_len(pdev, 0), - (unsigned long long)pci_resource_start(pdev, 0)); - rc = -EIO; - goto err_free_mmio_regions_2; - } - printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#llx (regs), %#llx (lbi), IRQ %d\n", - (unsigned long long)pci_resource_start(pdev, 0), - (unsigned long long)pci_resource_start(pdev, 1), pdev->irq); - - /* Cf errata DS5 p.2 */ - pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8); - pci_set_master(pdev); - - rc = dscc4_found1(pdev, ioaddr); - if (rc < 0) - goto err_iounmap_3; - - priv = pci_get_drvdata(pdev); - - rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root); - if (rc < 0) { - pr_warn("IRQ %d busy\n", pdev->irq); - goto err_release_4; - } - - /* power up/little endian/dma core controlled via lrda/ltda */ - writel(0x00000001, ioaddr + GMODE); - /* Shared interrupt queue */ - { - u32 bits; - - bits = (IRQ_RING_SIZE >> 5) - 1; - bits |= bits << 4; - bits |= bits << 8; - bits |= bits << 16; - writel(bits, ioaddr + IQLENR0); - } - /* Global interrupt queue */ - writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1); - - rc = -ENOMEM; - - priv->iqcfg 
= (__le32 *)dma_alloc_coherent(&pdev->dev, - IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma, GFP_KERNEL); - if (!priv->iqcfg) - goto err_free_irq_5; - writel(priv->iqcfg_dma, ioaddr + IQCFG); - - /* - * SCC 0-3 private rx/tx irq structures - * IQRX/TXi needs to be set soon. Learned it the hard way... - */ - for (i = 0; i < dev_per_card; i++) { - dpriv = priv->root + i; - dpriv->iqtx = (__le32 *)dma_alloc_coherent(&pdev->dev, - IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma, - GFP_KERNEL); - if (!dpriv->iqtx) - goto err_free_iqtx_6; - writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4); - } - for (i = 0; i < dev_per_card; i++) { - dpriv = priv->root + i; - dpriv->iqrx = (__le32 *)dma_alloc_coherent(&pdev->dev, - IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma, - GFP_KERNEL); - if (!dpriv->iqrx) - goto err_free_iqrx_7; - writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4); - } - - /* Cf application hint. Beware of hard-lock condition on threshold. */ - writel(0x42104000, ioaddr + FIFOCR1); - //writel(0x9ce69800, ioaddr + FIFOCR2); - writel(0xdef6d800, ioaddr + FIFOCR2); - //writel(0x11111111, ioaddr + FIFOCR4); - writel(0x18181818, ioaddr + FIFOCR4); - // FIXME: should depend on the chipset revision - writel(0x0000000e, ioaddr + FIFOCR3); - - writel(0xff200001, ioaddr + GCMDR); - - rc = 0; -out: - return rc; - -err_free_iqrx_7: - while (--i >= 0) { - dpriv = priv->root + i; - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqrx, dpriv->iqrx_dma); - } - i = dev_per_card; -err_free_iqtx_6: - while (--i >= 0) { - dpriv = priv->root + i; - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqtx, dpriv->iqtx_dma); - } - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg, - priv->iqcfg_dma); -err_free_irq_5: - free_irq(pdev->irq, priv->root); -err_release_4: - dscc4_free1(pdev); -err_iounmap_3: - iounmap (ioaddr); -err_free_mmio_regions_2: - pci_release_region(pdev, 1); -err_free_mmio_region_1: - pci_release_region(pdev, 0); -err_disable_0: - pci_disable_device(pdev); - goto out; -}; - -/* - * Let's hope the default values are decent enough to protect my - * feet from the user's gun - Ueimor - */ -static void dscc4_init_registers(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - /* No interrupts, SCC core disabled. Let's relax */ - scc_writel(0x00000000, dpriv, dev, CCR0); - - scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR); - - /* - * No address recognition/crc-CCITT/cts enabled - * Shared flags transmission disabled - cf errata DS5 p.11 - * Carrier detect disabled - cf errata p.14 - * FIXME: carrier detection/polarity may be handled more gracefully. 
- */ - scc_writel(0x02408000, dpriv, dev, CCR1); - - /* crc not forwarded - Cf errata DS5 p.11 */ - scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2); - // crc forwarded - //scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2); -} - -static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz) -{ - int ret = 0; - - if ((hz < 0) || (hz > DSCC4_HZ_MAX)) - ret = -EOPNOTSUPP; - else - dpriv->pci_priv->xtal_hz = hz; - - return ret; -} - -static const struct net_device_ops dscc4_ops = { - .ndo_open = dscc4_open, - .ndo_stop = dscc4_close, - .ndo_start_xmit = hdlc_start_xmit, - .ndo_do_ioctl = dscc4_ioctl, - .ndo_tx_timeout = dscc4_tx_timeout, -}; - -static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) -{ - struct dscc4_pci_priv *ppriv; - struct dscc4_dev_priv *root; - int i, ret = -ENOMEM; - - root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL); - if (!root) - goto err_out; - - for (i = 0; i < dev_per_card; i++) { - root[i].dev = alloc_hdlcdev(root + i); - if (!root[i].dev) - goto err_free_dev; - } - - ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL); - if (!ppriv) - goto err_free_dev; - - ppriv->root = root; - spin_lock_init(&ppriv->lock); - - for (i = 0; i < dev_per_card; i++) { - struct dscc4_dev_priv *dpriv = root + i; - struct net_device *d = dscc4_to_dev(dpriv); - hdlc_device *hdlc = dev_to_hdlc(d); - - d->base_addr = (unsigned long)ioaddr; - d->irq = pdev->irq; - d->netdev_ops = &dscc4_ops; - d->watchdog_timeo = TX_TIMEOUT; - SET_NETDEV_DEV(d, &pdev->dev); - - dpriv->dev_id = i; - dpriv->pci_priv = ppriv; - dpriv->base_addr = ioaddr; - spin_lock_init(&dpriv->lock); - - hdlc->xmit = dscc4_start_xmit; - hdlc->attach = dscc4_hdlc_attach; - - dscc4_init_registers(dpriv, d); - dpriv->parity = PARITY_CRC16_PR0_CCITT; - dpriv->encoding = ENCODING_NRZ; - - ret = dscc4_init_ring(d); - if (ret < 0) - goto err_unregister; - - ret = register_hdlc_device(d); - if (ret < 0) { - pr_err("unable to register\n"); - dscc4_release_ring(dpriv); - goto err_unregister; - } - } - - ret = dscc4_set_quartz(root, quartz); - if (ret < 0) - goto err_unregister; - - pci_set_drvdata(pdev, ppriv); - return ret; - -err_unregister: - while (i-- > 0) { - dscc4_release_ring(root + i); - unregister_hdlc_device(dscc4_to_dev(root + i)); - } - kfree(ppriv); - i = dev_per_card; -err_free_dev: - while (i-- > 0) - free_netdev(root[i].dev); - kfree(root); -err_out: - return ret; -}; - -static void dscc4_tx_timeout(struct net_device *dev) -{ - /* FIXME: something is missing there */ -} - -static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv) -{ - sync_serial_settings *settings = &dpriv->settings; - - if (settings->loopback && (settings->clock_type != CLOCK_INT)) { - struct net_device *dev = dscc4_to_dev(dpriv); - - netdev_info(dev, "loopback requires clock\n"); - return -1; - } - return 0; -} - -#ifdef CONFIG_DSCC4_PCI_RST -/* - * Some DSCC4-based cards wires the GPIO port and the PCI #RST pin together - * so as to provide a safe way to reset the asic while not the whole machine - * rebooting. - * - * This code doesn't need to be efficient. Keep It Simple - */ -static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) -{ - int i; - - mutex_lock(&dscc4_mutex); - for (i = 0; i < 16; i++) - pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i); - - /* Maximal LBI clock divider (who cares ?) and whole GPIO range. 
*/ - writel(0x001c0000, ioaddr + GMODE); - /* Configure GPIO port as output */ - writel(0x0000ffff, ioaddr + GPDIR); - /* Disable interruption */ - writel(0x0000ffff, ioaddr + GPIM); - - writel(0x0000ffff, ioaddr + GPDATA); - writel(0x00000000, ioaddr + GPDATA); - - /* Flush posted writes */ - readl(ioaddr + GSTAR); - - schedule_timeout_uninterruptible(msecs_to_jiffies(100)); - - for (i = 0; i < 16; i++) - pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); - mutex_unlock(&dscc4_mutex); -} -#else -#define dscc4_pci_reset(pdev,ioaddr) do {} while (0) -#endif /* CONFIG_DSCC4_PCI_RST */ - -static int dscc4_open(struct net_device *dev) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - int ret = -EAGAIN; - - if ((dscc4_loopback_check(dpriv) < 0)) - goto err; - - if ((ret = hdlc_open(dev))) - goto err; - - /* - * Due to various bugs, there is no way to reliably reset a - * specific port (manufacturer's dependent special PCI #RST wiring - * apart: it affects all ports). Thus the device goes in the best - * silent mode possible at dscc4_close() time and simply claims to - * be up if it's opened again. It still isn't possible to change - * the HDLC configuration without rebooting but at least the ports - * can be up/down ifconfig'ed without killing the host. - */ - if (dpriv->flags & FakeReset) { - dpriv->flags &= ~FakeReset; - scc_patchl(0, PowerUp, dpriv, dev, CCR0); - scc_patchl(0, 0x00050000, dpriv, dev, CCR2); - scc_writel(EventsMask, dpriv, dev, IMR); - netdev_info(dev, "up again\n"); - goto done; - } - - /* IDT+IDR during XPR */ - dpriv->flags = NeedIDR | NeedIDT; - - scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0); - - /* - * The following is a bit paranoid... - * - * NB: the datasheet "...CEC will stay active if the SCC is in - * power-down mode or..." and CCR2.RAC = 1 are two different - * situations. - */ - if (scc_readl_star(dpriv, dev) & SccBusy) { - netdev_err(dev, "busy - try later\n"); - ret = -EAGAIN; - goto err_out; - } else - netdev_info(dev, "available - good\n"); - - scc_writel(EventsMask, dpriv, dev, IMR); - - /* Posted write is flushed in the wait_ack loop */ - scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR); - - if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0) - goto err_disable_scc_events; - - /* - * I would expect XPR near CE completion (before ? after ?). - * At worst, this code won't see a late XPR and people - * will have to re-issue an ifconfig (this is harmless). - * WARNING, a really missing XPR usually means a hardware - * reset is needed. Suggestions anyone ? 
- */ - if ((ret = dscc4_xpr_ack(dpriv)) < 0) { - pr_err("XPR timeout\n"); - goto err_disable_scc_events; - } - - if (debug > 2) - dscc4_tx_print(dev, dpriv, "Open"); - -done: - netif_start_queue(dev); - - netif_carrier_on(dev); - - return 0; - -err_disable_scc_events: - scc_writel(0xffffffff, dpriv, dev, IMR); - scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); -err_out: - hdlc_close(dev); -err: - return ret; -} - -#ifdef DSCC4_POLLING -static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - /* FIXME: it's gonna be easy (TM), for sure */ -} -#endif /* DSCC4_POLLING */ - -static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct device *d = &dpriv->pci_priv->pdev->dev; - struct TxFD *tx_fd; - dma_addr_t addr; - int next; - - addr = dma_map_single(d, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(d, addr)) { - dev_kfree_skb_any(skb); - dev->stats.tx_dropped++; - return NETDEV_TX_OK; - } - - next = dpriv->tx_current%TX_RING_SIZE; - dpriv->tx_skbuff[next] = skb; - tx_fd = dpriv->tx_fd + next; - tx_fd->state = FrameEnd | TO_STATE_TX(skb->len); - tx_fd->data = cpu_to_le32(addr); - tx_fd->complete = 0x00000000; - tx_fd->jiffies = jiffies; - mb(); - -#ifdef DSCC4_POLLING - spin_lock(&dpriv->lock); - while (dscc4_tx_poll(dpriv, dev)); - spin_unlock(&dpriv->lock); -#endif - - if (debug > 2) - dscc4_tx_print(dev, dpriv, "Xmit"); - /* To be cleaned(unsigned int)/optimized. Later, ok ? */ - if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)) - netif_stop_queue(dev); - - if (dscc4_tx_quiescent(dpriv, dev)) - dscc4_do_tx(dpriv, dev); - - return NETDEV_TX_OK; -} - -static int dscc4_close(struct net_device *dev) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - - netif_stop_queue(dev); - - scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0); - scc_patchl(0x00050000, 0, dpriv, dev, CCR2); - scc_writel(0xffffffff, dpriv, dev, IMR); - - dpriv->flags |= FakeReset; - - hdlc_close(dev); - - return 0; -} - -static inline int dscc4_check_clock_ability(int port) -{ - int ret = 0; - -#ifdef CONFIG_DSCC4_PCISYNC - if (port >= 2) - ret = -1; -#endif - return ret; -} - -/* - * DS1 p.137: "There are a total of 13 different clocking modes..." - * ^^ - * Design choices: - * - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a). - * Clock mode 3b _should_ work but the testing seems to make this point - * dubious (DIY testing requires setting CCR0 at 0x00000033). - * This is supposed to provide least surprise "DTE like" behavior. - * - if line rate is specified, clocks are assumed to be locally generated. - * A quartz must be available (on pin XTAL1). Modes 6b/7b are used. Choosing - * between these it automagically done according on the required frequency - * scaling. Of course some rounding may take place. - * - no high speed mode (40Mb/s). May be trivial to do but I don't have an - * appropriate external clocking device for testing. - * - no time-slot/clock mode 5: shameless laziness. - * - * The clock signals wiring can be (is ?) manufacturer dependent. Good luck. - * - * BIG FAT WARNING: if the device isn't provided enough clocking signal, it - * won't pass the init sequence. For example, straight back-to-back DTE without - * external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is - * called. 
- * - * Typos lurk in datasheet (missing divier in clock mode 7a figure 51 p.153 - * DS0 for example) - * - * Clock mode related bits of CCR0: - * +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only) - * | +---------- SSEL: sub-mode select 0 -> a, 1 -> b - * | | +-------- High Speed: say 0 - * | | | +-+-+-- Clock Mode: 0..7 - * | | | | | | - * -+-+-+-+-+-+-+-+ - * x|x|5|4|3|2|1|0| lower bits - * - * Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b) - * +-+-+-+------------------ M (0..15) - * | | | | +-+-+-+-+-+-- N (0..63) - * 0 0 0 0 | | | | 0 0 | | | | | | - * ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits - * - */ -static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - int ret = -1; - u32 brr; - - *state &= ~Ccr0ClockMask; - if (*bps) { /* Clock generated - required for DCE */ - u32 n = 0, m = 0, divider; - int xtal; - - xtal = dpriv->pci_priv->xtal_hz; - if (!xtal) - goto done; - if (dscc4_check_clock_ability(dpriv->dev_id) < 0) - goto done; - divider = xtal / *bps; - if (divider > BRR_DIVIDER_MAX) { - divider >>= 4; - *state |= 0x00000036; /* Clock mode 6b (BRG/16) */ - } else - *state |= 0x00000037; /* Clock mode 7b (BRG) */ - if (divider >> 22) { - n = 63; - m = 15; - } else if (divider) { - /* Extraction of the 6 highest weighted bits */ - m = 0; - while (0xffffffc0 & divider) { - m++; - divider >>= 1; - } - n = divider; - } - brr = (m << 8) | n; - divider = n << m; - if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */ - divider <<= 4; - *bps = xtal / divider; - } else { - /* - * External clock - DTE - * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00). - * Nothing more to be done - */ - brr = 0; - } - scc_writel(brr, dpriv, dev, BRR); - ret = 0; -done: - return ret; -} - -static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - const size_t size = sizeof(dpriv->settings); - int ret = 0; - - if (dev->flags & IFF_UP) - return -EBUSY; - - if (cmd != SIOCWANDEV) - return -EOPNOTSUPP; - - switch(ifr->ifr_settings.type) { - case IF_GET_IFACE: - ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; - if (ifr->ifr_settings.size < size) { - ifr->ifr_settings.size = size; /* data size wanted */ - return -ENOBUFS; - } - if (copy_to_user(line, &dpriv->settings, size)) - return -EFAULT; - break; - - case IF_IFACE_SYNC_SERIAL: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (dpriv->flags & FakeReset) { - netdev_info(dev, "please reset the device before this command\n"); - return -EPERM; - } - if (copy_from_user(&dpriv->settings, line, size)) - return -EFAULT; - ret = dscc4_set_iface(dpriv, dev); - break; - - default: - ret = hdlc_ioctl(dev, ifr, cmd); - break; - } - - return ret; -} - -static int dscc4_match(const struct thingie *p, int value) -{ - int i; - - for (i = 0; p[i].define != -1; i++) { - if (value == p[i].define) - break; - } - if (p[i].define == -1) - return -1; - else - return i; -} - -static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - sync_serial_settings *settings = &dpriv->settings; - int ret = -EOPNOTSUPP; - u32 bps, state; - - bps = settings->clock_rate; - state = scc_readl(dpriv, CCR0); - if (dscc4_set_clock(dev, &bps, &state) < 0) - goto done; - if (bps) { /* DCE */ - printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name); - if 
(settings->clock_rate != bps) { - printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n", - dev->name, settings->clock_rate, bps); - settings->clock_rate = bps; - } - } else { /* DTE */ - state |= PowerUp | Vis; - printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name); - } - scc_writel(state, dpriv, dev, CCR0); - ret = 0; -done: - return ret; -} - -static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - static const struct thingie encoding[] = { - { ENCODING_NRZ, 0x00000000 }, - { ENCODING_NRZI, 0x00200000 }, - { ENCODING_FM_MARK, 0x00400000 }, - { ENCODING_FM_SPACE, 0x00500000 }, - { ENCODING_MANCHESTER, 0x00600000 }, - { -1, 0} - }; - int i, ret = 0; - - i = dscc4_match(encoding, dpriv->encoding); - if (i >= 0) - scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0); - else - ret = -EOPNOTSUPP; - return ret; -} - -static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - sync_serial_settings *settings = &dpriv->settings; - u32 state; - - state = scc_readl(dpriv, CCR1); - if (settings->loopback) { - printk(KERN_DEBUG "%s: loopback\n", dev->name); - state |= 0x00000100; - } else { - printk(KERN_DEBUG "%s: normal\n", dev->name); - state &= ~0x00000100; - } - scc_writel(state, dpriv, dev, CCR1); - return 0; -} - -static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv, - struct net_device *dev) -{ - static const struct thingie crc[] = { - { PARITY_CRC16_PR0_CCITT, 0x00000010 }, - { PARITY_CRC16_PR1_CCITT, 0x00000000 }, - { PARITY_CRC32_PR0_CCITT, 0x00000011 }, - { PARITY_CRC32_PR1_CCITT, 0x00000001 } - }; - int i, ret = 0; - - i = dscc4_match(crc, dpriv->parity); - if (i >= 0) - scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1); - else - ret = -EOPNOTSUPP; - return ret; -} - -static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev) -{ - struct { - int (*action)(struct dscc4_dev_priv *, struct net_device *); - } *p, do_setting[] = { - { dscc4_encoding_setting }, - { dscc4_clock_setting }, - { dscc4_loopback_setting }, - { dscc4_crc_setting }, - { NULL } - }; - int ret = 0; - - for (p = do_setting; p->action; p++) { - if ((ret = p->action(dpriv, dev)) < 0) - break; - } - return ret; -} - -static irqreturn_t dscc4_irq(int irq, void *token) -{ - struct dscc4_dev_priv *root = token; - struct dscc4_pci_priv *priv; - struct net_device *dev; - void __iomem *ioaddr; - u32 state; - unsigned long flags; - int i, handled = 1; - - priv = root->pci_priv; - dev = dscc4_to_dev(root); - - spin_lock_irqsave(&priv->lock, flags); - - ioaddr = root->base_addr; - - state = readl(ioaddr + GSTAR); - if (!state) { - handled = 0; - goto out; - } - if (debug > 3) - printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state); - writel(state, ioaddr + GSTAR); - - if (state & Arf) { - netdev_err(dev, "failure (Arf). 
Harass the maintainer\n"); - goto out; - } - state &= ~ArAck; - if (state & Cfg) { - if (debug > 0) - printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME); - if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf)) - netdev_err(dev, "CFG failed\n"); - if (!(state &= ~Cfg)) - goto out; - } - if (state & RxEvt) { - i = dev_per_card - 1; - do { - dscc4_rx_irq(priv, root + i); - } while (--i >= 0); - state &= ~RxEvt; - } - if (state & TxEvt) { - i = dev_per_card - 1; - do { - dscc4_tx_irq(priv, root + i); - } while (--i >= 0); - state &= ~TxEvt; - } -out: - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_RETVAL(handled); -} - -static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv, - struct dscc4_dev_priv *dpriv) -{ - struct net_device *dev = dscc4_to_dev(dpriv); - u32 state; - int cur, loop = 0; - -try: - cur = dpriv->iqtx_current%IRQ_RING_SIZE; - state = le32_to_cpu(dpriv->iqtx[cur]); - if (!state) { - if (debug > 4) - printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name, - state); - if ((debug > 1) && (loop > 1)) - printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop); - if (loop && netif_queue_stopped(dev)) - if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE) - netif_wake_queue(dev); - - if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) && - !dscc4_tx_done(dpriv)) - dscc4_do_tx(dpriv, dev); - return; - } - loop++; - dpriv->iqtx[cur] = 0; - dpriv->iqtx_current++; - - if (state_check(state, dpriv, dev, "Tx") < 0) - return; - - if (state & SccEvt) { - if (state & Alls) { - struct sk_buff *skb; - struct TxFD *tx_fd; - - if (debug > 2) - dscc4_tx_print(dev, dpriv, "Alls"); - /* - * DataComplete can't be trusted for Tx completion. - * Cf errata DS5 p.8 - */ - cur = dpriv->tx_dirty%TX_RING_SIZE; - tx_fd = dpriv->tx_fd + cur; - skb = dpriv->tx_skbuff[cur]; - if (skb) { - dma_unmap_single(&ppriv->pdev->dev, - le32_to_cpu(tx_fd->data), - skb->len, DMA_TO_DEVICE); - if (tx_fd->state & FrameEnd) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - } - dev_consume_skb_irq(skb); - dpriv->tx_skbuff[cur] = NULL; - ++dpriv->tx_dirty; - } else { - if (debug > 1) - netdev_err(dev, "Tx: NULL skb %d\n", - cur); - } - /* - * If the driver ends sending crap on the wire, it - * will be way easier to diagnose than the (not so) - * random freeze induced by null sized tx frames. - */ - tx_fd->data = tx_fd->next; - tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE); - tx_fd->complete = 0x00000000; - tx_fd->jiffies = 0; - - if (!(state &= ~Alls)) - goto try; - } - /* - * Transmit Data Underrun - */ - if (state & Xdu) { - netdev_err(dev, "Tx Data Underrun. Ask maintainer\n"); - dpriv->flags = NeedIDT; - /* Tx reset */ - writel(MTFi | Rdt, - dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG); - writel(Action, dpriv->base_addr + GCMDR); - return; - } - if (state & Cts) { - netdev_info(dev, "CTS transition\n"); - if (!(state &= ~Cts)) /* DEBUG */ - goto try; - } - if (state & Xmr) { - /* Frame needs to be sent again - FIXME */ - netdev_err(dev, "Tx ReTx. Ask maintainer\n"); - if (!(state &= ~Xmr)) /* DEBUG */ - goto try; - } - if (state & Xpr) { - void __iomem *scc_addr; - unsigned long ring; - unsigned int i; - - /* - * - the busy condition happens (sometimes); - * - it doesn't seem to make the handler unreliable. 
- */ - for (i = 1; i; i <<= 1) { - if (!(scc_readl_star(dpriv, dev) & SccBusy)) - break; - } - if (!i) - netdev_info(dev, "busy in irq\n"); - - scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id; - /* Keep this order: IDT before IDR */ - if (dpriv->flags & NeedIDT) { - if (debug > 2) - dscc4_tx_print(dev, dpriv, "Xpr"); - ring = dpriv->tx_fd_dma + - (dpriv->tx_dirty%TX_RING_SIZE)* - sizeof(struct TxFD); - writel(ring, scc_addr + CH0BTDA); - dscc4_do_tx(dpriv, dev); - writel(MTFi | Idt, scc_addr + CH0CFG); - if (dscc4_do_action(dev, "IDT") < 0) - goto err_xpr; - dpriv->flags &= ~NeedIDT; - } - if (dpriv->flags & NeedIDR) { - ring = dpriv->rx_fd_dma + - (dpriv->rx_current%RX_RING_SIZE)* - sizeof(struct RxFD); - writel(ring, scc_addr + CH0BRDA); - dscc4_rx_update(dpriv, dev); - writel(MTFi | Idr, scc_addr + CH0CFG); - if (dscc4_do_action(dev, "IDR") < 0) - goto err_xpr; - dpriv->flags &= ~NeedIDR; - smp_wmb(); - /* Activate receiver and misc */ - scc_writel(0x08050008, dpriv, dev, CCR2); - } - err_xpr: - if (!(state &= ~Xpr)) - goto try; - } - if (state & Cd) { - if (debug > 0) - netdev_info(dev, "CD transition\n"); - if (!(state &= ~Cd)) /* DEBUG */ - goto try; - } - } else { /* ! SccEvt */ - if (state & Hi) { -#ifdef DSCC4_POLLING - while (!dscc4_tx_poll(dpriv, dev)); -#endif - netdev_info(dev, "Tx Hi\n"); - state &= ~Hi; - } - if (state & Err) { - netdev_info(dev, "Tx ERR\n"); - dev->stats.tx_errors++; - state &= ~Err; - } - } - goto try; -} - -static void dscc4_rx_irq(struct dscc4_pci_priv *priv, - struct dscc4_dev_priv *dpriv) -{ - struct net_device *dev = dscc4_to_dev(dpriv); - u32 state; - int cur; - -try: - cur = dpriv->iqrx_current%IRQ_RING_SIZE; - state = le32_to_cpu(dpriv->iqrx[cur]); - if (!state) - return; - dpriv->iqrx[cur] = 0; - dpriv->iqrx_current++; - - if (state_check(state, dpriv, dev, "Rx") < 0) - return; - - if (!(state & SccEvt)){ - struct RxFD *rx_fd; - - if (debug > 4) - printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name, - state); - state &= 0x00ffffff; - if (state & Err) { /* Hold or reset */ - printk(KERN_DEBUG "%s: Rx ERR\n", dev->name); - cur = dpriv->rx_current%RX_RING_SIZE; - rx_fd = dpriv->rx_fd + cur; - /* - * Presume we're not facing a DMAC receiver reset. - * As We use the rx size-filtering feature of the - * DSCC4, the beginning of a new frame is waiting in - * the rx fifo. I bet a Receive Data Overflow will - * happen most of time but let's try and avoid it. - * Btw (as for RDO) if one experiences ERR whereas - * the system looks rather idle, there may be a - * problem with latency. In this case, increasing - * RX_RING_SIZE may help. 
- */ - //while (dpriv->rx_needs_refill) { - while (!(rx_fd->state1 & Hold)) { - rx_fd++; - cur++; - if (!(cur = cur%RX_RING_SIZE)) - rx_fd = dpriv->rx_fd; - } - //dpriv->rx_needs_refill--; - try_get_rx_skb(dpriv, dev); - if (!rx_fd->data) - goto try; - rx_fd->state1 &= ~Hold; - rx_fd->state2 = 0x00000000; - rx_fd->end = cpu_to_le32(0xbabeface); - //} - goto try; - } - if (state & Fi) { - dscc4_rx_skb(dpriv, dev); - goto try; - } - if (state & Hi ) { /* HI bit */ - netdev_info(dev, "Rx Hi\n"); - state &= ~Hi; - goto try; - } - } else { /* SccEvt */ - if (debug > 1) { - //FIXME: verifier la presence de tous les evenements - static struct { - u32 mask; - const char *irq_name; - } evts[] = { - { 0x00008000, "TIN"}, - { 0x00000020, "RSC"}, - { 0x00000010, "PCE"}, - { 0x00000008, "PLLA"}, - { 0, NULL} - }, *evt; - - for (evt = evts; evt->irq_name; evt++) { - if (state & evt->mask) { - printk(KERN_DEBUG "%s: %s\n", - dev->name, evt->irq_name); - if (!(state &= ~evt->mask)) - goto try; - } - } - } else { - if (!(state &= ~0x0000c03c)) - goto try; - } - if (state & Cts) { - netdev_info(dev, "CTS transition\n"); - if (!(state &= ~Cts)) /* DEBUG */ - goto try; - } - /* - * Receive Data Overflow (FIXME: fscked) - */ - if (state & Rdo) { - struct RxFD *rx_fd; - void __iomem *scc_addr; - int cur; - - //if (debug) - // dscc4_rx_dump(dpriv); - scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id; - - scc_patchl(RxActivate, 0, dpriv, dev, CCR2); - /* - * This has no effect. Why ? - * ORed with TxSccRes, one sees the CFG ack (for - * the TX part only). - */ - scc_writel(RxSccRes, dpriv, dev, CMDR); - dpriv->flags |= RdoSet; - - /* - * Let's try and save something in the received data. - * rx_current must be incremented at least once to - * avoid HOLD in the BRDA-to-be-pointed desc. - */ - do { - cur = dpriv->rx_current++%RX_RING_SIZE; - rx_fd = dpriv->rx_fd + cur; - if (!(rx_fd->state2 & DataComplete)) - break; - if (rx_fd->state2 & FrameAborted) { - dev->stats.rx_over_errors++; - rx_fd->state1 |= Hold; - rx_fd->state2 = 0x00000000; - rx_fd->end = cpu_to_le32(0xbabeface); - } else - dscc4_rx_skb(dpriv, dev); - } while (1); - - if (debug > 0) { - if (dpriv->flags & RdoSet) - printk(KERN_DEBUG - "%s: no RDO in Rx data\n", DRV_NAME); - } -#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY - /* - * FIXME: must the reset be this violent ? - */ -#warning "FIXME: CH0BRDA" - writel(dpriv->rx_fd_dma + - (dpriv->rx_current%RX_RING_SIZE)* - sizeof(struct RxFD), scc_addr + CH0BRDA); - writel(MTFi|Rdr|Idr, scc_addr + CH0CFG); - if (dscc4_do_action(dev, "RDR") < 0) { - netdev_err(dev, "RDO recovery failed(RDR)\n"); - goto rdo_end; - } - writel(MTFi|Idr, scc_addr + CH0CFG); - if (dscc4_do_action(dev, "IDR") < 0) { - netdev_err(dev, "RDO recovery failed(IDR)\n"); - goto rdo_end; - } - rdo_end: -#endif - scc_patchl(0, RxActivate, dpriv, dev, CCR2); - goto try; - } - if (state & Cd) { - netdev_info(dev, "CD transition\n"); - if (!(state &= ~Cd)) /* DEBUG */ - goto try; - } - if (state & Flex) { - printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME); - if (!(state &= ~Flex)) - goto try; - } - } -} - -/* - * I had expected the following to work for the first descriptor - * (tx_fd->state = 0xc0000000) - * - Hold=1 (don't try and branch to the next descripto); - * - No=0 (I want an empty data section, i.e. size=0); - * - Fe=1 (required by No=0 or we got an Err irq and must reset). - * It failed and locked solid. Thus the introduction of a dummy skb. - * Problem is acknowledged in errata sheet DS5. 
Joy :o/ - */ -static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) -{ - struct sk_buff *skb; - - skb = dev_alloc_skb(DUMMY_SKB_SIZE); - if (skb) { - struct device *d = &dpriv->pci_priv->pdev->dev; - int last = dpriv->tx_dirty%TX_RING_SIZE; - struct TxFD *tx_fd = dpriv->tx_fd + last; - dma_addr_t addr; - - skb->len = DUMMY_SKB_SIZE; - skb_copy_to_linear_data(skb, version, - strlen(version) % DUMMY_SKB_SIZE); - addr = dma_map_single(d, skb->data, DUMMY_SKB_SIZE, - DMA_TO_DEVICE); - if (dma_mapping_error(d, addr)) { - dev_kfree_skb_any(skb); - return NULL; - } - tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); - tx_fd->data = cpu_to_le32(addr); - dpriv->tx_skbuff[last] = skb; - } - return skb; -} - -static int dscc4_init_ring(struct net_device *dev) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct device *d = &dpriv->pci_priv->pdev->dev; - struct TxFD *tx_fd; - struct RxFD *rx_fd; - void *ring; - int i; - - ring = dma_alloc_coherent(d, RX_TOTAL_SIZE, &dpriv->rx_fd_dma, - GFP_KERNEL); - if (!ring) - goto err_out; - dpriv->rx_fd = rx_fd = (struct RxFD *) ring; - - ring = dma_alloc_coherent(d, TX_TOTAL_SIZE, &dpriv->tx_fd_dma, - GFP_KERNEL); - if (!ring) - goto err_free_dma_rx; - dpriv->tx_fd = tx_fd = (struct TxFD *) ring; - - memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE); - dpriv->tx_dirty = 0xffffffff; - i = dpriv->tx_current = 0; - do { - tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE); - tx_fd->complete = 0x00000000; - /* FIXME: NULL should be ok - to be tried */ - tx_fd->data = cpu_to_le32(dpriv->tx_fd_dma); - (tx_fd++)->next = cpu_to_le32(dpriv->tx_fd_dma + - (++i%TX_RING_SIZE)*sizeof(*tx_fd)); - } while (i < TX_RING_SIZE); - - if (!dscc4_init_dummy_skb(dpriv)) - goto err_free_dma_tx; - - memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE); - i = dpriv->rx_dirty = dpriv->rx_current = 0; - do { - /* size set by the host. 
Multiple of 4 bytes please */ - rx_fd->state1 = HiDesc; - rx_fd->state2 = 0x00000000; - rx_fd->end = cpu_to_le32(0xbabeface); - rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU); - // FIXME: return value verifiee mais traitement suspect - if (try_get_rx_skb(dpriv, dev) >= 0) - dpriv->rx_dirty++; - (rx_fd++)->next = cpu_to_le32(dpriv->rx_fd_dma + - (++i%RX_RING_SIZE)*sizeof(*rx_fd)); - } while (i < RX_RING_SIZE); - - return 0; - -err_free_dma_tx: - dma_free_coherent(d, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma); -err_free_dma_rx: - dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); -err_out: - return -ENOMEM; -} - -static void dscc4_remove_one(struct pci_dev *pdev) -{ - struct dscc4_pci_priv *ppriv; - struct dscc4_dev_priv *root; - void __iomem *ioaddr; - int i; - - ppriv = pci_get_drvdata(pdev); - root = ppriv->root; - - ioaddr = root->base_addr; - - dscc4_pci_reset(pdev, ioaddr); - - free_irq(pdev->irq, root); - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg, - ppriv->iqcfg_dma); - for (i = 0; i < dev_per_card; i++) { - struct dscc4_dev_priv *dpriv = root + i; - - dscc4_release_ring(dpriv); - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqrx, dpriv->iqrx_dma); - dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqtx, dpriv->iqtx_dma); - } - - dscc4_free1(pdev); - - iounmap(ioaddr); - - pci_release_region(pdev, 1); - pci_release_region(pdev, 0); - - pci_disable_device(pdev); -} - -static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding, - unsigned short parity) -{ - struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - - if (encoding != ENCODING_NRZ && - encoding != ENCODING_NRZI && - encoding != ENCODING_FM_MARK && - encoding != ENCODING_FM_SPACE && - encoding != ENCODING_MANCHESTER) - return -EINVAL; - - if (parity != PARITY_NONE && - parity != PARITY_CRC16_PR0_CCITT && - parity != PARITY_CRC16_PR1_CCITT && - parity != PARITY_CRC32_PR0_CCITT && - parity != PARITY_CRC32_PR1_CCITT) - return -EINVAL; - - dpriv->encoding = encoding; - dpriv->parity = parity; - return 0; -} - -#ifndef MODULE -static int __init dscc4_setup(char *str) -{ - int *args[] = { &debug, &quartz, NULL }, **p = args; - - while (*p && (get_option(&str, *p) == 2)) - p++; - return 1; -} - -__setup("dscc4.setup=", dscc4_setup); -#endif - -static const struct pci_device_id dscc4_pci_tbl[] = { - { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4, - PCI_ANY_ID, PCI_ANY_ID, }, - { 0,} -}; -MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl); - -static struct pci_driver dscc4_driver = { - .name = DRV_NAME, - .id_table = dscc4_pci_tbl, - .probe = dscc4_init_one, - .remove = dscc4_remove_one, -}; - -module_pci_driver(dscc4_driver); -- cgit v1.2.3-59-g8ed1b From 81e09359b4658fde625b59da554386179af31e33 Mon Sep 17 00:00:00 2001 From: Rain River Date: Fri, 13 Sep 2019 21:43:45 +0800 Subject: MAINTAINERS: update FORCEDETH MAINTAINERS info Many FORCEDETH NICs are used in our hosts. Several bugs are fixed and some features are developed for FORCEDETH NICs. And I have been reviewing patches for FORCEDETH NIC for several months. Mark me as the FORCEDETH NIC maintainer. I will send out the patches and maintain FORCEDETH NIC. Signed-off-by: Rain River Signed-off-by: David S. 
Miller
---
 MAINTAINERS | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'MAINTAINERS')

diff --git a/MAINTAINERS b/MAINTAINERS
index 63ab65dbc966..ceeb7ceae041 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -649,6 +649,12 @@ M: Lino Sanfilippo
 S: Maintained
 F: drivers/net/ethernet/alacritech/*
 
+FORCEDETH GIGABIT ETHERNET DRIVER
+M: Rain River
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/nvidia/*
+
 ALCATEL SPEEDTOUCH USB DRIVER
 M: Duncan Sands
 L: linux-usb@vger.kernel.org
-- cgit v1.2.3-59-g8ed1b