Diffstat (limited to 'drivers/staging/wimax/i2400m')
-rw-r--r--  drivers/staging/wimax/i2400m/Kconfig  37
-rw-r--r--  drivers/staging/wimax/i2400m/Makefile  23
-rw-r--r--  drivers/staging/wimax/i2400m/control.c  1434
-rw-r--r--  drivers/staging/wimax/i2400m/debug-levels.h  32
-rw-r--r--  drivers/staging/wimax/i2400m/debugfs.c  253
-rw-r--r--  drivers/staging/wimax/i2400m/driver.c  1002
-rw-r--r--  drivers/staging/wimax/i2400m/fw.c  1653
-rw-r--r--  drivers/staging/wimax/i2400m/i2400m-usb.h  275
-rw-r--r--  drivers/staging/wimax/i2400m/i2400m.h  970
-rw-r--r--  drivers/staging/wimax/i2400m/linux-wimax-i2400m.h  572
-rw-r--r--  drivers/staging/wimax/i2400m/netdev.c  603
-rw-r--r--  drivers/staging/wimax/i2400m/op-rfkill.c  196
-rw-r--r--  drivers/staging/wimax/i2400m/rx.c  1395
-rw-r--r--  drivers/staging/wimax/i2400m/sysfs.c  65
-rw-r--r--  drivers/staging/wimax/i2400m/tx.c  1011
-rw-r--r--  drivers/staging/wimax/i2400m/usb-debug-levels.h  28
-rw-r--r--  drivers/staging/wimax/i2400m/usb-fw.c  365
-rw-r--r--  drivers/staging/wimax/i2400m/usb-notif.c  258
-rw-r--r--  drivers/staging/wimax/i2400m/usb-rx.c  462
-rw-r--r--  drivers/staging/wimax/i2400m/usb-tx.c  273
-rw-r--r--  drivers/staging/wimax/i2400m/usb.c  764
21 files changed, 11671 insertions, 0 deletions
diff --git a/drivers/staging/wimax/i2400m/Kconfig b/drivers/staging/wimax/i2400m/Kconfig
new file mode 100644
index 000000000000..843b905a26a3
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/Kconfig
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config WIMAX_I2400M
+ tristate
+ depends on WIMAX
+ select FW_LOADER
+
+comment "Enable USB support to see WiMAX USB drivers"
+ depends on USB = n
+
+config WIMAX_I2400M_USB
+ tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)"
+ depends on WIMAX && USB
+ select WIMAX_I2400M
+ help
+ Select if you have a device based on the Intel WiMAX
+ Connection 2400 over USB (like any of the Intel Wireless
+ WiMAX/WiFi Link 5x50 series).
+
+ If unsure, it is safe to select M (module).
+
+config WIMAX_I2400M_DEBUG_LEVEL
+ int "WiMAX i2400m debug level"
+ depends on WIMAX_I2400M
+ default 8
+ help
+
+ Select the maximum debug verbosity level to be compiled into
+ the WiMAX i2400m driver code.
+
+ By default, debug output is disabled at runtime and can be
+ selectively enabled for different parts of the code through the
+ debug-levels (dl_*) files in debugfs.
+
+ If set at zero, this will compile out all the debug code.
+
+ It is recommended that it is left at 8.
diff --git a/drivers/staging/wimax/i2400m/Makefile b/drivers/staging/wimax/i2400m/Makefile
new file mode 100644
index 000000000000..b1db1eff0648
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_WIMAX_I2400M) += i2400m.o
+obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o
+
+i2400m-y := \
+ control.o \
+ driver.o \
+ fw.o \
+ op-rfkill.o \
+ sysfs.o \
+ netdev.o \
+ tx.o \
+ rx.o
+
+i2400m-$(CONFIG_DEBUG_FS) += debugfs.o
+
+i2400m-usb-y := \
+ usb-fw.o \
+ usb-notif.o \
+ usb-tx.o \
+ usb-rx.o \
+ usb.o
diff --git a/drivers/staging/wimax/i2400m/control.c b/drivers/staging/wimax/i2400m/control.c
new file mode 100644
index 000000000000..fe885aa56cf3
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/control.c
@@ -0,0 +1,1434 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Miscellaneous control functions for managing the device
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ * This is a collection of functions used to control the device (plus
+ * a few helpers).
+ *
+ * There are utilities for handling TLV buffers, hooks on the device's
+ * reports to act on device changes of state [i2400m_report_hook()],
+ * on acks to commands [i2400m_msg_ack_hook()], a helper for sending
+ * commands to the device and blocking until a reply arrives
+ * [i2400m_msg_to_dev()], a few high level commands for manipulating
+ * the device state, power saving mode and configuration, plus the
+ * routines to set up the device once communication is established with
+ * it [i2400m_dev_initialize()].
+ *
+ * ROADMAP
+ *
+ * i2400m_dev_initialize() Called by i2400m_dev_start()
+ * i2400m_set_init_config()
+ * i2400m_cmd_get_state()
+ * i2400m_dev_shutdown() Called by i2400m_dev_stop()
+ * i2400m_reset()
+ *
+ * i2400m_{cmd,get,set}_*()
+ * i2400m_msg_to_dev()
+ * i2400m_msg_check_status()
+ *
+ * i2400m_report_hook() Called on reception of an event
+ * i2400m_report_state_hook()
+ * i2400m_tlv_buffer_walk()
+ * i2400m_tlv_match()
+ * i2400m_report_tlv_system_state()
+ * i2400m_report_tlv_rf_switches_status()
+ * i2400m_report_tlv_media_status()
+ * i2400m_cmd_enter_powersave()
+ *
+ * i2400m_msg_ack_hook() Called on reception of a reply to a
+ * command, get or set
+ */
+
+#include <stdarg.h>
+#include "i2400m.h"
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "linux-wimax-i2400m.h"
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+
+
+#define D_SUBMODULE control
+#include "debug-levels.h"
+
+static int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
+module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
+MODULE_PARM_DESC(idle_mode_disabled,
+ "If true, the device will not enable idle mode negotiation "
+ "with the base station (when connected) to save power.");
+
+/* 0 (power saving enabled) by default */
+static int i2400m_power_save_disabled;
+module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
+MODULE_PARM_DESC(power_save_disabled,
+ "If true, the driver will not tell the device to enter "
+ "power saving mode when it reports it is ready for it. "
+ "False by default (so the device is told to do power "
+ "saving).");
+
+static int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
+module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
+MODULE_PARM_DESC(passive_mode,
+ "If true, the driver will not do any device setup "
+ "and leave it up to user space, who must be properly "
+ "setup.");
+
+
+/*
+ * Check whether a TLV is of a given type and size
+ *
+ * @tlv_hdr: pointer to the TLV
+ * @tlv_type: type of the TLV we are looking for
+ * @tlv_size: expected size of the TLV we are looking for (if -1,
+ * don't check the size). This includes the header
+ * Returns: 0 if the TLV matches
+ * < 0 if it doesn't match at all
+ * > 0 total TLV + payload size, if the type matches, but not
+ * the size
+ */
+static
+ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv,
+ enum i2400m_tlv tlv_type, ssize_t tlv_size)
+{
+ if (le16_to_cpu(tlv->type) != tlv_type) /* Not our type? skip */
+ return -1;
+ if (tlv_size != -1
+ && le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) {
+ size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv);
+ printk(KERN_WARNING "W: tlv type 0x%x mismatched because of "
+ "size (got %zu vs %zd expected)\n",
+ tlv_type, size, tlv_size);
+ return size;
+ }
+ return 0;
+}
+
+
+/*
+ * Given a buffer of TLVs, iterate over them
+ *
+ * @i2400m: device instance
+ * @tlv_buf: pointer to the beginning of the TLV buffer
+ * @buf_size: buffer size in bytes
+ * @tlv_pos: seek position; this is assumed to be a pointer returned
+ * by i2400m_tlv_buffer_walk() [and thus, validated]. The
+ * TLV returned will be the one following this one.
+ *
+ * Usage:
+ *
+ * tlv_itr = NULL;
+ * while (tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr)) {
+ * ...
+ * // Do stuff with tlv_itr, DON'T MODIFY IT
+ * ...
+ * }
+ */
+static
+const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk(
+ struct i2400m *i2400m,
+ const void *tlv_buf, size_t buf_size,
+ const struct i2400m_tlv_hdr *tlv_pos)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size;
+ size_t offset, length, avail_size;
+ unsigned type;
+
+ if (tlv_pos == NULL) /* Take the first one? */
+ tlv_pos = tlv_buf;
+ else /* Nope, the next one */
+ tlv_pos = (void *) tlv_pos
+ + le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos);
+ if (tlv_pos == tlv_top) { /* buffer done */
+ tlv_pos = NULL;
+ goto error_beyond_end;
+ }
+ if (tlv_pos > tlv_top) {
+ tlv_pos = NULL;
+ WARN_ON(1);
+ goto error_beyond_end;
+ }
+ offset = (void *) tlv_pos - (void *) tlv_buf;
+ avail_size = buf_size - offset;
+ if (avail_size < sizeof(*tlv_pos)) {
+ dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: "
+ "short header\n", tlv_buf, buf_size, offset);
+ goto error_short_header;
+ }
+ type = le16_to_cpu(tlv_pos->type);
+ length = le16_to_cpu(tlv_pos->length);
+ if (avail_size < sizeof(*tlv_pos) + length) {
+ dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], "
+ "tlv type 0x%04x @%zu: "
+ "short data (%zu bytes vs %zu needed)\n",
+ tlv_buf, buf_size, type, offset, avail_size,
+ sizeof(*tlv_pos) + length);
+ goto error_short_header;
+ }
+error_short_header:
+error_beyond_end:
+ return tlv_pos;
+}
+
+
+/*
+ * Find a TLV in a buffer of sequential TLVs
+ *
+ * @i2400m: device descriptor
+ * @tlv_hdr: pointer to the first TLV in the sequence
+ * @size: size of the buffer in bytes; all TLVs are assumed to fit
+ * fully in the buffer (otherwise we'll complain).
+ * @tlv_type: type of the TLV we are looking for
+ * @tlv_size: expected size of the TLV we are looking for (if -1,
+ * don't check the size). This includes the header
+ *
+ * Returns: NULL if the TLV is not found, otherwise a pointer to
+ * it. If the sizes don't match, an error is printed and NULL
+ * returned.
+ */
+static
+const struct i2400m_tlv_hdr *i2400m_tlv_find(
+ struct i2400m *i2400m,
+ const struct i2400m_tlv_hdr *tlv_hdr, size_t size,
+ enum i2400m_tlv tlv_type, ssize_t tlv_size)
+{
+ ssize_t match;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_tlv_hdr *tlv = NULL;
+ while ((tlv = i2400m_tlv_buffer_walk(i2400m, tlv_hdr, size, tlv))) {
+ match = i2400m_tlv_match(tlv, tlv_type, tlv_size);
+ if (match == 0) /* found it :) */
+ break;
+ if (match > 0)
+ dev_warn(dev, "TLV type 0x%04x found with size "
+ "mismatch (%zu vs %zd needed)\n",
+ tlv_type, match, tlv_size);
+ }
+ return tlv;
+}
+
+
+static const struct
+{
+ char *msg;
+ int errno;
+} ms_to_errno[I2400M_MS_MAX] = {
+ [I2400M_MS_DONE_OK] = { "", 0 },
+ [I2400M_MS_DONE_IN_PROGRESS] = { "", 0 },
+ [I2400M_MS_INVALID_OP] = { "invalid opcode", -ENOSYS },
+ [I2400M_MS_BAD_STATE] = { "invalid state", -EILSEQ },
+ [I2400M_MS_ILLEGAL_VALUE] = { "illegal value", -EINVAL },
+ [I2400M_MS_MISSING_PARAMS] = { "missing parameters", -ENOMSG },
+ [I2400M_MS_VERSION_ERROR] = { "bad version", -EIO },
+ [I2400M_MS_ACCESSIBILITY_ERROR] = { "accessibility error", -EIO },
+ [I2400M_MS_BUSY] = { "busy", -EBUSY },
+ [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
+ [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
+ [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
+ [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
+ [I2400M_MS_NO_RF] = { "no RF", -EIO },
+ [I2400M_MS_NOT_READY_FOR_POWERSAVE] =
+ { "not ready for powersave", -EACCES },
+ [I2400M_MS_THERMAL_CRITICAL] = { "thermal critical", -EL3HLT },
+};
+
+
+/*
+ * i2400m_msg_check_status - translate a message's status code
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: message header
+ * @strbuf: buffer to place a formatted error message (unless NULL).
+ * @strbuf_size: max amount of available space; larger messages will
+ * be truncated.
+ *
+ * Returns: errno code corresponding to the status code in @l3l4_hdr
+ * and a message in @strbuf describing the error.
+ */
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
+ char *strbuf, size_t strbuf_size)
+{
+ int result;
+ enum i2400m_ms status = le16_to_cpu(l3l4_hdr->status);
+ const char *str;
+
+ if (status == 0)
+ return 0;
+ if (status >= ARRAY_SIZE(ms_to_errno)) {
+ str = "unknown status code";
+ result = -EBADR;
+ } else {
+ str = ms_to_errno[status].msg;
+ result = ms_to_errno[status].errno;
+ }
+ if (strbuf)
+ snprintf(strbuf, strbuf_size, "%s (%d)", str, status);
+ return result;
+}
+
+
+/*
+ * Act on a TLV System State reported by the device
+ *
+ * @i2400m: device descriptor
+ * @ss: validated System State TLV
+ */
+static
+void i2400m_report_tlv_system_state(struct i2400m *i2400m,
+ const struct i2400m_tlv_system_state *ss)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state);
+
+ d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state);
+
+ if (i2400m->state != i2400m_state) {
+ i2400m->state = i2400m_state;
+ wake_up_all(&i2400m->state_wq);
+ }
+ switch (i2400m_state) {
+ case I2400M_SS_UNINITIALIZED:
+ case I2400M_SS_INIT:
+ case I2400M_SS_CONFIG:
+ case I2400M_SS_PRODUCTION:
+ wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
+ break;
+
+ case I2400M_SS_RF_OFF:
+ case I2400M_SS_RF_SHUTDOWN:
+ wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF);
+ break;
+
+ case I2400M_SS_READY:
+ case I2400M_SS_STANDBY:
+ case I2400M_SS_SLEEPACTIVE:
+ wimax_state_change(wimax_dev, WIMAX_ST_READY);
+ break;
+
+ case I2400M_SS_CONNECTING:
+ case I2400M_SS_WIMAX_CONNECTED:
+ wimax_state_change(wimax_dev, WIMAX_ST_READY);
+ break;
+
+ case I2400M_SS_SCAN:
+ case I2400M_SS_OUT_OF_ZONE:
+ wimax_state_change(wimax_dev, WIMAX_ST_SCANNING);
+ break;
+
+ case I2400M_SS_IDLE:
+ d_printf(1, dev, "entering BS-negotiated idle mode\n");
+ fallthrough;
+ case I2400M_SS_DISCONNECTING:
+ case I2400M_SS_DATA_PATH_CONNECTED:
+ wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
+ break;
+
+ default:
+ /* Huh? just in case, shut it down */
+ dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
+ i2400m_state);
+ i2400m_reset(i2400m, I2400M_RT_WARM);
+ break;
+ }
+ d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
+ i2400m, ss, i2400m_state);
+}
+
+
+/*
+ * Parse and act on a TLV Media Status sent by the device
+ *
+ * @i2400m: device descriptor
+ * @ms: validated Media Status TLV
+ *
+ * This will set the carrier up or down based on the device's link
+ * report. This is done in addition to what the WiMAX stack does based
+ * on the device's state, as sometimes we need to do a link-renew (the
+ * BS wants us to renew a DHCP lease, for example).
+ *
+ * In fact, the documentation says that every time we get a link-up, we
+ * should do a DHCP negotiation...
+ */
+static
+void i2400m_report_tlv_media_status(struct i2400m *i2400m,
+ const struct i2400m_tlv_media_status *ms)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct net_device *net_dev = wimax_dev->net_dev;
+ enum i2400m_media_status status = le32_to_cpu(ms->media_status);
+
+ d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status);
+
+ switch (status) {
+ case I2400M_MEDIA_STATUS_LINK_UP:
+ netif_carrier_on(net_dev);
+ break;
+ case I2400M_MEDIA_STATUS_LINK_DOWN:
+ netif_carrier_off(net_dev);
+ break;
+ /*
+ * This is the network telling us we need to renew the DHCP
+ * lease -- so far, we are trusting the WiMAX Network Service
+ * in user space to pick this up and poke the DHCP client.
+ */
+ case I2400M_MEDIA_STATUS_LINK_RENEW:
+ netif_carrier_on(net_dev);
+ break;
+ default:
+ dev_err(dev, "HW BUG? unknown media status %u\n",
+ status);
+ }
+ d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
+ i2400m, ms, status);
+}
+
+
+/*
+ * Process a TLV from a 'state report'
+ *
+ * @i2400m: device descriptor
+ * @tlv: pointer to the TLV header; it has been already validated for
+ * consistent size.
+ * @tag: for error messages
+ *
+ * Act on the TLVs from a 'state report'.
+ */
+static
+void i2400m_report_state_parse_tlv(struct i2400m *i2400m,
+ const struct i2400m_tlv_hdr *tlv,
+ const char *tag)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_tlv_media_status *ms;
+ const struct i2400m_tlv_system_state *ss;
+ const struct i2400m_tlv_rf_switches_status *rfss;
+
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss))) {
+ ss = container_of(tlv, typeof(*ss), hdr);
+ d_printf(2, dev, "%s: system state TLV "
+ "found (0x%04x), state 0x%08x\n",
+ tag, I2400M_TLV_SYSTEM_STATE,
+ le32_to_cpu(ss->state));
+ i2400m_report_tlv_system_state(i2400m, ss);
+ }
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS, sizeof(*rfss))) {
+ rfss = container_of(tlv, typeof(*rfss), hdr);
+ d_printf(2, dev, "%s: RF status TLV "
+ "found (0x%04x), sw 0x%02x hw 0x%02x\n",
+ tag, I2400M_TLV_RF_STATUS,
+ le32_to_cpu(rfss->sw_rf_switch),
+ le32_to_cpu(rfss->hw_rf_switch));
+ i2400m_report_tlv_rf_switches_status(i2400m, rfss);
+ }
+ if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS, sizeof(*ms))) {
+ ms = container_of(tlv, typeof(*ms), hdr);
+ d_printf(2, dev, "%s: Media Status TLV: %u\n",
+ tag, le32_to_cpu(ms->media_status));
+ i2400m_report_tlv_media_status(i2400m, ms);
+ }
+}
+
+
+/*
+ * Parse a 'state report' and extract information
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ * consistent size.
+ * @size: size of the message (header + payload). The header length
+ * declaration is assumed to be congruent with @size (as in
+ * sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
+ *
+ * Walk over the TLVs in a report state and act on them.
+ */
+static
+void i2400m_report_state_hook(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr,
+ size_t size, const char *tag)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_tlv_hdr *tlv;
+ size_t tlv_size = le16_to_cpu(l3l4_hdr->length);
+
+ d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n",
+ i2400m, l3l4_hdr, size, tag);
+ tlv = NULL;
+
+ while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl,
+ tlv_size, tlv)))
+ i2400m_report_state_parse_tlv(i2400m, tlv, tag);
+ d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n",
+ i2400m, l3l4_hdr, size, tag);
+}
+
+
+/*
+ * i2400m_report_hook - (maybe) act on a report
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ * consistent size.
+ * @size: size of the message (header + payload). The header length
+ * declaration is assumed to be congruent with @size (as in
+ * sizeof(*l3l4_hdr) + l3l4_hdr->length == size)
+ *
+ * Extract information we might need (like carrier on/off) from a
+ * device report.
+ */
+void i2400m_report_hook(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned msg_type;
+
+ d_fnstart(3, dev, "(i2400m %p l3l4_hdr %p size %zu)\n",
+ i2400m, l3l4_hdr, size);
+ /* Chew on the message, we might need some information from
+ * here */
+ msg_type = le16_to_cpu(l3l4_hdr->type);
+ switch (msg_type) {
+ case I2400M_MT_REPORT_STATE: /* carrier detection... */
+ i2400m_report_state_hook(i2400m,
+ l3l4_hdr, size, "REPORT STATE");
+ break;
+ /* If the device is ready for power save, then ask it to do
+ * it. */
+ case I2400M_MT_REPORT_POWERSAVE_READY: /* zzzzz */
+ if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) {
+ if (i2400m_power_save_disabled)
+ d_printf(1, dev, "ready for powersave, "
+ "not requesting (disabled by module "
+ "parameter)\n");
+ else {
+ d_printf(1, dev, "ready for powersave, "
+ "requesting\n");
+ i2400m_cmd_enter_powersave(i2400m);
+ }
+ }
+ break;
+ }
+ d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
+ i2400m, l3l4_hdr, size);
+}
+
+
+/*
+ * i2400m_msg_ack_hook - process cmd/set/get ack for internal status
+ *
+ * @i2400m: device descriptor
+ * @l3l4_hdr: pointer to message; it has been already validated for
+ * consistent size.
+ * @size: size of the message
+ *
+ * Extract information we might need from acks to commands and act on
+ * it. This is akin to i2400m_report_hook(). Note most of this
+ * processing should be done in the function that calls the
+ * command. This is here for some cases where it can't happen...
+ */
+static void i2400m_msg_ack_hook(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr,
+ size_t size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned int ack_type;
+ char strerr[32];
+
+ /* Chew on the message, we might need some information from
+ * here */
+ ack_type = le16_to_cpu(l3l4_hdr->type);
+ switch (ack_type) {
+ case I2400M_MT_CMD_ENTER_POWERSAVE:
+ /* This is just left here for the sake of example, as
+ * the processing is done somewhere else. */
+ if (0) {
+ result = i2400m_msg_check_status(
+ l3l4_hdr, strerr, sizeof(strerr));
+ if (result >= 0)
+ d_printf(1, dev, "ready for power save: %zd\n",
+ size);
+ }
+ break;
+ }
+}
+
+
+/*
+ * i2400m_msg_size_check() - verify message size and header are congruent
+ *
+ * It is ok if the total message size is larger than the expected
+ * size, as there can be padding.
+ */
+int i2400m_msg_size_check(struct i2400m *i2400m,
+ const struct i2400m_l3l4_hdr *l3l4_hdr,
+ size_t msg_size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ size_t expected_size;
+ d_fnstart(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu)\n",
+ i2400m, l3l4_hdr, msg_size);
+ if (msg_size < sizeof(*l3l4_hdr)) {
+ dev_err(dev, "bad size for message header "
+ "(expected at least %zu, got %zu)\n",
+ (size_t) sizeof(*l3l4_hdr), msg_size);
+ result = -EIO;
+ goto error_hdr_size;
+ }
+ expected_size = le16_to_cpu(l3l4_hdr->length) + sizeof(*l3l4_hdr);
+ if (msg_size < expected_size) {
+ dev_err(dev, "bad size for message code 0x%04x (expected %zu, "
+ "got %zu)\n", le16_to_cpu(l3l4_hdr->type),
+ expected_size, msg_size);
+ result = -EIO;
+ } else
+ result = 0;
+error_hdr_size:
+ d_fnend(4, dev,
+ "(i2400m %p l3l4_hdr %p msg_size %zu) = %d\n",
+ i2400m, l3l4_hdr, msg_size, result);
+ return result;
+}
+
+
+
+/*
+ * Cancel a wait for a command ACK
+ *
+ * @i2400m: device descriptor
+ * @code: [negative] errno code to cancel with (don't use
+ * -EINPROGRESS)
+ *
+ * If there is an ack already filled out, free it.
+ */
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
+{
+ struct sk_buff *ack_skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ ack_skb = i2400m->ack_skb;
+ if (ack_skb && !IS_ERR(ack_skb))
+ kfree_skb(ack_skb);
+ i2400m->ack_skb = ERR_PTR(code);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+}
+
+
+/**
+ * i2400m_msg_to_dev - Send a control message to the device and get a response
+ *
+ * @i2400m: device descriptor
+ *
+ * @buf: pointer to the buffer containing the message to be sent; it
+ * has to start with a &struct i2400m_l3l4_hdr, followed by the
+ * payload. Once this function returns, the
+ * buffer can be reused.
+ *
+ * @buf_len: buffer size
+ *
+ * Returns:
+ *
+ * Pointer to skb containing the ack message. You need to check the
+ * pointer with IS_ERR(), as it might be an error code. Error codes
+ * could happen because:
+ *
+ * - the message wasn't formatted correctly
+ * - couldn't send the message
+ * - failed waiting for a response
+ * - the ack message wasn't formatted correctly
+ *
+ * The returned skb has been allocated with wimax_msg_to_user_alloc(),
+ * it contains the response in a netlink attribute and is ready to be
+ * passed up to user space with wimax_msg_to_user_send(). To access
+ * the payload and its length, use wimax_msg_{data,len}() on the skb.
+ *
+ * The skb has to be freed with kfree_skb() once done.
+ *
+ * Description:
+ *
+ * This function delivers a message/command to the device and waits
+ * for an ack to be received. The format is described in
+ * linux/wimax/i2400m.h. In summary, a command/get/set is followed by an
+ * ack.
+ *
+ * This function will not check the ack status, that's left up to the
+ * caller. Once done with the ack skb, it has to be kfree_skb()ed.
+ *
+ * The i2400m handles only one message at the same time, thus we need
+ * the mutex to exclude other players.
+ *
+ * We write the message and then wait for an answer to come back. The
+ * RX path intercepts control messages and handles them in
+ * i2400m_rx_ctl(). Reports (notifications) are (maybe) processed
+ * locally and then forwarded (as needed) to user space on the WiMAX
+ * stack message pipe. Acks are saved and passed back to us through an
+ * skb in i2400m->ack_skb which is ready to be given to generic
+ * netlink if need be.
+ */
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
+ const void *buf, size_t buf_len)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_l3l4_hdr *msg_l3l4_hdr;
+ struct sk_buff *ack_skb;
+ const struct i2400m_l3l4_hdr *ack_l3l4_hdr;
+ size_t ack_len;
+ int ack_timeout;
+ unsigned msg_type;
+ unsigned long flags;
+
+ d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n",
+ i2400m, buf, buf_len);
+
+ rmb(); /* Make sure we see what i2400m_dev_reset_handle() set in boot_mode */
+ if (i2400m->boot_mode)
+ return ERR_PTR(-EL3RST);
+
+ msg_l3l4_hdr = buf;
+ /* Check msg & payload consistency */
+ result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len);
+ if (result < 0)
+ goto error_bad_msg;
+ msg_type = le16_to_cpu(msg_l3l4_hdr->type);
+ d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n",
+ msg_type, buf_len);
+ d_dump(2, dev, buf, buf_len);
+
+ /* Setup the completion, ack_skb ("we are waiting") and send
+ * the message to the device */
+ mutex_lock(&i2400m->msg_mutex);
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ i2400m->ack_skb = ERR_PTR(-EINPROGRESS);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ init_completion(&i2400m->msg_completion);
+ result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL);
+ if (result < 0) {
+ dev_err(dev, "can't send message 0x%04x: %d\n",
+ le16_to_cpu(msg_l3l4_hdr->type), result);
+ goto error_tx;
+ }
+
+ /* Some commands take longer to execute because of crypto ops,
+ * so we give them some more leeway on timeout */
+ switch (msg_type) {
+ case I2400M_MT_GET_TLS_OPERATION_RESULT:
+ case I2400M_MT_CMD_SEND_EAP_RESPONSE:
+ ack_timeout = 5 * HZ;
+ break;
+ default:
+ ack_timeout = HZ;
+ }
+
+ if (unlikely(i2400m->trace_msg_from_user))
+ wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
+ /* The RX path in rx.c will put any response for this message
+ * in i2400m->ack_skb and wake us up. If we cancel the wait,
+ * we need to change the value of i2400m->ack_skb to something
+ * not -EINPROGRESS so RX knows there is no one waiting. */
+ result = wait_for_completion_interruptible_timeout(
+ &i2400m->msg_completion, ack_timeout);
+ if (result == 0) {
+ dev_err(dev, "timeout waiting for reply to message 0x%04x\n",
+ msg_type);
+ result = -ETIMEDOUT;
+ i2400m_msg_to_dev_cancel_wait(i2400m, result);
+ goto error_wait_for_completion;
+ } else if (result < 0) {
+ dev_err(dev, "error waiting for reply to message 0x%04x: %d\n",
+ msg_type, result);
+ i2400m_msg_to_dev_cancel_wait(i2400m, result);
+ goto error_wait_for_completion;
+ }
+
+ /* Pull out the ack data from i2400m->ack_skb -- see if it is
+ * an error and act accordingly */
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ ack_skb = i2400m->ack_skb;
+ if (IS_ERR(ack_skb))
+ result = PTR_ERR(ack_skb);
+ else
+ result = 0;
+ i2400m->ack_skb = NULL;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ if (result < 0)
+ goto error_ack_status;
+ ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len);
+
+ /* Check the ack and deliver it if it is ok */
+ if (unlikely(i2400m->trace_msg_from_user))
+ wimax_msg(&i2400m->wimax_dev, "echo",
+ ack_l3l4_hdr, ack_len, GFP_KERNEL);
+ result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len);
+ if (result < 0) {
+ dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n",
+ msg_type, result);
+ goto error_bad_ack_len;
+ }
+ if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) {
+ dev_err(dev, "HW BUG? bad reply 0x%04x to message 0x%04x\n",
+ le16_to_cpu(ack_l3l4_hdr->type), msg_type);
+ result = -EIO;
+ goto error_bad_ack_type;
+ }
+ i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len);
+ mutex_unlock(&i2400m->msg_mutex);
+ d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n",
+ i2400m, buf, buf_len, ack_skb);
+ return ack_skb;
+
+error_bad_ack_type:
+error_bad_ack_len:
+ kfree_skb(ack_skb);
+error_ack_status:
+error_wait_for_completion:
+error_tx:
+ mutex_unlock(&i2400m->msg_mutex);
+error_bad_msg:
+ d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n",
+ i2400m, buf, buf_len, result);
+ return ERR_PTR(result);
+}
+
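+/*
+ * A condensed sketch of the calling convention for i2400m_msg_to_dev()
+ * (illustrative only; it simply mirrors the callers below, with names
+ * and error handling abbreviated):
+ *
+ * cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ * ...fill in cmd->hdr.type / length / version and any TLVs (LE)...
+ * ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ * if (IS_ERR(ack_skb))
+ * return PTR_ERR(ack_skb); // errno from the send/wait
+ * result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ * strerr, sizeof(strerr));
+ * kfree_skb(ack_skb); // always free the ack when done
+ */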
+
+/*
+ * Definitions for the Enter Power Save command
+ *
+ * The Enter Power Save command requests the device to go into power
+ * saving mode. The device will ack or nak the command depending on
+ * whether it is ready for it.
+ *
+ * As well, the device might request to go into power saving mode by
+ * sending a report (REPORT_POWERSAVE_READY), in which case we issue
+ * this command; the hookups in the RX code [i2400m_report_hook()]
+ * take care of that.
+ */
+enum {
+ I2400M_WAKEUP_ENABLED = 0x01,
+ I2400M_WAKEUP_DISABLED = 0x02,
+ I2400M_TLV_TYPE_WAKEUP_MODE = 144,
+};
+
+struct i2400m_cmd_enter_power_save {
+ struct i2400m_l3l4_hdr hdr;
+ struct i2400m_tlv_hdr tlv;
+ __le32 val;
+} __packed;
+
+
+/*
+ * Request entering power save
+ *
+ * This command is (mainly) executed when the device indicates that it
+ * is ready to go into powersave mode via a REPORT_POWERSAVE_READY.
+ */
+int i2400m_cmd_enter_powersave(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_cmd_enter_power_save *cmd;
+ char strerr[32];
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE);
+ cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr));
+ cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+ cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE);
+ cmd->tlv.length = cpu_to_le16(sizeof(cmd->val));
+ cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'Enter power save' command: %d\n",
+ result);
+ goto error_msg_to_dev;
+ }
+ result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ strerr, sizeof(strerr));
+ if (result == -EACCES)
+ d_printf(1, dev, "Cannot enter power save mode\n");
+ else if (result < 0)
+ dev_err(dev, "'Enter power save' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE,
+ result, strerr);
+ else
+ d_printf(1, dev, "device ready to power save\n");
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_cmd_enter_powersave);
+
+
+/*
+ * Definitions for getting device information
+ */
+enum {
+ I2400M_TLV_DETAILED_DEVICE_INFO = 140
+};
+
+/**
+ * i2400m_get_device_info - Query the device for detailed device information
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: an skb whose skb->data points to a 'struct
+ * i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The
+ * skb is *guaranteed* to contain the whole TLV data structure.
+ *
+ * On error, IS_ERR(skb) is true and PTR_ERR(skb) is the error
+ * code.
+ */
+struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ const struct i2400m_l3l4_hdr *ack;
+ size_t ack_len;
+ const struct i2400m_tlv_hdr *tlv;
+ const struct i2400m_tlv_detailed_device_info *ddi;
+ char strerr[32];
+
+ ack_skb = ERR_PTR(-ENOMEM);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO);
+ cmd->length = 0;
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'get device info' command: %ld\n",
+ PTR_ERR(ack_skb));
+ goto error_msg_to_dev;
+ }
+ ack = wimax_msg_data_len(ack_skb, &ack_len);
+ result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'get device info' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result,
+ strerr);
+ goto error_cmd_failed;
+ }
+ tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
+ I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi));
+ if (tlv == NULL) {
+ dev_err(dev, "GET DEVICE INFO: "
+ "detailed device info TLV not found (0x%04x)\n",
+ I2400M_TLV_DETAILED_DEVICE_INFO);
+ result = -EIO;
+ goto error_no_tlv;
+ }
+ skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data);
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return ack_skb;
+
+error_no_tlv:
+error_cmd_failed:
+ kfree_skb(ack_skb);
+ kfree(cmd);
+ return ERR_PTR(result);
+}
+
+
+/* Firmware interface versions we support */
+enum {
+ I2400M_HDIv_MAJOR = 9,
+ I2400M_HDIv_MINOR = 1,
+ I2400M_HDIv_MINOR_2 = 2,
+};
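+
+/*
+ * Note: i2400m_firmware_check() below stores the negotiated version as
+ * (major << 16 | minor); for example, HDI 9.2 is encoded as 0x00090002,
+ * which is the value the "fw_version >= 0x00090002" check mentioned in
+ * i2400m_set_idle_timeout() refers to.
+ */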
+
+
+/**
+ * i2400m_firmware_check - check firmware versions are compatible with the driver
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error, and a message is printed
+ * to the kernel log.
+ *
+ * Long function, but quite simple; first chunk launches the command
+ * and double checks the reply for the right TLV. Then we process the
+ * TLV (where the meat is).
+ *
+ * Once we process the TLV that gives us the firmware's interface
+ * version, we encode it and save it in i2400m->fw_version for future
+ * reference.
+ */
+int i2400m_firmware_check(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ const struct i2400m_l3l4_hdr *ack;
+ size_t ack_len;
+ const struct i2400m_tlv_hdr *tlv;
+ const struct i2400m_tlv_l4_message_versions *l4mv;
+ char strerr[32];
+ unsigned major, minor, branch;
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->type = cpu_to_le16(I2400M_MT_GET_LM_VERSION);
+ cmd->length = 0;
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ if (IS_ERR(ack_skb)) {
+ result = PTR_ERR(ack_skb);
+ dev_err(dev, "Failed to issue 'get lm version' command: %-d\n",
+ result);
+ goto error_msg_to_dev;
+ }
+ ack = wimax_msg_data_len(ack_skb, &ack_len);
+ result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'get lm version' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_GET_LM_VERSION, result,
+ strerr);
+ goto error_cmd_failed;
+ }
+ tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack),
+ I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv));
+ if (tlv == NULL) {
+ dev_err(dev, "get lm version: TLV not found (0x%04x)\n",
+ I2400M_TLV_L4_MESSAGE_VERSIONS);
+ result = -EIO;
+ goto error_no_tlv;
+ }
+ l4mv = container_of(tlv, typeof(*l4mv), hdr);
+ major = le16_to_cpu(l4mv->major);
+ minor = le16_to_cpu(l4mv->minor);
+ branch = le16_to_cpu(l4mv->branch);
+ result = -EINVAL;
+ if (major != I2400M_HDIv_MAJOR) {
+ dev_err(dev, "unsupported major fw version "
+ "%u.%u.%u\n", major, minor, branch);
+ goto error_bad_major;
+ }
+ result = 0;
+ if (minor > I2400M_HDIv_MINOR_2 || minor < I2400M_HDIv_MINOR)
+ dev_warn(dev, "untested minor fw version %u.%u.%u\n",
+ major, minor, branch);
+ /* Yes, we ignore the branch -- we don't have to track it */
+ i2400m->fw_version = major << 16 | minor;
+ dev_info(dev, "firmware interface version %u.%u.%u\n",
+ major, minor, branch);
+error_bad_major:
+error_no_tlv:
+error_cmd_failed:
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return result;
+}
+
+
+/*
+ * Send a DoExitIdle command to the device to ask it to go out of
+ * basestation-idle mode.
+ *
+ * @i2400m: device descriptor
+ *
+ * This starts a renegotiation with the basestation that might involve
+ * another crypto handshake with user space.
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ */
+int i2400m_cmd_exit_idle(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ char strerr[32];
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->type = cpu_to_le16(I2400M_MT_CMD_EXIT_IDLE);
+ cmd->length = 0;
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'exit idle' command: %d\n",
+ result);
+ goto error_msg_to_dev;
+ }
+ result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ strerr, sizeof(strerr));
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return result;
+
+}
+
+
+/*
+ * Query the device for its state, update the WiMAX stack's idea of it
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Executes a 'Get State' command and parses the returned
+ * TLVs.
+ *
+ * Because this is almost identical to a 'Report State', we use
+ * i2400m_report_state_hook() to parse the answer. This will set the
+ * carrier state, as well as the RF Kill switches state.
+ */
+static int i2400m_cmd_get_state(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ const struct i2400m_l3l4_hdr *ack;
+ size_t ack_len;
+ char strerr[32];
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->type = cpu_to_le16(I2400M_MT_GET_STATE);
+ cmd->length = 0;
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'get state' command: %ld\n",
+ PTR_ERR(ack_skb));
+ result = PTR_ERR(ack_skb);
+ goto error_msg_to_dev;
+ }
+ ack = wimax_msg_data_len(ack_skb, &ack_len);
+ result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'get state' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_GET_STATE, result, strerr);
+ goto error_cmd_failed;
+ }
+ i2400m_report_state_hook(i2400m, ack, ack_len - sizeof(*ack),
+ "GET STATE");
+ result = 0;
+ kfree_skb(ack_skb);
+error_cmd_failed:
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return result;
+}
+
+/**
+ * i2400m_set_init_config - set basic configuration settings
+ *
+ * @i2400m: device descriptor
+ * @arg: array of pointers to the TLV headers to send for
+ * configuration (each followed by its payload).
+ * TLV headers and payloads must be properly initialized, with the
+ * right endianness (LE).
+ * @args: number of pointers in the @arg array
+ */
+static int i2400m_set_init_config(struct i2400m *i2400m,
+ const struct i2400m_tlv_hdr **arg,
+ size_t args)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct i2400m_l3l4_hdr *cmd;
+ char strerr[32];
+ unsigned argc, argsize, tlv_size;
+ const struct i2400m_tlv_hdr *tlv_hdr;
+ void *buf, *itr;
+
+ d_fnstart(3, dev, "(i2400m %p arg %p args %zu)\n", i2400m, arg, args);
+ result = 0;
+ if (args == 0)
+ goto none;
+ /* Compute the size of all the TLVs, so we can alloc a
+ * contiguous command block to copy them. */
+ argsize = 0;
+ for (argc = 0; argc < args; argc++) {
+ tlv_hdr = arg[argc];
+ argsize += sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
+ }
+ WARN_ON(argc >= 9); /* As per hw spec */
+
+ /* Alloc the space for the command and TLVs */
+ result = -ENOMEM;
+ buf = kzalloc(sizeof(*cmd) + argsize, GFP_KERNEL);
+ if (buf == NULL)
+ goto error_alloc;
+ cmd = buf;
+ cmd->type = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG);
+ cmd->length = cpu_to_le16(argsize);
+ cmd->version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ /* Copy the TLVs */
+ itr = buf + sizeof(*cmd);
+ for (argc = 0; argc < args; argc++) {
+ tlv_hdr = arg[argc];
+ tlv_size = sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length);
+ memcpy(itr, tlv_hdr, tlv_size);
+ itr += tlv_size;
+ }
+
+ /* Send the message! */
+ ack_skb = i2400m_msg_to_dev(i2400m, buf, sizeof(*cmd) + argsize);
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'init config' command: %d\n",
+ result);
+
+ goto error_msg_to_dev;
+ }
+ result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ strerr, sizeof(strerr));
+ if (result < 0)
+ dev_err(dev, "'init config' (0x%04x) command failed: %d - %s\n",
+ I2400M_MT_SET_INIT_CONFIG, result, strerr);
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+ kfree(buf);
+error_alloc:
+none:
+ d_fnend(3, dev, "(i2400m %p arg %p args %zu) = %d\n",
+ i2400m, arg, args, result);
+ return result;
+
+}
+
+/**
+ * i2400m_set_idle_timeout - Set the device's idle mode timeout
+ *
+ * @i2400m: i2400m device descriptor
+ *
+ * @msecs: milliseconds for the timeout to enter idle mode. Between
+ * 100 and 300000 (5 minutes); 0 to disable. In increments of 100.
+ *
+ * After this @msecs of the link being idle (no data being sent or
+ * received), the device will negotiate with the basestation entering
+ * idle mode for saving power. The connection is maintained, but
+ * getting out of it (done in tx.c) will require some negotiation,
+ * possible crypto re-handshake and a possible DHCP re-lease.
+ *
+ * Only available if fw_version >= 0x00090002.
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ */
+int i2400m_set_idle_timeout(struct i2400m *i2400m, unsigned msecs)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct {
+ struct i2400m_l3l4_hdr hdr;
+ struct i2400m_tlv_config_idle_timeout cit;
+ } *cmd;
+ const struct i2400m_l3l4_hdr *ack;
+ size_t ack_len;
+ char strerr[32];
+
+ result = -ENOSYS;
+ if (i2400m_le_v1_3(i2400m))
+ goto error_alloc;
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->hdr.type = cpu_to_le16(I2400M_MT_GET_STATE);
+ cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr));
+ cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+
+ cmd->cit.hdr.type =
+ cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT);
+ cmd->cit.hdr.length = cpu_to_le16(sizeof(cmd->cit.timeout));
+ cmd->cit.timeout = cpu_to_le32(msecs);
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'set idle timeout' command: "
+ "%ld\n", PTR_ERR(ack_skb));
+ result = PTR_ERR(ack_skb);
+ goto error_msg_to_dev;
+ }
+ ack = wimax_msg_data_len(ack_skb, &ack_len);
+ result = i2400m_msg_check_status(ack, strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'set idle timeout' (0x%04x) command failed: "
+ "%d - %s\n", I2400M_MT_GET_STATE, result, strerr);
+ goto error_cmd_failed;
+ }
+ result = 0;
+ kfree_skb(ack_skb);
+error_cmd_failed:
+error_msg_to_dev:
+ kfree(cmd);
+error_alloc:
+ return result;
+}
+
+
+/**
+ * i2400m_dev_initialize - Initialize the device once communications are ready
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Configures the device to work the way we like it.
+ *
+ * At the point of this call, the device is registered with the WiMAX
+ * and netdev stacks, firmware is uploaded and we can talk to the
+ * device normally.
+ */
+int i2400m_dev_initialize(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_tlv_config_idle_parameters idle_params;
+ struct i2400m_tlv_config_idle_timeout idle_timeout;
+ struct i2400m_tlv_config_d2h_data_format df;
+ struct i2400m_tlv_config_dl_host_reorder dlhr;
+ const struct i2400m_tlv_hdr *args[9];
+ unsigned argc = 0;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ if (i2400m_passive_mode)
+ goto out_passive;
+ /* Disable idle mode? (enabled by default) */
+ if (i2400m_idle_mode_disabled) {
+ if (i2400m_le_v1_3(i2400m)) {
+ idle_params.hdr.type =
+ cpu_to_le16(I2400M_TLV_CONFIG_IDLE_PARAMETERS);
+ idle_params.hdr.length = cpu_to_le16(
+ sizeof(idle_params) - sizeof(idle_params.hdr));
+ idle_params.idle_timeout = 0;
+ idle_params.idle_paging_interval = 0;
+ args[argc++] = &idle_params.hdr;
+ } else {
+ idle_timeout.hdr.type =
+ cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT);
+ idle_timeout.hdr.length = cpu_to_le16(
+ sizeof(idle_timeout) - sizeof(idle_timeout.hdr));
+ idle_timeout.timeout = 0;
+ args[argc++] = &idle_timeout.hdr;
+ }
+ }
+ if (i2400m_ge_v1_4(i2400m)) {
+ /* Enable extended RX data format? */
+ df.hdr.type =
+ cpu_to_le16(I2400M_TLV_CONFIG_D2H_DATA_FORMAT);
+ df.hdr.length = cpu_to_le16(
+ sizeof(df) - sizeof(df.hdr));
+ df.format = 1;
+ args[argc++] = &df.hdr;
+
+ /* Enable RX data reordering?
+ * (switch flipped in rx.c:i2400m_rx_setup() after fw upload) */
+ if (i2400m->rx_reorder) {
+ dlhr.hdr.type =
+ cpu_to_le16(I2400M_TLV_CONFIG_DL_HOST_REORDER);
+ dlhr.hdr.length = cpu_to_le16(
+ sizeof(dlhr) - sizeof(dlhr.hdr));
+ dlhr.reorder = 1;
+ args[argc++] = &dlhr.hdr;
+ }
+ }
+ result = i2400m_set_init_config(i2400m, args, argc);
+ if (result < 0)
+ goto error;
+out_passive:
+ /*
+ * Update state: Here it just calls a get state; parsing the
+ * result (System State TLV and RF Status TLV [done in the rx
+ * path hooks]) will set the hardware and software RF-Kill
+ * status.
+ */
+ result = i2400m_cmd_get_state(i2400m);
+error:
+ if (result < 0)
+ dev_err(dev, "failed to initialize the device: %d\n", result);
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+
+
+/**
+ * i2400m_dev_shutdown - Shutdown a running device
+ *
+ * @i2400m: device descriptor
+ *
+ * Release resources acquired during the running of the device; in
+ * theory, should also tell the device to go to sleep, switch off the
+ * radio, all that, but at this point, in most cases (driver
+ * disconnection, reset handling) we can't even talk to the device.
+ */
+void i2400m_dev_shutdown(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
diff --git a/drivers/staging/wimax/i2400m/debug-levels.h b/drivers/staging/wimax/i2400m/debug-levels.h
new file mode 100644
index 000000000000..a317e9fbb734
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/debug-levels.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debug levels control file for the i2400m module
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME i2400m
+#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
+
+#include "../linux-wimax-debug.h"
+
+/* List of all the enabled modules */
+enum d_module {
+ D_SUBMODULE_DECLARE(control),
+ D_SUBMODULE_DECLARE(driver),
+ D_SUBMODULE_DECLARE(debugfs),
+ D_SUBMODULE_DECLARE(fw),
+ D_SUBMODULE_DECLARE(netdev),
+ D_SUBMODULE_DECLARE(rfkill),
+ D_SUBMODULE_DECLARE(rx),
+ D_SUBMODULE_DECLARE(sysfs),
+ D_SUBMODULE_DECLARE(tx),
+};
+
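+/*
+ * Each submodule declares which debug submodule it belongs to before
+ * including this header; for example (as control.c and friends do):
+ *
+ * #define D_SUBMODULE control
+ * #include "debug-levels.h"
+ */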
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/staging/wimax/i2400m/debugfs.c b/drivers/staging/wimax/i2400m/debugfs.c
new file mode 100644
index 000000000000..1c640b41ea4c
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/debugfs.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debugfs interfaces to manipulate driver and device information
+ *
+ * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE debugfs
+#include "debug-levels.h"
+
+static
+int debugfs_netdev_queue_stopped_get(void *data, u64 *val)
+{
+ struct i2400m *i2400m = data;
+ *val = netif_queue_stopped(i2400m->wimax_dev.net_dev);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_netdev_queue_stopped,
+ debugfs_netdev_queue_stopped_get,
+ NULL, "%llu\n");
+
+/*
+ * We don't allow partial reads of this file, as then the reader would
+ * get inconsistent data while it is being updated.
+ *
+ * So you either read it all or nothing; if you try to read with an
+ * offset != 0, we consider you are done reading.
+ */
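+/*
+ * The file contents are a single line with seven space-separated
+ * counters, in the order printed by the snprintf() below:
+ * rx_pl_num rx_pl_min rx_pl_max rx_num rx_size_acc rx_size_min
+ * rx_size_max.
+ */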
+static
+ssize_t i2400m_rx_stats_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i2400m *i2400m = filp->private_data;
+ char buf[128];
+ unsigned long flags;
+
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(buf))
+ return -ENOSPC;
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ snprintf(buf, sizeof(buf), "%u %u %u %u %u %u %u\n",
+ i2400m->rx_pl_num, i2400m->rx_pl_min,
+ i2400m->rx_pl_max, i2400m->rx_num,
+ i2400m->rx_size_acc,
+ i2400m->rx_size_min, i2400m->rx_size_max);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+
+/* Any write clears the stats */
+static
+ssize_t i2400m_rx_stats_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i2400m *i2400m = filp->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ i2400m->rx_pl_num = 0;
+ i2400m->rx_pl_max = 0;
+ i2400m->rx_pl_min = UINT_MAX;
+ i2400m->rx_num = 0;
+ i2400m->rx_size_acc = 0;
+ i2400m->rx_size_min = UINT_MAX;
+ i2400m->rx_size_max = 0;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ return count;
+}
+
+static
+const struct file_operations i2400m_rx_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i2400m_rx_stats_read,
+ .write = i2400m_rx_stats_write,
+ .llseek = default_llseek,
+};
+
+
+/* See i2400m_rx_stats_read() */
+static
+ssize_t i2400m_tx_stats_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i2400m *i2400m = filp->private_data;
+ char buf[128];
+ unsigned long flags;
+
+ if (*ppos != 0)
+ return 0;
+ if (count < sizeof(buf))
+ return -ENOSPC;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ snprintf(buf, sizeof(buf), "%u %u %u %u %u %u %u\n",
+ i2400m->tx_pl_num, i2400m->tx_pl_min,
+ i2400m->tx_pl_max, i2400m->tx_num,
+ i2400m->tx_size_acc,
+ i2400m->tx_size_min, i2400m->tx_size_max);
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+/* Any write clears the stats */
+static
+ssize_t i2400m_tx_stats_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i2400m *i2400m = filp->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400m->tx_pl_num = 0;
+ i2400m->tx_pl_max = 0;
+ i2400m->tx_pl_min = UINT_MAX;
+ i2400m->tx_num = 0;
+ i2400m->tx_size_acc = 0;
+ i2400m->tx_size_min = UINT_MAX;
+ i2400m->tx_size_max = 0;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ return count;
+}
+
+static
+const struct file_operations i2400m_tx_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i2400m_tx_stats_read,
+ .write = i2400m_tx_stats_write,
+ .llseek = default_llseek,
+};
+
+
+/* Write 1 to ask the device to go into suspend */
+static
+int debugfs_i2400m_suspend_set(void *data, u64 val)
+{
+ int result;
+ struct i2400m *i2400m = data;
+ result = i2400m_cmd_enter_powersave(i2400m);
+ if (result >= 0)
+ result = 0;
+ return result;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_suspend,
+ NULL, debugfs_i2400m_suspend_set,
+ "%llu\n");
+
+/*
+ * Reset the device
+ *
+ * Write 0 to ask the device to soft reset, 1 to cold reset, 2 to bus
+ * reset (as defined by enum i2400m_reset_type).
+ */
+static
+int debugfs_i2400m_reset_set(void *data, u64 val)
+{
+ int result;
+ struct i2400m *i2400m = data;
+ enum i2400m_reset_type rt = val;
+ switch (rt) {
+ case I2400M_RT_WARM:
+ case I2400M_RT_COLD:
+ case I2400M_RT_BUS:
+ result = i2400m_reset(i2400m, rt);
+ if (result >= 0)
+ result = 0;
+ break;
+ default:
+ result = -EINVAL;
+ }
+ return result;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_reset,
+ NULL, debugfs_i2400m_reset_set,
+ "%llu\n");
+
+void i2400m_debugfs_add(struct i2400m *i2400m)
+{
+ struct dentry *dentry = i2400m->wimax_dev.debugfs_dentry;
+
+ dentry = debugfs_create_dir("i2400m", dentry);
+ i2400m->debugfs_dentry = dentry;
+
+ d_level_register_debugfs("dl_", control, dentry);
+ d_level_register_debugfs("dl_", driver, dentry);
+ d_level_register_debugfs("dl_", debugfs, dentry);
+ d_level_register_debugfs("dl_", fw, dentry);
+ d_level_register_debugfs("dl_", netdev, dentry);
+ d_level_register_debugfs("dl_", rfkill, dentry);
+ d_level_register_debugfs("dl_", rx, dentry);
+ d_level_register_debugfs("dl_", tx, dentry);
+
+ debugfs_create_size_t("tx_in", 0400, dentry, &i2400m->tx_in);
+ debugfs_create_size_t("tx_out", 0400, dentry, &i2400m->tx_out);
+ debugfs_create_u32("state", 0600, dentry, &i2400m->state);
+
+ /*
+ * Trace received messages from user space
+ *
+ * In order to tap the bidirectional message stream in the
+ * 'msg' pipe, user space can read from the 'msg' pipe;
+ * however, due to limitations in libnl, we can't know what
+ * the different applications are sending down to the kernel.
+ *
+ * So we have this hack where the driver will echo any message
+ * received on the msg pipe from user space [through a call to
+ * wimax_dev->op_msg_from_user() into
+ * i2400m_op_msg_from_user()] into the 'trace' pipe that this
+ * driver creates.
+ *
+ * So then, reading from both the 'trace' and 'msg' pipes in
+ * user space will provide a full dump of the traffic.
+ *
+ * Write 1 to activate, 0 to clear.
+ *
+ * It is not really very atomic, but it is also not too
+ * critical.
+ */
+ debugfs_create_u8("trace_msg_from_user", 0600, dentry,
+ &i2400m->trace_msg_from_user);
+
+ debugfs_create_file("netdev_queue_stopped", 0400, dentry, i2400m,
+ &fops_netdev_queue_stopped);
+
+ debugfs_create_file("rx_stats", 0600, dentry, i2400m,
+ &i2400m_rx_stats_fops);
+
+ debugfs_create_file("tx_stats", 0600, dentry, i2400m,
+ &i2400m_tx_stats_fops);
+
+ debugfs_create_file("suspend", 0200, dentry, i2400m,
+ &fops_i2400m_suspend);
+
+ debugfs_create_file("reset", 0200, dentry, i2400m, &fops_i2400m_reset);
+}
+
+void i2400m_debugfs_rm(struct i2400m *i2400m)
+{
+ debugfs_remove_recursive(i2400m->debugfs_dentry);
+}
diff --git a/drivers/staging/wimax/i2400m/driver.c b/drivers/staging/wimax/i2400m/driver.c
new file mode 100644
index 000000000000..dc8939ff78c0
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/driver.c
@@ -0,0 +1,1002 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Generic probe/disconnect, reset and message passing
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * See i2400m.h for driver documentation. This contains helpers for
+ * the driver model glue [_setup()/_release()], handling device resets
+ * [_dev_reset_handle()], and the backends for the WiMAX stack ops
+ * reset [_op_reset()] and message from user [_op_msg_from_user()].
+ *
+ * ROADMAP:
+ *
+ * i2400m_op_msg_from_user()
+ * i2400m_msg_to_dev()
+ * wimax_msg_to_user_send()
+ *
+ * i2400m_op_reset()
+ * i2400m->bus_reset()
+ *
+ * i2400m_dev_reset_handle()
+ * __i2400m_dev_reset_handle()
+ * __i2400m_dev_stop()
+ * __i2400m_dev_start()
+ *
+ * i2400m_setup()
+ * i2400m->bus_setup()
+ * i2400m_bootrom_init()
+ * register_netdev()
+ * wimax_dev_add()
+ * i2400m_dev_start()
+ * __i2400m_dev_start()
+ * i2400m_dev_bootstrap()
+ * i2400m_tx_setup()
+ * i2400m->bus_dev_start()
+ * i2400m_firmware_check()
+ * i2400m_check_mac_addr()
+ *
+ * i2400m_release()
+ * i2400m_dev_stop()
+ * __i2400m_dev_stop()
+ * i2400m_dev_shutdown()
+ * i2400m->bus_dev_stop()
+ * i2400m_tx_release()
+ * i2400m->bus_release()
+ * wimax_dev_rm()
+ * unregister_netdev()
+ */
+#include "i2400m.h"
+#include <linux/etherdevice.h>
+#include "linux-wimax-i2400m.h"
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+
+#define D_SUBMODULE driver
+#include "debug-levels.h"
+
+
+static char i2400m_debug_params[128];
+module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
+ 0644);
+MODULE_PARM_DESC(debug,
+ "String of space-separated NAME:VALUE pairs, where NAMEs "
+ "are the different debug submodules and VALUE are the "
+ "initial debug value to set.");
+
+static char i2400m_barkers_params[128];
+module_param_string(barkers, i2400m_barkers_params,
+ sizeof(i2400m_barkers_params), 0644);
+MODULE_PARM_DESC(barkers,
+ "String of comma-separated 32-bit values; each is "
+ "recognized as the value the device sends as a reboot "
+ "signal; values are appended to a list--setting one value "
+ "as zero cleans the existing list and starts a new one.");
+
+/*
+ * WiMAX stack operation: relay a message from user space
+ *
+ * @wimax_dev: device descriptor
+ * @pipe_name: named pipe the message is for
+ * @msg_buf: pointer to the message bytes
+ * @msg_len: length of the buffer
+ * @genl_info: passed by the generic netlink layer
+ *
+ * The WiMAX stack will call this function when a message was received
+ * from user space.
+ *
+ * For the i2400m, this is an L3L4 message, as specified in
+ * linux-wimax-i2400m.h (in this directory), and thus prefixed with a 'struct
+ * i2400m_l3l4_hdr'. Driver (and device) expect the messages to be
+ * coded in Little Endian.
+ *
+ * This function just verifies that the header declaration and the
+ * payload are consistent and then deals with it, either forwarding it
+ * to the device or processing it locally.
+ *
+ * In the i2400m, messages are basically commands that will carry an
+ * ack, so we use i2400m_msg_to_dev() and then deliver the ack back to
+ * user space. The rx.c code might intercept the response and use it
+ * to update the driver's state, but then it will pass it on so it can
+ * be relayed back to user space.
+ *
+ * Note that asynchronous events from the device are processed and
+ * sent to user space in rx.c.
+ */
+static
+int i2400m_op_msg_from_user(struct wimax_dev *wimax_dev,
+ const char *pipe_name,
+ const void *msg_buf, size_t msg_len,
+ const struct genl_info *genl_info)
+{
+ int result;
+ struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+
+ d_fnstart(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p "
+ "msg_len %zu genl_info %p)\n", wimax_dev, i2400m,
+ msg_buf, msg_len, genl_info);
+ ack_skb = i2400m_msg_to_dev(i2400m, msg_buf, msg_len);
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb))
+ goto error_msg_to_dev;
+ result = wimax_msg_send(&i2400m->wimax_dev, ack_skb);
+error_msg_to_dev:
+ d_fnend(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p msg_len %zu "
+ "genl_info %p) = %d\n", wimax_dev, i2400m, msg_buf, msg_len,
+ genl_info, result);
+ return result;
+}
+
+
+/*
+ * Context to wait for a reset to finalize
+ */
+struct i2400m_reset_ctx {
+ struct completion completion;
+ int result;
+};
+
+
+/*
+ * WiMAX stack operation: reset a device
+ *
+ * @wimax_dev: device descriptor
+ *
+ * See the documentation for wimax_reset() and wimax_dev->op_reset for
+ * the requirements of this function. The WiMAX stack guarantees
+ * serialization on calls to this function.
+ *
+ * Do a warm reset on the device; if it fails, resort to a cold reset
+ * and return -ENODEV. On successful warm reset, we need to block
+ * until it is complete.
+ *
+ * The bus-driver implementation of reset takes care of falling back
+ * to cold reset if warm fails.
+ */
+static
+int i2400m_op_reset(struct wimax_dev *wimax_dev)
+{
+ int result;
+ struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_reset_ctx ctx = {
+ .completion = COMPLETION_INITIALIZER_ONSTACK(ctx.completion),
+ .result = 0,
+ };
+
+ d_fnstart(4, dev, "(wimax_dev %p)\n", wimax_dev);
+ mutex_lock(&i2400m->init_mutex);
+ i2400m->reset_ctx = &ctx;
+ mutex_unlock(&i2400m->init_mutex);
+ result = i2400m_reset(i2400m, I2400M_RT_WARM);
+ if (result < 0)
+ goto out;
+ result = wait_for_completion_timeout(&ctx.completion, 4*HZ);
+ if (result == 0)
+ result = -ETIMEDOUT;
+ else if (result > 0)
+ result = ctx.result;
+ /* if result < 0, pass it on */
+ mutex_lock(&i2400m->init_mutex);
+ i2400m->reset_ctx = NULL;
+ mutex_unlock(&i2400m->init_mutex);
+out:
+ d_fnend(4, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+ return result;
+}
+
+
+/*
+ * Check that the MAC address we got from boot mode is ok
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ */
+static
+int i2400m_check_mac_addr(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *skb;
+ const struct i2400m_tlv_detailed_device_info *ddi;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ skb = i2400m_get_device_info(i2400m);
+ if (IS_ERR(skb)) {
+ result = PTR_ERR(skb);
+ dev_err(dev, "Cannot verify MAC address, error reading: %d\n",
+ result);
+ goto error;
+ }
+ /* Extract MAC address */
+ ddi = (void *) skb->data;
+ BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
+ d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
+ ddi->mac_address);
+ if (!memcmp(net_dev->perm_addr, ddi->mac_address,
+ sizeof(ddi->mac_address)))
+ goto ok;
+ dev_warn(dev, "warning: device reports a different MAC address "
+ "to that of boot mode's\n");
+ dev_warn(dev, "device reports %pM\n", ddi->mac_address);
+ dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
+ if (is_zero_ether_addr(ddi->mac_address))
+ dev_err(dev, "device reports an invalid MAC address, "
+ "not updating\n");
+ else {
+ dev_warn(dev, "updating MAC address\n");
+ net_dev->addr_len = ETH_ALEN;
+ memcpy(net_dev->perm_addr, ddi->mac_address, ETH_ALEN);
+ memcpy(net_dev->dev_addr, ddi->mac_address, ETH_ALEN);
+ }
+ok:
+ result = 0;
+ kfree_skb(skb);
+error:
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+
+
+/**
+ * __i2400m_dev_start - Bring up driver communication with the device
+ *
+ * @i2400m: device descriptor
+ * @flags: boot mode flags
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Uploads firmware and brings up all the resources needed to be able
+ * to communicate with the device.
+ *
+ * The workqueue has to be set up early, at least before RX handling
+ * (its only real user for now) so it can process reports as they
+ * arrive. We also want to destroy it if we retry, to make sure it is
+ * flushed...easier like this.
+ *
+ * TX needs to be setup before the bus-specific code (otherwise on
+ * shutdown, the bus-tx code could try to access it).
+ */
+static
+int __i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+ int result;
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct net_device *net_dev = wimax_dev->net_dev;
+ struct device *dev = i2400m_dev(i2400m);
+ int times = i2400m->bus_bm_retries;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+retry:
+ result = i2400m_dev_bootstrap(i2400m, flags);
+ if (result < 0) {
+ dev_err(dev, "cannot bootstrap device: %d\n", result);
+ goto error_bootstrap;
+ }
+ result = i2400m_tx_setup(i2400m);
+ if (result < 0)
+ goto error_tx_setup;
+ result = i2400m_rx_setup(i2400m);
+ if (result < 0)
+ goto error_rx_setup;
+ i2400m->work_queue = create_singlethread_workqueue(wimax_dev->name);
+ if (i2400m->work_queue == NULL) {
+ result = -ENOMEM;
+ dev_err(dev, "cannot create workqueue\n");
+ goto error_create_workqueue;
+ }
+ if (i2400m->bus_dev_start) {
+ result = i2400m->bus_dev_start(i2400m);
+ if (result < 0)
+ goto error_bus_dev_start;
+ }
+ i2400m->ready = 1;
+ wmb(); /* see i2400m->ready's documentation */
+ /* process pending reports from the device */
+ queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
+ result = i2400m_firmware_check(i2400m); /* fw versions ok? */
+ if (result < 0)
+ goto error_fw_check;
+ /* At this point it is ok to send commands to the device */
+ result = i2400m_check_mac_addr(i2400m);
+ if (result < 0)
+ goto error_check_mac_addr;
+ result = i2400m_dev_initialize(i2400m);
+ if (result < 0)
+ goto error_dev_initialize;
+
+ /* We don't want any additional unwanted error recovery triggered
+ * from any other context so if anything went wrong before we come
+ * here, let's keep i2400m->error_recovery untouched and leave it to
+ * dev_reset_handle(). See dev_reset_handle(). */
+
+ atomic_dec(&i2400m->error_recovery);
+ /* Everything works so far; ok, now we are ready to
+ * take error recovery action if it's required. */
+
+ /* At this point, reports will come for the device and set it
+ * to the right state if it is different than UNINITIALIZED */
+ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+ net_dev, i2400m, result);
+ return result;
+
+error_dev_initialize:
+error_check_mac_addr:
+error_fw_check:
+ i2400m->ready = 0;
+ wmb(); /* see i2400m->ready's documentation */
+ flush_workqueue(i2400m->work_queue);
+ if (i2400m->bus_dev_stop)
+ i2400m->bus_dev_stop(i2400m);
+error_bus_dev_start:
+ destroy_workqueue(i2400m->work_queue);
+error_create_workqueue:
+ i2400m_rx_release(i2400m);
+error_rx_setup:
+ i2400m_tx_release(i2400m);
+error_tx_setup:
+error_bootstrap:
+ if (result == -EL3RST && times-- > 0) {
+ flags = I2400M_BRI_SOFT|I2400M_BRI_MAC_REINIT;
+ goto retry;
+ }
+ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+ net_dev, i2400m, result);
+ return result;
+}
+
+
+static
+int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
+{
+ int result = 0;
+ mutex_lock(&i2400m->init_mutex); /* Well, start the device */
+ if (i2400m->updown == 0) {
+ result = __i2400m_dev_start(i2400m, bm_flags);
+ if (result >= 0) {
+ i2400m->updown = 1;
+ i2400m->alive = 1;
+ wmb();/* see i2400m->updown and i2400m->alive's doc */
+ }
+ }
+ mutex_unlock(&i2400m->init_mutex);
+ return result;
+}
+
+
+/**
+ * __i2400m_dev_stop - Tear down driver communication with the device
+ *
+ * @i2400m: device descriptor
+ *
+ * Releases all the resources allocated to communicate with the
+ * device. Note we cannot destroy the workqueue earlier as until RX is
+ * fully destroyed, it could still try to schedule jobs.
+ */
+static
+void __i2400m_dev_stop(struct i2400m *i2400m)
+{
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
+ i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
+ complete(&i2400m->msg_completion);
+ i2400m_net_wake_stop(i2400m);
+ i2400m_dev_shutdown(i2400m);
+ /*
+ * Make sure no report hooks are running *before* we stop the
+ * communication infrastructure with the device.
+ */
+ i2400m->ready = 0; /* nobody can queue work anymore */
+ wmb(); /* see i2400m->ready's documentation */
+ flush_workqueue(i2400m->work_queue);
+
+ if (i2400m->bus_dev_stop)
+ i2400m->bus_dev_stop(i2400m);
+ destroy_workqueue(i2400m->work_queue);
+ i2400m_rx_release(i2400m);
+ i2400m_tx_release(i2400m);
+ wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
+ d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+}
+
+
+/*
+ * Watch out -- we only need to stop if there is a need for it. The
+ * device could have reset itself and failed to come up again (see
+ * __i2400m_dev_reset_handle()).
+ */
+static
+void i2400m_dev_stop(struct i2400m *i2400m)
+{
+ mutex_lock(&i2400m->init_mutex);
+ if (i2400m->updown) {
+ __i2400m_dev_stop(i2400m);
+ i2400m->updown = 0;
+ i2400m->alive = 0;
+ wmb(); /* see i2400m->updown and i2400m->alive's doc */
+ }
+ mutex_unlock(&i2400m->init_mutex);
+}
+
+
+/*
+ * Listen to PM events to cache the firmware before suspend/hibernation
+ *
+ * When the device comes out of suspend, it might go into reset and
+ * the firmware has to be uploaded again. At resume, most of the time, we
+ * can't load firmware images from disk, so we need to cache it.
+ *
+ * i2400m_fw_cache() will allocate a kobject and attach the firmware
+ * to it; that way we don't have to worry too much about the fw loader
+ * hitting a race condition.
+ *
+ * Note: modus operandi stolen from the Orinoco driver; thx.
+ */
+static
+int i2400m_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event,
+ void *unused)
+{
+ struct i2400m *i2400m =
+ container_of(notifier, struct i2400m, pm_notifier);
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p pm_event %lx)\n", i2400m, pm_event);
+ switch (pm_event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ i2400m_fw_cache(i2400m);
+ break;
+ case PM_POST_RESTORE:
+ /* Restore from hibernation failed. We need to clean
+ * up in exactly the same way, so fall through. */
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ i2400m_fw_uncache(i2400m);
+ break;
+
+ case PM_RESTORE_PREPARE:
+ default:
+ break;
+ }
+ d_fnend(3, dev, "(i2400m %p pm_event %lx) = void\n", i2400m, pm_event);
+ return NOTIFY_DONE;
+}
+
+
+/*
+ * pre-reset is called before the device goes into reset
+ *
+ * This has to be followed by a call to i2400m_post_reset(), otherwise
+ * bad things might happen.
+ */
+int i2400m_pre_reset(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ d_printf(1, dev, "pre-reset shut down\n");
+
+ mutex_lock(&i2400m->init_mutex);
+ if (i2400m->updown) {
+ netif_tx_disable(i2400m->wimax_dev.net_dev);
+ __i2400m_dev_stop(i2400m);
+ /* don't set updown to zero -- this way
+ * post_reset can restore properly */
+ }
+ mutex_unlock(&i2400m->init_mutex);
+ if (i2400m->bus_release)
+ i2400m->bus_release(i2400m);
+ d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2400m_pre_reset);
+
+
+/*
+ * Restore device state after a reset
+ *
+ * Do the work needed after a device reset to bring it up to the same
+ * state as it was before the reset.
+ *
+ * NOTE: this requires i2400m->init_mutex taken
+ */
+int i2400m_post_reset(struct i2400m *i2400m)
+{
+ int result = 0;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ d_printf(1, dev, "post-reset start\n");
+ if (i2400m->bus_setup) {
+ result = i2400m->bus_setup(i2400m);
+ if (result < 0) {
+ dev_err(dev, "bus-specific setup failed: %d\n",
+ result);
+ goto error_bus_setup;
+ }
+ }
+ mutex_lock(&i2400m->init_mutex);
+ if (i2400m->updown) {
+ result = __i2400m_dev_start(
+ i2400m, I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+ if (result < 0)
+ goto error_dev_start;
+ }
+ mutex_unlock(&i2400m->init_mutex);
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+
+error_dev_start:
+ if (i2400m->bus_release)
+ i2400m->bus_release(i2400m);
+ /* even if the device was up, it could not be recovered, so we
+ * mark it as down. */
+ i2400m->updown = 0;
+ wmb(); /* see i2400m->updown's documentation */
+ mutex_unlock(&i2400m->init_mutex);
+error_bus_setup:
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_post_reset);
+
+
+/*
+ * The device has rebooted; fix up the device and the driver
+ *
+ * Tear down the driver communication with the device, reload the
+ * firmware and reinitialize the communication with the device.
+ *
+ * If someone calls a reset when the device's firmware is down, in
+ * theory we won't see it because we are not listening. However, just
+ * in case, leave the code to handle it.
+ *
+ * If there is a reset context, use it; this means someone is waiting
+ * for us to tell him when the reset operation is complete and the
+ * device is ready to rock again.
+ *
+ * NOTE: if we are in the process of bringing up or down the
+ * communication with the device [running i2400m_dev_start() or
+ * _stop()], don't do anything, let it fail and handle it.
+ *
+ * This function always runs in a thread context.
+ *
+ * The reason why the reset happened is passed in i2400m->reset_reason
+ * (a 'const char *' set by i2400m_dev_reset_handle()) and is only
+ * used for messages.
+ */
+static
+void __i2400m_dev_reset_handle(struct work_struct *ws)
+{
+ struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
+ const char *reason = i2400m->reset_reason;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
+ int result;
+
+ d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
+
+ i2400m->boot_mode = 1;
+ wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
+
+ result = 0;
+ if (mutex_trylock(&i2400m->init_mutex) == 0) {
+ /* We are still in i2400m_dev_start() [let it fail] or
+ * i2400m_dev_stop() [we are shutting down anyway, so
+ * ignore it] or we are resetting somewhere else. */
+ dev_err(dev, "device rebooted somewhere else?\n");
+ i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
+ complete(&i2400m->msg_completion);
+ goto out;
+ }
+
+ dev_err(dev, "%s: reinitializing driver\n", reason);
+ rmb();
+ if (i2400m->updown) {
+ __i2400m_dev_stop(i2400m);
+ i2400m->updown = 0;
+ wmb(); /* see i2400m->updown's documentation */
+ }
+
+ if (i2400m->alive) {
+ result = __i2400m_dev_start(i2400m,
+ I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
+ if (result < 0) {
+ dev_err(dev, "%s: cannot start the device: %d\n",
+ reason, result);
+ result = -EUCLEAN;
+ if (atomic_read(&i2400m->bus_reset_retries)
+ >= I2400M_BUS_RESET_RETRIES) {
+ result = -ENODEV;
+ dev_err(dev, "tried too many times to "
+ "reset the device, giving up\n");
+ }
+ }
+ }
+
+ if (i2400m->reset_ctx) {
+ ctx->result = result;
+ complete(&ctx->completion);
+ }
+ mutex_unlock(&i2400m->init_mutex);
+ if (result == -EUCLEAN) {
+ /*
+ * We come here because the reset during operational mode
+ * wasn't successfully completed and we need to proceed to a
+ * bus reset. For dev_reset_handle() to be able to handle
+ * the reset event later properly, we restore boot_mode back
+ * to the state before the previous reset, i.e. just as if we
+ * were issuing the bus reset for the first time.
+ */
+ i2400m->boot_mode = 0;
+ wmb();
+
+ atomic_inc(&i2400m->bus_reset_retries);
+ /* oops, need to clean up [w/ init_mutex not held] */
+ result = i2400m_reset(i2400m, I2400M_RT_BUS);
+ if (result >= 0)
+ result = -ENODEV;
+ } else {
+ rmb();
+ if (i2400m->alive) {
+ /* great, we expect the device state up and
+ * dev_start() actually brings the device state up */
+ i2400m->updown = 1;
+ wmb();
+ atomic_set(&i2400m->bus_reset_retries, 0);
+ }
+ }
+out:
+ d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
+ ws, i2400m, reason);
+}
+
+
+/**
+ * i2400m_dev_reset_handle - Handle a device's reset in a thread context
+ *
+ * @i2400m: device descriptor
+ * @reason: string describing why the reset happened (used for messages)
+ *
+ * Schedule a device reset handling out on a thread context, so it
+ * is safe to call from atomic context. We can't use the i2400m's
+ * queue as we are going to destroy it and reinitialize it as part of
+ * the driver bringdown/bringup process.
+ *
+ * See __i2400m_dev_reset_handle() for details; that takes care of
+ * reinitializing the driver to handle the reset, calling into the
+ * bus-specific ops as needed.
+ */
+int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
+{
+ i2400m->reset_reason = reason;
+ return schedule_work(&i2400m->reset_ws);
+}
+EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
+
+
+ /*
+ * The actual work of error recovery.
+ *
+ * The current implementation of error recovery is to trigger a bus reset.
+ */
+static
+void __i2400m_error_recovery(struct work_struct *ws)
+{
+ struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
+
+ i2400m_reset(i2400m, I2400M_RT_BUS);
+}
+
+/*
+ * Schedule a work struct for error recovery.
+ *
+ * The intention of error recovery is to bring back the device to some
+ * known state whenever TX sees -ETIMEDOUT (-110) when copying the data
+ * to the device. The TX failure could mean the device bus is stuck, so
+ * the current error recovery implementation is to trigger a bus reset
+ * on the device in the hope of bringing it back.
+ *
+ * The actual work of error recovery has to be in a thread context
+ * because it is kicked off from the bus driver's TX workqueue, which is
+ * to be destroyed by the error recovery mechanism (currently a bus reset).
+ *
+ * Also, there may be already a queue of TX works that all hit
+ * the -ETIMEOUT error condition because the device is stuck already.
+ * Since bus reset is used as the error recovery mechanism and we don't
+ * want consecutive bus resets simply because the multiple TX works
+ * in the queue all hit the same device erratum, the flag "error_recovery"
+ * is introduced for preventing unwanted consecutive bus resets.
+ *
+ * Error recovery shall only be invoked again if previous one was completed.
+ * The flag error_recovery is set when error recovery mechanism is scheduled,
+ * and is checked when we need to schedule another error recovery. If it is
+ * in place already, then we shouldn't schedule another one.
+ */
+void i2400m_error_recovery(struct i2400m *i2400m)
+{
+ if (atomic_add_return(1, &i2400m->error_recovery) == 1)
+ schedule_work(&i2400m->recovery_ws);
+ else
+ atomic_dec(&i2400m->error_recovery);
+}
+EXPORT_SYMBOL_GPL(i2400m_error_recovery);
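+
+/*
+ * Worked example of the error_recovery counter protocol (informative
+ * only, derived from i2400m_init(), __i2400m_dev_start() and the
+ * function above):
+ *
+ *   i2400m_init():         error_recovery = 1   (recovery not allowed yet)
+ *   __i2400m_dev_start():  atomic_dec() -> 0    (device up, recovery armed)
+ *   first TX timeout:      atomic_add_return(1) == 1 -> schedule bus reset
+ *   further TX timeouts:   atomic_add_return(1) > 1 -> atomic_dec(), skip
+ *
+ * so at most one bus reset is queued until the device is brought back
+ * up and the counter is re-armed.
+ */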
+
+/*
+ * Alloc the command and ack buffers for boot mode
+ *
+ * Get the buffers needed to deal with boot mode messages.
+ */
+static
+int i2400m_bm_buf_alloc(struct i2400m *i2400m)
+{
+ i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
+ if (i2400m->bm_cmd_buf == NULL)
+ goto error_bm_cmd_kzalloc;
+ i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL);
+ if (i2400m->bm_ack_buf == NULL)
+ goto error_bm_ack_buf_kzalloc;
+ return 0;
+
+error_bm_ack_buf_kzalloc:
+ kfree(i2400m->bm_cmd_buf);
+error_bm_cmd_kzalloc:
+ return -ENOMEM;
+}
+
+
+/*
+ * Free boot mode command and ack buffers.
+ */
+static
+void i2400m_bm_buf_free(struct i2400m *i2400m)
+{
+ kfree(i2400m->bm_ack_buf);
+ kfree(i2400m->bm_cmd_buf);
+}
+
+
+/**
+ * i2400m_init - Initialize a 'struct i2400m' from all zeroes
+ *
+ * @i2400m: device descriptor to initialize
+ *
+ * This is a bus-generic API call.
+ */
+void i2400m_init(struct i2400m *i2400m)
+{
+ wimax_dev_init(&i2400m->wimax_dev);
+
+ i2400m->boot_mode = 1;
+ i2400m->rx_reorder = 1;
+ init_waitqueue_head(&i2400m->state_wq);
+
+ spin_lock_init(&i2400m->tx_lock);
+ i2400m->tx_pl_min = UINT_MAX;
+ i2400m->tx_size_min = UINT_MAX;
+
+ spin_lock_init(&i2400m->rx_lock);
+ i2400m->rx_pl_min = UINT_MAX;
+ i2400m->rx_size_min = UINT_MAX;
+ INIT_LIST_HEAD(&i2400m->rx_reports);
+ INIT_WORK(&i2400m->rx_report_ws, i2400m_report_hook_work);
+
+ mutex_init(&i2400m->msg_mutex);
+ init_completion(&i2400m->msg_completion);
+
+ mutex_init(&i2400m->init_mutex);
+ /* wake_tx_ws is initialized in i2400m_tx_setup() */
+
+ INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
+ INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
+
+ atomic_set(&i2400m->bus_reset_retries, 0);
+
+ i2400m->alive = 0;
+
+ /* initialize error_recovery to 1 for denoting we
+ * are not yet ready to take any error recovery */
+ atomic_set(&i2400m->error_recovery, 1);
+}
+EXPORT_SYMBOL_GPL(i2400m_init);
+
+
+int i2400m_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
+{
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+
+ /*
+ * Make sure we stop TXs and down the carrier before
+ * resetting; this is needed to avoid things like
+ * i2400m_wake_tx() scheduling stuff in parallel.
+ */
+ if (net_dev->reg_state == NETREG_REGISTERED) {
+ netif_tx_disable(net_dev);
+ netif_carrier_off(net_dev);
+ }
+ return i2400m->bus_reset(i2400m, rt);
+}
+EXPORT_SYMBOL_GPL(i2400m_reset);
+
+
+/**
+ * i2400m_setup - bus-generic setup function for the i2400m device
+ *
+ * @i2400m: device descriptor (bus-specific parts have been initialized)
+ * @bm_flags: boot mode flags to use when bootstrapping the device
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * Sets up basic device communication infrastructure, boots the ROM to
+ * read the MAC address, registers with the WiMAX and network stacks
+ * and then brings up the device.
+ */
+int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+
+ snprintf(wimax_dev->name, sizeof(wimax_dev->name),
+ "i2400m-%s:%s", dev->bus->name, dev_name(dev));
+
+ result = i2400m_bm_buf_alloc(i2400m);
+ if (result < 0) {
+ dev_err(dev, "cannot allocate bootmode scratch buffers\n");
+ goto error_bm_buf_alloc;
+ }
+
+ if (i2400m->bus_setup) {
+ result = i2400m->bus_setup(i2400m);
+ if (result < 0) {
+ dev_err(dev, "bus-specific setup failed: %d\n",
+ result);
+ goto error_bus_setup;
+ }
+ }
+
+ result = i2400m_bootrom_init(i2400m, bm_flags);
+ if (result < 0) {
+ dev_err(dev, "read mac addr: bootrom init "
+ "failed: %d\n", result);
+ goto error_bootrom_init;
+ }
+ result = i2400m_read_mac_addr(i2400m);
+ if (result < 0)
+ goto error_read_mac_addr;
+ eth_random_addr(i2400m->src_mac_addr);
+
+ i2400m->pm_notifier.notifier_call = i2400m_pm_notifier;
+ register_pm_notifier(&i2400m->pm_notifier);
+
+ result = register_netdev(net_dev); /* Okey dokey, bring it up */
+ if (result < 0) {
+ dev_err(dev, "cannot register i2400m network device: %d\n",
+ result);
+ goto error_register_netdev;
+ }
+ netif_carrier_off(net_dev);
+
+ i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user;
+ i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle;
+ i2400m->wimax_dev.op_reset = i2400m_op_reset;
+
+ result = wimax_dev_add(&i2400m->wimax_dev, net_dev);
+ if (result < 0)
+ goto error_wimax_dev_add;
+
+ /* Now setup all that requires a registered net and wimax device. */
+ result = sysfs_create_group(&net_dev->dev.kobj, &i2400m_dev_attr_group);
+ if (result < 0) {
+ dev_err(dev, "cannot setup i2400m's sysfs: %d\n", result);
+ goto error_sysfs_setup;
+ }
+
+ i2400m_debugfs_add(i2400m);
+
+ result = i2400m_dev_start(i2400m, bm_flags);
+ if (result < 0)
+ goto error_dev_start;
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+
+error_dev_start:
+ i2400m_debugfs_rm(i2400m);
+ sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
+ &i2400m_dev_attr_group);
+error_sysfs_setup:
+ wimax_dev_rm(&i2400m->wimax_dev);
+error_wimax_dev_add:
+ unregister_netdev(net_dev);
+error_register_netdev:
+ unregister_pm_notifier(&i2400m->pm_notifier);
+error_read_mac_addr:
+error_bootrom_init:
+ if (i2400m->bus_release)
+ i2400m->bus_release(i2400m);
+error_bus_setup:
+ i2400m_bm_buf_free(i2400m);
+error_bm_buf_alloc:
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_setup);
+
+
+/**
+ * i2400m_release - release the bus-generic driver resources
+ *
+ * @i2400m: device descriptor
+ *
+ * Sends a disconnect message and undoes any setup done by i2400m_setup().
+ */
+void i2400m_release(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ netif_stop_queue(i2400m->wimax_dev.net_dev);
+
+ i2400m_dev_stop(i2400m);
+
+ cancel_work_sync(&i2400m->reset_ws);
+ cancel_work_sync(&i2400m->recovery_ws);
+
+ i2400m_debugfs_rm(i2400m);
+ sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
+ &i2400m_dev_attr_group);
+ wimax_dev_rm(&i2400m->wimax_dev);
+ unregister_netdev(i2400m->wimax_dev.net_dev);
+ unregister_pm_notifier(&i2400m->pm_notifier);
+ if (i2400m->bus_release)
+ i2400m->bus_release(i2400m);
+ i2400m_bm_buf_free(i2400m);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+EXPORT_SYMBOL_GPL(i2400m_release);
+
+
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(control),
+ D_SUBMODULE_DEFINE(driver),
+ D_SUBMODULE_DEFINE(debugfs),
+ D_SUBMODULE_DEFINE(fw),
+ D_SUBMODULE_DEFINE(netdev),
+ D_SUBMODULE_DEFINE(rfkill),
+ D_SUBMODULE_DEFINE(rx),
+ D_SUBMODULE_DEFINE(sysfs),
+ D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+
+static
+int __init i2400m_driver_init(void)
+{
+ d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400m_debug_params,
+ "i2400m.debug");
+ return i2400m_barker_db_init(i2400m_barkers_params);
+}
+module_init(i2400m_driver_init);
+
+static
+void __exit i2400m_driver_exit(void)
+{
+ i2400m_barker_db_exit();
+}
+module_exit(i2400m_driver_exit);
+
+MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
+MODULE_DESCRIPTION("Intel 2400M WiMAX networking bus-generic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wimax/i2400m/fw.c b/drivers/staging/wimax/i2400m/fw.c
new file mode 100644
index 000000000000..6c9a41bff2e0
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/fw.c
@@ -0,0 +1,1653 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Firmware uploader
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ *
+ * THE PROCEDURE
+ *
+ * The 2400m and derived devices work in two modes: boot-mode or
+ * normal mode. In boot mode we can execute only a handful of commands
+ * targeted at uploading the firmware and launching it.
+ *
+ * The 2400m enters boot mode when it is first connected to the
+ * system, when it crashes and when you ask it to reboot. There are
+ * two submodes of the boot mode: signed and non-signed. Signed takes
+ * firmwares signed with a certain private key, non-signed takes any
+ * firmware. Normal hardware takes only signed firmware.
+ *
+ * On boot mode, in USB, we write to the device using the bulk out
+ * endpoint and read from it in the notification endpoint.
+ *
+ * Upon entrance to boot mode, the device sends (preceded with a few
+ * zero length packets (ZLPs) on the notification endpoint in USB) a
+ * reboot barker (4 le32 words with the same value). We ack it by
+ * sending the same barker to the device. The device acks with a
+ * reboot ack barker (4 le32 words with value I2400M_ACK_BARKER) and
+ * then is fully booted. At this point we can upload the firmware.
+ *
+ * Note that different iterations of the device and EEPROM
+ * configurations will send different [re]boot barkers; these are
+ * collected in i2400m_barker_db along with the firmware
+ * characteristics they require.
+ *
+ * This process is accomplished by the i2400m_bootrom_init()
+ * function. All the device interaction happens through the
+ * i2400m_bm_cmd() [boot mode command]. Special return values will
+ * indicate if the device did reset during the process.
+ *
+ * After this, we read the MAC address and then (if needed)
+ * reinitialize the device. We need to read it ahead of time because
+ * in the future, we might not upload the firmware until userspace
+ * 'ifconfig up's the device.
+ *
+ * We can then upload the firmware file. The file is composed of a BCF
+ * header (basic data, keys and signatures) and a list of write
+ * commands and payloads. Optionally more BCF headers might follow the
+ * main payload. We first upload the header [i2400m_dnload_init()] and
+ * then pass the commands and payloads verbatim to the i2400m_bm_cmd()
+ * function [i2400m_dnload_bcf()]. Then we tell the device to jump to
+ * the new firmware [i2400m_dnload_finalize()].
+ *
+ * Once firmware is uploaded, we are good to go :)
+ *
+ * When we don't know in which mode we are, we first try by sending a
+ * warm reset request that will take us to boot-mode. If we time out
+ * waiting for a reboot barker, that means maybe we are already in
+ * boot mode, so we send a reboot barker.
+ *
+ * COMMAND EXECUTION
+ *
+ * This code (and process) is single threaded; for executing commands,
+ * we post a URB to the notification endpoint, post the command, wait
+ * for data on the notification buffer. We don't need to worry about
+ * others as we know we are the only ones in there.
+ *
+ * BACKEND IMPLEMENTATION
+ *
+ * This code is bus-generic; the bus-specific driver provides back end
+ * implementations to send a boot mode command to the device and to
+ * read an acknowledgement (or an asynchronous notification) back
+ * from it.
+ *
+ * FIRMWARE LOADING
+ *
+ * Note that in some cases, we can't just load a firmware file (for
+ * example, when resuming). For that, we might cache the firmware
+ * file. Thus, when doing the bootstrap, if there is a cached firmware
+ * file, it is used; if not, loading from disk is attempted.
+ *
+ * ROADMAP
+ *
+ * i2400m_barker_db_init Called by i2400m_driver_init()
+ * i2400m_barker_db_add
+ *
+ * i2400m_barker_db_exit Called by i2400m_driver_exit()
+ *
+ * i2400m_dev_bootstrap Called by __i2400m_dev_start()
+ * request_firmware
+ * i2400m_fw_bootstrap
+ * i2400m_fw_check
+ * i2400m_fw_hdr_check
+ * i2400m_fw_dnload
+ * release_firmware
+ *
+ * i2400m_fw_dnload
+ * i2400m_bootrom_init
+ * i2400m_bm_cmd
+ * i2400m_reset
+ * i2400m_dnload_init
+ * i2400m_dnload_init_signed
+ * i2400m_dnload_init_nonsigned
+ * i2400m_download_chunk
+ * i2400m_bm_cmd
+ * i2400m_dnload_bcf
+ * i2400m_bm_cmd
+ * i2400m_dnload_finalize
+ * i2400m_bm_cmd
+ *
+ * i2400m_bm_cmd
+ * i2400m->bus_bm_cmd_send()
+ * i2400m->bus_bm_wait_for_ack
+ * __i2400m_bm_ack_verify
+ * i2400m_is_boot_barker
+ *
+ * i2400m_bm_cmd_prepare Used by bus-drivers to prep
+ * commands before sending
+ *
+ * i2400m_pm_notifier Called on Power Management events
+ * i2400m_fw_cache
+ * i2400m_fw_uncache
+ */
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/export.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE fw
+#include "debug-levels.h"
+
+
+static const __le32 i2400m_ACK_BARKER[4] = {
+ cpu_to_le32(I2400M_ACK_BARKER),
+ cpu_to_le32(I2400M_ACK_BARKER),
+ cpu_to_le32(I2400M_ACK_BARKER),
+ cpu_to_le32(I2400M_ACK_BARKER)
+};
+
+
+/**
+ * i2400m_bm_cmd_prepare - Prepare a boot-mode command for delivery
+ *
+ * @cmd: pointer to bootrom header to prepare
+ *
+ * Computes checksum if so needed. After calling this function, DO NOT
+ * modify the command or header as the checksum won't work anymore.
+ *
+ * We do it from here because sometimes we cannot do it in the
+ * original context where the command was built (it is const), so when we
+ * copy it to our staging buffer, we add the checksum there.
+ */
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *cmd)
+{
+ if (i2400m_brh_get_use_checksum(cmd)) {
+ int i;
+ u32 checksum = 0;
+ const u32 *checksum_ptr = (void *) cmd->payload;
+ for (i = 0; i < cmd->data_size / 4; i++)
+ checksum += cpu_to_le32(*checksum_ptr++);
+ checksum += cmd->command + cmd->target_addr + cmd->data_size;
+ cmd->block_checksum = cpu_to_le32(checksum);
+ }
+}
+EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare);
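+
+/*
+ * Typical caller pattern (sketch only; the real callers live in the
+ * bus-specific code, e.g. usb-fw.c):
+ *
+ *   memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size);   [stage the command]
+ *   cmd = i2400m->bm_cmd_buf;
+ *   i2400m_bm_cmd_prepare(cmd);                   [checksum the copy]
+ *   ...hand cmd to the bus-specific send routine...
+ *
+ * i.e. the checksum is computed on the staged copy, never on the
+ * caller's const original; '_cmd' and 'cmd' here are illustrative names.
+ */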
+
+
+/*
+ * Database of known barkers.
+ *
+ * A barker is what the device sends indicating it is ready to be
+ * bootloaded. Different versions of the device will send different
+ * barkers. Depending on the barker, the device might require one
+ * kind of firmware or another.
+ */
+static struct i2400m_barker_db {
+ __le32 data[4];
+} *i2400m_barker_db;
+static size_t i2400m_barker_db_used, i2400m_barker_db_size;
+
+
+static
+int i2400m_zrealloc_2x(void **ptr, size_t *_count, size_t el_size,
+ gfp_t gfp_flags)
+{
+ size_t old_count = *_count,
+ new_count = old_count ? 2 * old_count : 2,
+ old_size = el_size * old_count,
+ new_size = el_size * new_count;
+ void *nptr = krealloc(*ptr, new_size, gfp_flags);
+ if (nptr) {
+ /* zero the other half or the whole thing if old_count
+ * was zero */
+ if (old_size == 0)
+ memset(nptr, 0, new_size);
+ else
+ memset(nptr + old_size, 0, old_size);
+ *_count = new_count;
+ *ptr = nptr;
+ return 0;
+ } else
+ return -ENOMEM;
+}
+
+
+/*
+ * Add a barker to the database
+ *
+ * This can only be used from within this module, and only at
+ * module_init time. This is to avoid the need to do locking.
+ */
+static
+int i2400m_barker_db_add(u32 barker_id)
+{
+ int result;
+
+ struct i2400m_barker_db *barker;
+ if (i2400m_barker_db_used >= i2400m_barker_db_size) {
+ result = i2400m_zrealloc_2x(
+ (void **) &i2400m_barker_db, &i2400m_barker_db_size,
+ sizeof(i2400m_barker_db[0]), GFP_KERNEL);
+ if (result < 0)
+ return result;
+ }
+ barker = i2400m_barker_db + i2400m_barker_db_used++;
+ barker->data[0] = le32_to_cpu(barker_id);
+ barker->data[1] = le32_to_cpu(barker_id);
+ barker->data[2] = le32_to_cpu(barker_id);
+ barker->data[3] = le32_to_cpu(barker_id);
+ return 0;
+}
+
+
+void i2400m_barker_db_exit(void)
+{
+ kfree(i2400m_barker_db);
+ i2400m_barker_db = NULL;
+ i2400m_barker_db_size = 0;
+ i2400m_barker_db_used = 0;
+}
+
+
+/*
+ * Helper function to add all the known stable barkers to the barker
+ * database.
+ */
+static
+int i2400m_barker_db_known_barkers(void)
+{
+ int result;
+
+ result = i2400m_barker_db_add(I2400M_NBOOT_BARKER);
+ if (result < 0)
+ goto error_add;
+ result = i2400m_barker_db_add(I2400M_SBOOT_BARKER);
+ if (result < 0)
+ goto error_add;
+ result = i2400m_barker_db_add(I2400M_SBOOT_BARKER_6050);
+ if (result < 0)
+ goto error_add;
+error_add:
+ return result;
+}
+
+
+/*
+ * Initialize the barker database
+ *
+ * This can only be used from the module_init function for this
+ * module; this is to avoid the need to do locking.
+ *
+ * @options: command line argument with extra barkers to
+ * recognize. This is a comma-separated list of 32-bit hex
+ * numbers. They are appended to the existing list. Setting 0
+ * cleans the existing list and starts a new one.
+ */
+int i2400m_barker_db_init(const char *_options)
+{
+ int result;
+ char *options = NULL, *options_orig, *token;
+
+ i2400m_barker_db = NULL;
+ i2400m_barker_db_size = 0;
+ i2400m_barker_db_used = 0;
+
+ result = i2400m_barker_db_known_barkers();
+ if (result < 0)
+ goto error_add;
+ /* parse command line options from i2400m.barkers */
+ if (_options != NULL) {
+ unsigned barker;
+
+ options_orig = kstrdup(_options, GFP_KERNEL);
+ if (options_orig == NULL) {
+ result = -ENOMEM;
+ goto error_parse;
+ }
+ options = options_orig;
+
+ while ((token = strsep(&options, ",")) != NULL) {
+ if (*token == '\0') /* eat joint commas */
+ continue;
+ if (sscanf(token, "%x", &barker) != 1
+ || barker > 0xffffffff) {
+ printk(KERN_ERR "%s: can't recognize "
+ "i2400m.barkers value '%s' as "
+ "a 32-bit number\n",
+ __func__, token);
+ result = -EINVAL;
+ goto error_parse;
+ }
+ if (barker == 0) {
+ /* clean list and start new */
+ i2400m_barker_db_exit();
+ continue;
+ }
+ result = i2400m_barker_db_add(barker);
+ if (result < 0)
+ goto error_parse_add;
+ }
+ kfree(options_orig);
+ }
+ return 0;
+
+error_parse_add:
+error_parse:
+ kfree(options_orig);
+error_add:
+ kfree(i2400m_barker_db);
+ return result;
+}
+
+
+/*
+ * Recognize a boot barker
+ *
+ * @buf: buffer holding the boot barker to check.
+ * @buf_size: size of the buffer (has to be 16 bytes). It is passed
+ * here so the function can check it for the caller.
+ *
+ * Note that as a side effect, upon identifying the obtained boot
+ * barker, this function will set i2400m->barker to point to the right
+ * barker database entry. Subsequent calls to the function will result
+ * in verifying that the same type of boot barker is returned when the
+ * device [re]boots (as long as the same device instance is used).
+ *
+ * Return: 0 if @buf matches a known boot barker. -ENOENT if the
+ * buffer in @buf doesn't match any boot barker in the database or
+ * -EILSEQ if the buffer doesn't have the right size.
+ */
+int i2400m_is_boot_barker(struct i2400m *i2400m,
+ const void *buf, size_t buf_size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_barker_db *barker;
+ int i;
+
+ result = -ENOENT;
+ if (buf_size != sizeof(i2400m_barker_db[i].data))
+ return result;
+
+ /* Short circuit if we have already discovered the barker
+ * associated with the device. */
+ if (i2400m->barker &&
+ !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data)))
+ return 0;
+
+ for (i = 0; i < i2400m_barker_db_used; i++) {
+ barker = &i2400m_barker_db[i];
+ BUILD_BUG_ON(sizeof(barker->data) != 16);
+ if (memcmp(buf, barker->data, sizeof(barker->data)))
+ continue;
+
+ if (i2400m->barker == NULL) {
+ i2400m->barker = barker;
+ d_printf(1, dev, "boot barker set to #%u/%08x\n",
+ i, le32_to_cpu(barker->data[0]));
+ if (barker->data[0] == le32_to_cpu(I2400M_NBOOT_BARKER))
+ i2400m->sboot = 0;
+ else
+ i2400m->sboot = 1;
+ } else if (i2400m->barker != barker) {
+ dev_err(dev, "HW inconsistency: device "
+ "reports a different boot barker "
+ "than set (from %08x to %08x)\n",
+ le32_to_cpu(i2400m->barker->data[0]),
+ le32_to_cpu(barker->data[0]));
+ result = -EIO;
+ } else
+ d_printf(2, dev, "boot barker confirmed #%u/%08x\n",
+ i, le32_to_cpu(barker->data[0]));
+ result = 0;
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_is_boot_barker);
+
+
+/*
+ * Verify the ack data received
+ *
+ * Given a reply to a boot mode command, chew it and verify everything
+ * is ok.
+ *
+ * @opcode: opcode which generated this ack. For error messages.
+ * @ack: pointer to ack data we received
+ * @ack_size: size of that data buffer
+ * @flags: I2400M_BM_CMD_* flags we called the command with.
+ *
+ * Way too long function -- maybe it should be further split
+ */
+static
+ssize_t __i2400m_bm_ack_verify(struct i2400m *i2400m, int opcode,
+ struct i2400m_bootrom_header *ack,
+ size_t ack_size, int flags)
+{
+ ssize_t result = -ENOMEM;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(8, dev, "(i2400m %p opcode %d ack %p size %zu)\n",
+ i2400m, opcode, ack, ack_size);
+ if (ack_size < sizeof(*ack)) {
+ result = -EIO;
+ dev_err(dev, "boot-mode cmd %d: HW BUG? notification didn't "
+ "return enough data (%zu bytes vs %zu expected)\n",
+ opcode, ack_size, sizeof(*ack));
+ goto error_ack_short;
+ }
+ result = i2400m_is_boot_barker(i2400m, ack, ack_size);
+ if (result >= 0) {
+ result = -ERESTARTSYS;
+ d_printf(6, dev, "boot-mode cmd %d: HW boot barker\n", opcode);
+ goto error_reboot;
+ }
+ if (ack_size == sizeof(i2400m_ACK_BARKER)
+ && memcmp(ack, i2400m_ACK_BARKER, sizeof(*ack)) == 0) {
+ result = -EISCONN;
+ d_printf(3, dev, "boot-mode cmd %d: HW reboot ack barker\n",
+ opcode);
+ goto error_reboot_ack;
+ }
+ result = 0;
+ if (flags & I2400M_BM_CMD_RAW)
+ goto out_raw;
+ ack->data_size = le32_to_cpu(ack->data_size);
+ ack->target_addr = le32_to_cpu(ack->target_addr);
+ ack->block_checksum = le32_to_cpu(ack->block_checksum);
+ d_printf(5, dev, "boot-mode cmd %d: notification for opcode %u "
+ "response %u csum %u rr %u da %u\n",
+ opcode, i2400m_brh_get_opcode(ack),
+ i2400m_brh_get_response(ack),
+ i2400m_brh_get_use_checksum(ack),
+ i2400m_brh_get_response_required(ack),
+ i2400m_brh_get_direct_access(ack));
+ result = -EIO;
+ if (i2400m_brh_get_signature(ack) != 0xcbbc) {
+ dev_err(dev, "boot-mode cmd %d: HW BUG? wrong signature "
+ "0x%04x\n", opcode, i2400m_brh_get_signature(ack));
+ goto error_ack_signature;
+ }
+ if (opcode != -1 && opcode != i2400m_brh_get_opcode(ack)) {
+ dev_err(dev, "boot-mode cmd %d: HW BUG? "
+ "received response for opcode %u, expected %u\n",
+ opcode, i2400m_brh_get_opcode(ack), opcode);
+ goto error_ack_opcode;
+ }
+ if (i2400m_brh_get_response(ack) != 0) { /* failed? */
+ dev_err(dev, "boot-mode cmd %d: error; hw response %u\n",
+ opcode, i2400m_brh_get_response(ack));
+ goto error_ack_failed;
+ }
+ if (ack_size < ack->data_size + sizeof(*ack)) {
+ dev_err(dev, "boot-mode cmd %d: SW BUG "
+ "driver provided only %zu bytes for %zu bytes "
+ "of data\n", opcode, ack_size,
+ (size_t) le32_to_cpu(ack->data_size) + sizeof(*ack));
+ goto error_ack_short_buffer;
+ }
+ result = ack_size;
+ /* Don't you love this stack of empty targets? Well, I don't
+ * either, but it helps track exactly who comes in here and
+ * why :) */
+error_ack_short_buffer:
+error_ack_failed:
+error_ack_opcode:
+error_ack_signature:
+out_raw:
+error_reboot_ack:
+error_reboot:
+error_ack_short:
+ d_fnend(8, dev, "(i2400m %p opcode %d ack %p size %zu) = %d\n",
+ i2400m, opcode, ack, ack_size, (int) result);
+ return result;
+}
+
+
+/**
+ * i2400m_bm_cmd - Execute a boot mode command
+ *
+ * @i2400m: device descriptor
+ *
+ * @cmd: buffer containing the command data (pointing at the header).
+ * This data can be ANYWHERE (for USB, we will copy it to a
+ * specific buffer). Make sure everything is in proper little
+ * endian.
+ *
+ * A raw buffer can also be sent; just cast it and set flags to
+ * I2400M_BM_CMD_RAW.
+ *
+ * This function will generate a checksum for you if the
+ * checksum bit in the command is set (unless I2400M_BM_CMD_RAW
+ * is set).
+ *
+ * You can use the i2400m->bm_cmd_buf to stage your commands and
+ * send them.
+ *
+ * If NULL, no command is sent (we just wait for an ack).
+ *
+ * @cmd_size: size of the command. Will be auto-padded to the
+ * bus-specific driver's padding requirements.
+ *
+ * @ack: buffer where to place the acknowledgement. If it is a regular
+ * command response, all fields will be returned with the right,
+ * native endianess.
+ *
+ * You *cannot* use i2400m->bm_ack_buf for this buffer.
+ *
+ * @ack_size: size of @ack, 16-byte aligned; you need to provide at least
+ * sizeof(*ack) bytes and then enough to contain the return data
+ * from the command
+ *
+ * @flags: see I2400M_BM_CMD_* above.
+ *
+ * @returns: bytes received by the notification; if < 0, an errno code
+ * denoting an error or:
+ *
+ * -ERESTARTSYS The device has rebooted
+ *
+ * Executes a boot-mode command and waits for a response, doing basic
+ * validation on it; if a zero length response is received, it retries
+ * waiting for a response until a non-zero one is received (timing out
+ * after %I2400M_BOOT_RETRIES retries).
+ */
+static
+ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
+ const struct i2400m_bootrom_header *cmd, size_t cmd_size,
+ struct i2400m_bootrom_header *ack, size_t ack_size,
+ int flags)
+{
+ ssize_t result = -ENOMEM, rx_bytes;
+ struct device *dev = i2400m_dev(i2400m);
+ int opcode = cmd == NULL ? -1 : i2400m_brh_get_opcode(cmd);
+
+ d_fnstart(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu)\n",
+ i2400m, cmd, cmd_size, ack, ack_size);
+ BUG_ON(ack_size < sizeof(*ack));
+ BUG_ON(i2400m->boot_mode == 0);
+
+ if (cmd != NULL) { /* send the command */
+ result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
+ if (result < 0)
+ goto error_cmd_send;
+ if ((flags & I2400M_BM_CMD_RAW) == 0)
+ d_printf(5, dev,
+ "boot-mode cmd %d csum %u rr %u da %u: "
+ "addr 0x%04x size %u block csum 0x%04x\n",
+ opcode, i2400m_brh_get_use_checksum(cmd),
+ i2400m_brh_get_response_required(cmd),
+ i2400m_brh_get_direct_access(cmd),
+ cmd->target_addr, cmd->data_size,
+ cmd->block_checksum);
+ }
+ result = i2400m->bus_bm_wait_for_ack(i2400m, ack, ack_size);
+ if (result < 0) {
+ dev_err(dev, "boot-mode cmd %d: error waiting for an ack: %d\n",
+ opcode, (int) result); /* bah, %zd doesn't work */
+ goto error_wait_for_ack;
+ }
+ rx_bytes = result;
+ /* verify the ack and read more if necessary [result is the
+ * final amount of bytes we get in the ack] */
+ result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags);
+ if (result < 0)
+ goto error_bad_ack;
+ /* Don't you love this stack of empty targets? Well, I don't
+ * either, but it helps track exactly who comes in here and
+ * why :) */
+ result = rx_bytes;
+error_bad_ack:
+error_wait_for_ack:
+error_cmd_send:
+ d_fnend(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu) = %d\n",
+ i2400m, cmd, cmd_size, ack, ack_size, (int) result);
+ return result;
+}
+
+
+/**
+ * i2400m_download_chunk - write a single chunk of data to the device's memory
+ *
+ * @i2400m: device descriptor
+ * @chunk: the buffer to write
+ * @__chunk_len: length of the buffer to write
+ * @addr: address in the device memory space
+ * @direct: bootrom write mode
+ * @do_csum: should a checksum validation be performed
+ */
+static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
+ size_t __chunk_len, unsigned long addr,
+ unsigned int direct, unsigned int do_csum)
+{
+ int ret;
+ size_t chunk_len = ALIGN(__chunk_len, I2400M_PL_ALIGN);
+ struct device *dev = i2400m_dev(i2400m);
+ struct {
+ struct i2400m_bootrom_header cmd;
+ u8 cmd_payload[];
+ } __packed *buf;
+ struct i2400m_bootrom_header ack;
+
+ d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
+ "direct %u do_csum %u)\n", i2400m, chunk, __chunk_len,
+ addr, direct, do_csum);
+ buf = i2400m->bm_cmd_buf;
+ memcpy(buf->cmd_payload, chunk, __chunk_len);
+ memset(buf->cmd_payload + __chunk_len, 0xad, chunk_len - __chunk_len);
+
+ buf->cmd.command = i2400m_brh_command(I2400M_BRH_WRITE,
+ __chunk_len & 0x3 ? 0 : do_csum,
+ __chunk_len & 0xf ? 0 : direct);
+ buf->cmd.target_addr = cpu_to_le32(addr);
+ buf->cmd.data_size = cpu_to_le32(__chunk_len);
+ ret = i2400m_bm_cmd(i2400m, &buf->cmd, sizeof(buf->cmd) + chunk_len,
+ &ack, sizeof(ack), 0);
+ if (ret >= 0)
+ ret = 0;
+ d_fnend(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx "
+ "direct %u do_csum %u) = %d\n", i2400m, chunk, __chunk_len,
+ addr, direct, do_csum, ret);
+ return ret;
+}
+
+
+/*
+ * Download a BCF file's sections to the device
+ *
+ * @i2400m: device descriptor
+ * @bcf: pointer to firmware data (first header followed by the
+ * payloads). Assumed verified and consistent.
+ * @bcf_len: length (in bytes) of the @bcf buffer.
+ *
+ * Returns: < 0 errno code on error or the offset to the jump instruction.
+ *
+ * Given a BCF file, downloads each section (a command and a payload)
+ * to the device's address space. Actually, it just executes each
+ * command in the BCF file.
+ *
+ * The section size has to be aligned to 4 bytes AND the padding has
+ * to be taken from the firmware file, as the signature takes it into
+ * account.
+ */
+static
+ssize_t i2400m_dnload_bcf(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf, size_t bcf_len)
+{
+ ssize_t ret;
+ struct device *dev = i2400m_dev(i2400m);
+ size_t offset, /* iterator offset */
+ data_size, /* Size of the data payload */
+ section_size, /* Size of the whole section (cmd + payload) */
+ section = 1;
+ const struct i2400m_bootrom_header *bh;
+ struct i2400m_bootrom_header ack;
+
+ d_fnstart(3, dev, "(i2400m %p bcf %p bcf_len %zu)\n",
+ i2400m, bcf, bcf_len);
+ /* Iterate over the command blocks in the BCF file that start
+ * after the header */
+ offset = le32_to_cpu(bcf->header_len) * sizeof(u32);
+ while (1) { /* start sending the file */
+ bh = (void *) bcf + offset;
+ data_size = le32_to_cpu(bh->data_size);
+ section_size = ALIGN(sizeof(*bh) + data_size, 4);
+ d_printf(7, dev,
+ "downloading section #%zu (@%zu %zu B) to 0x%08x\n",
+ section, offset, sizeof(*bh) + data_size,
+ le32_to_cpu(bh->target_addr));
+ /*
+ * We look for a JUMP cmd in the bootmode header, either
+ * I2400M_BRH_SIGNED_JUMP for secure boot or I2400M_BRH_JUMP
+ * for non-secure boot; the last chunk should be the bootmode
+ * header carrying the JUMP cmd.
+ */
+ if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP ||
+ i2400m_brh_get_opcode(bh) == I2400M_BRH_JUMP) {
+ d_printf(5, dev, "jump found @%zu\n", offset);
+ break;
+ }
+ if (offset + section_size > bcf_len) {
+ dev_err(dev, "fw %s: bad section #%zu, "
+ "end (@%zu) beyond EOF (@%zu)\n",
+ i2400m->fw_name, section,
+ offset + section_size, bcf_len);
+ ret = -EINVAL;
+ goto error_section_beyond_eof;
+ }
+ __i2400m_msleep(20);
+ ret = i2400m_bm_cmd(i2400m, bh, section_size,
+ &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+ if (ret < 0) {
+ dev_err(dev, "fw %s: section #%zu (@%zu %zu B) "
+ "failed %d\n", i2400m->fw_name, section,
+ offset, sizeof(*bh) + data_size, (int) ret);
+ goto error_send;
+ }
+ offset += section_size;
+ section++;
+ }
+ ret = offset;
+error_section_beyond_eof:
+error_send:
+ d_fnend(3, dev, "(i2400m %p bcf %p bcf_len %zu) = %d\n",
+ i2400m, bcf, bcf_len, (int) ret);
+ return ret;
+}
+
+
+/*
+ * Indicate if the device emitted a reboot barker that indicates
+ * "signed boot"
+ */
+static
+unsigned i2400m_boot_is_signed(struct i2400m *i2400m)
+{
+ return likely(i2400m->sboot);
+}
+
+
+/*
+ * Do the final steps of uploading firmware
+ *
+ * @bcf_hdr: BCF header we are actually using
+ * @bcf: pointer to the firmware image (which matches the first header
+ * that is followed by the actual payloads).
+ * @offset: [byte] offset into @bcf for the command we need to send.
+ *
+ * Depending on the boot mode (signed vs non-signed), different
+ * actions need to be taken.
+ */
+static
+int i2400m_dnload_finalize(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf_hdr,
+ const struct i2400m_bcf_hdr *bcf, size_t offset)
+{
+ int ret = 0;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_bootrom_header *cmd, ack;
+ struct {
+ struct i2400m_bootrom_header cmd;
+ u8 cmd_pl[];
+ } __packed *cmd_buf;
+ size_t signature_block_offset, signature_block_size;
+
+ d_fnstart(3, dev, "offset %zu\n", offset);
+ cmd = (void *) bcf + offset;
+ if (i2400m_boot_is_signed(i2400m) == 0) {
+ struct i2400m_bootrom_header jump_ack;
+ d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n",
+ le32_to_cpu(cmd->target_addr));
+ cmd_buf = i2400m->bm_cmd_buf;
+ memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
+ cmd = &cmd_buf->cmd;
+ /* now cmd points to the actual bootrom_header in cmd_buf */
+ i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
+ cmd->data_size = 0;
+ ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+ &jump_ack, sizeof(jump_ack), 0);
+ } else {
+ d_printf(1, dev, "secure boot, jumping to 0x%08x\n",
+ le32_to_cpu(cmd->target_addr));
+ cmd_buf = i2400m->bm_cmd_buf;
+ memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
+ signature_block_offset =
+ sizeof(*bcf_hdr)
+ + le32_to_cpu(bcf_hdr->key_size) * sizeof(u32)
+ + le32_to_cpu(bcf_hdr->exponent_size) * sizeof(u32);
+ signature_block_size =
+ le32_to_cpu(bcf_hdr->modulus_size) * sizeof(u32);
+ memcpy(cmd_buf->cmd_pl,
+ (void *) bcf_hdr + signature_block_offset,
+ signature_block_size);
+ ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd,
+ sizeof(cmd_buf->cmd) + signature_block_size,
+ &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+ }
+ d_fnend(3, dev, "returning %d\n", ret);
+ return ret;
+}
+
+
+/**
+ * i2400m_bootrom_init - Reboots a powered device into boot mode
+ *
+ * @i2400m: device descriptor
+ * @flags:
+ * I2400M_BRI_SOFT: a reboot barker has been seen
+ * already, so don't wait for it.
+ *
+ * I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait
+ * for a reboot barker notification. This is a one shot; if
+ * the state machine needs to send a reboot command it will.
+ *
+ * Returns:
+ *
+ * < 0 errno code on error, 0 if ok.
+ *
+ * Description:
+ *
+ * Tries hard enough to put the device in boot-mode. There are two
+ * main phases to this:
+ *
+ * a. (1) send a reboot command and (2) get a reboot barker
+ *
+ * b. (1) echo/ack the reboot sending the reboot barker back and (2)
+ * getting an ack barker in return
+ *
+ * We want to skip (a) in some cases [soft]. The state machine is
+ * horrible, but it is basically: on each phase, send what has to be
+ * sent (if any), wait for the answer and act on the answer. We might
+ * have to backtrack and retry, so we keep a max tries counter for
+ * that.
+ *
+ * It sucks because we don't know ahead of time which is going to be
+ * the reboot barker (the device might send different ones depending
+ * on its EEPROM config) and once the device reboots and waits for the
+ * echo/ack reboot barker being sent back, it doesn't understand
+ * anything else. So we can be left at the point where we don't know
+ * what to send to it -- cold reset and bus reset seem to have little
+ * effect. So the function iterates (in this case) through all the
+ * known barkers and tries them all until an ACK is
+ * received. Otherwise, it gives up.
+ *
+ * If we get a timeout after sending a warm reset, we do it again.
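+ *
+ * In rough pseudo-code, the flow implemented below is:
+ *
+ * do_reboot:
+ * send warm reset (unless I2400M_BRI_NO_REBOOT) and wait
+ * - reboot barker received: fall through to do_reboot_ack
+ * - timeout: echo every barker in i2400m_barker_db until one acks
+ * do_reboot_ack:
+ * echo the reboot barker back and wait
+ * - ack barker received: done
+ * - reboot barker / timeout: retry the ack or go back to do_reboot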
+ */
+int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_bootrom_header *cmd;
+ struct i2400m_bootrom_header ack;
+ int count = i2400m->bus_bm_retries;
+ int ack_timeout_cnt = 1;
+ unsigned i;
+
+ BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_barker_db[0].data));
+ BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER));
+
+ d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags);
+ result = -ENOMEM;
+ cmd = i2400m->bm_cmd_buf;
+ if (flags & I2400M_BRI_SOFT)
+ goto do_reboot_ack;
+do_reboot:
+ ack_timeout_cnt = 1;
+ if (--count < 0)
+ goto error_timeout;
+ d_printf(4, dev, "device reboot: reboot command [%d # left]\n",
+ count);
+ if ((flags & I2400M_BRI_NO_REBOOT) == 0)
+ i2400m_reset(i2400m, I2400M_RT_WARM);
+ result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack),
+ I2400M_BM_CMD_RAW);
+ flags &= ~I2400M_BRI_NO_REBOOT;
+ switch (result) {
+ case -ERESTARTSYS:
+ /*
+ * at this point, i2400m_bm_cmd(), through
+ * __i2400m_bm_ack_process(), has updated
+ * i2400m->barker and we are good to go.
+ */
+ d_printf(4, dev, "device reboot: got reboot barker\n");
+ break;
+ case -EISCONN: /* we don't know how it got here...but we follow it */
+ d_printf(4, dev, "device reboot: got ack barker - whatever\n");
+ goto do_reboot;
+ case -ETIMEDOUT:
+ /*
+ * Device has timed out, we might be in boot mode
+ * already and expecting an ack; if we don't know what
+ * the barker is, we just send them all. Cold reset
+ * and bus reset don't work. Beats me.
+ */
+ if (i2400m->barker != NULL) {
+ dev_err(dev, "device boot: reboot barker timed out, "
+ "trying (set) %08x echo/ack\n",
+ le32_to_cpu(i2400m->barker->data[0]));
+ goto do_reboot_ack;
+ }
+ for (i = 0; i < i2400m_barker_db_used; i++) {
+ struct i2400m_barker_db *barker = &i2400m_barker_db[i];
+ memcpy(cmd, barker->data, sizeof(barker->data));
+ result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+ &ack, sizeof(ack),
+ I2400M_BM_CMD_RAW);
+ if (result == -EISCONN) {
+ dev_warn(dev, "device boot: got ack barker "
+ "after sending echo/ack barker "
+ "#%d/%08x; rebooting j.i.c.\n",
+ i, le32_to_cpu(barker->data[0]));
+ flags &= ~I2400M_BRI_NO_REBOOT;
+ goto do_reboot;
+ }
+ }
+ dev_err(dev, "device boot: tried all the echo/acks, could "
+ "not get device to respond; giving up");
+ result = -ESHUTDOWN;
+ fallthrough;
+ case -EPROTO:
+ case -ESHUTDOWN: /* dev is gone */
+ case -EINTR: /* user cancelled */
+ goto error_dev_gone;
+ default:
+ dev_err(dev, "device reboot: error %d while waiting "
+ "for reboot barker - rebooting\n", result);
+ d_dump(1, dev, &ack, result);
+ goto do_reboot;
+ }
+ /* At this point we ack back with 4 REBOOT barkers and expect
+ * 4 ACK barkers. This is ugly, as we send a raw command --
+ * hence the cast. _bm_cmd() will catch the reboot ack
+ * notification and report it as -EISCONN. */
+do_reboot_ack:
+ d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count);
+ memcpy(cmd, i2400m->barker->data, sizeof(i2400m->barker->data));
+ result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+ &ack, sizeof(ack), I2400M_BM_CMD_RAW);
+ switch (result) {
+ case -ERESTARTSYS:
+ d_printf(4, dev, "reboot ack: got reboot barker - retrying\n");
+ if (--count < 0)
+ goto error_timeout;
+ goto do_reboot_ack;
+ case -EISCONN:
+ d_printf(4, dev, "reboot ack: got ack barker - good\n");
+ break;
+ case -ETIMEDOUT: /* no response, maybe it is the other type? */
+ if (ack_timeout_cnt-- < 0) {
+ d_printf(4, dev, "reboot ack timedout: retrying\n");
+ goto do_reboot_ack;
+ } else {
+ dev_err(dev, "reboot ack timedout too long: "
+ "trying reboot\n");
+ goto do_reboot;
+ }
+ break;
+ case -EPROTO:
+ case -ESHUTDOWN: /* dev is gone */
+ goto error_dev_gone;
+ default:
+ dev_err(dev, "device reboot ack: error %d while waiting for "
+ "reboot ack barker - rebooting\n", result);
+ goto do_reboot;
+ }
+ d_printf(2, dev, "device reboot ack: got ack barker - boot done\n");
+ result = 0;
+exit_timeout:
+error_dev_gone:
+ d_fnend(4, dev, "(i2400m %p flags 0x%08x) = %d\n",
+ i2400m, flags, result);
+ return result;
+
+error_timeout:
+ dev_err(dev, "Timed out waiting for reboot ack\n");
+ result = -ETIMEDOUT;
+ goto exit_timeout;
+}
+
+
+/*
+ * Read the MAC addr
+ *
+ * The position this function reads is fixed in device memory and
+ * always available, even without firmware.
+ *
+ * Note we specify we want to read only six bytes, but provide space
+ * for 16, as we always get it rounded up.
+ */
+int i2400m_read_mac_addr(struct i2400m *i2400m)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct i2400m_bootrom_header *cmd;
+ struct {
+ struct i2400m_bootrom_header ack;
+ u8 ack_pl[16];
+ } __packed ack_buf;
+
+ d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+ cmd = i2400m->bm_cmd_buf;
+ cmd->command = i2400m_brh_command(I2400M_BRH_READ, 0, 1);
+ cmd->target_addr = cpu_to_le32(0x00203fe8);
+ cmd->data_size = cpu_to_le32(6);
+ result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
+ &ack_buf.ack, sizeof(ack_buf), 0);
+ if (result < 0) {
+ dev_err(dev, "BM: read mac addr failed: %d\n", result);
+ goto error_read_mac;
+ }
+ d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
+ if (i2400m->bus_bm_mac_addr_impaired == 1) {
+ ack_buf.ack_pl[0] = 0x00;
+ ack_buf.ack_pl[1] = 0x16;
+ ack_buf.ack_pl[2] = 0xd3;
+ get_random_bytes(&ack_buf.ack_pl[3], 3);
+ dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
+ "mac addr is %pM\n", ack_buf.ack_pl);
+ result = 0;
+ }
+ net_dev->addr_len = ETH_ALEN;
+ memcpy(net_dev->dev_addr, ack_buf.ack_pl, ETH_ALEN);
+error_read_mac:
+ d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+}
+
+
+/*
+ * Initialize a non-signed boot
+ *
+ * This implies poking a set of magic values into the device's memory,
+ * taken from the bus-specific poke table (if one was provided). Note
+ * the values are already little endian, converted at declaration time
+ * by the I2400M_FW_POKE() macro.
+ */
+static
+int i2400m_dnload_init_nonsigned(struct i2400m *i2400m)
+{
+ unsigned i = 0;
+ int ret = 0;
+ struct device *dev = i2400m_dev(i2400m);
+ d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+ if (i2400m->bus_bm_pokes_table) {
+ while (i2400m->bus_bm_pokes_table[i].address) {
+ ret = i2400m_download_chunk(
+ i2400m,
+ &i2400m->bus_bm_pokes_table[i].data,
+ sizeof(i2400m->bus_bm_pokes_table[i].data),
+ i2400m->bus_bm_pokes_table[i].address, 1, 1);
+ if (ret < 0)
+ break;
+ i++;
+ }
+ }
+ d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
+ return ret;
+}
+
+
+/*
+ * Initialize the signed boot process
+ *
+ * @i2400m: device descriptor
+ *
+ * @bcf_hdr: pointer to the firmware header; assumes it is fully in
+ * memory (it has gone through basic validation).
+ *
+ * Returns: 0 if ok, < 0 errno code on error, -ERESTARTSYS if the hw
+ * rebooted.
+ *
+ * This writes the firmware BCF header to the device using the
+ * HASH_PAYLOAD_ONLY command.
+ */
+static
+int i2400m_dnload_init_signed(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf_hdr)
+{
+ int ret;
+ struct device *dev = i2400m_dev(i2400m);
+ struct {
+ struct i2400m_bootrom_header cmd;
+ struct i2400m_bcf_hdr cmd_pl;
+ } __packed *cmd_buf;
+ struct i2400m_bootrom_header ack;
+
+ d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr);
+ cmd_buf = i2400m->bm_cmd_buf;
+ cmd_buf->cmd.command =
+ i2400m_brh_command(I2400M_BRH_HASH_PAYLOAD_ONLY, 0, 0);
+ cmd_buf->cmd.target_addr = 0;
+ cmd_buf->cmd.data_size = cpu_to_le32(sizeof(cmd_buf->cmd_pl));
+ memcpy(&cmd_buf->cmd_pl, bcf_hdr, sizeof(*bcf_hdr));
+ ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd, sizeof(*cmd_buf),
+ &ack, sizeof(ack), 0);
+ if (ret >= 0)
+ ret = 0;
+ d_fnend(5, dev, "(i2400m %p bcf_hdr %p) = %d\n", i2400m, bcf_hdr, ret);
+ return ret;
+}
+
+
+/*
+ * Initialize the firmware download on the device side
+ *
+ * Multiplex to the one that matters based on the device's mode
+ * (signed or non-signed).
+ */
+static
+int i2400m_dnload_init(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf_hdr)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+
+ if (i2400m_boot_is_signed(i2400m)) {
+ d_printf(1, dev, "signed boot\n");
+ result = i2400m_dnload_init_signed(i2400m, bcf_hdr);
+ if (result == -ERESTARTSYS)
+ return result;
+ if (result < 0)
+ dev_err(dev, "firmware %s: signed boot download "
+ "initialization failed: %d\n",
+ i2400m->fw_name, result);
+ } else {
+ /* non-signed boot process without pokes */
+ d_printf(1, dev, "non-signed boot\n");
+ result = i2400m_dnload_init_nonsigned(i2400m);
+ if (result == -ERESTARTSYS)
+ return result;
+ if (result < 0)
+ dev_err(dev, "firmware %s: non-signed download "
+ "initialization failed: %d\n",
+ i2400m->fw_name, result);
+ }
+ return result;
+}
+
+
+/*
+ * Run consistency tests on a single BCF header of the firmware file
+ *
+ * Check that the header describes firmware made for the i2400m device
+ * (module type, vendor, header version), etc. These checks are mostly
+ * informative, as the device will make them too; but the driver's
+ * response is more informative on what went wrong.
+ */
+static
+int i2400m_fw_hdr_check(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf_hdr,
+ size_t index, size_t offset)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ unsigned module_type, header_len, major_version, minor_version,
+ module_id, module_vendor, date, size;
+
+ module_type = le32_to_cpu(bcf_hdr->module_type);
+ header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
+ major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
+ >> 16;
+ minor_version = le32_to_cpu(bcf_hdr->header_version) & 0x0000ffff;
+ module_id = le32_to_cpu(bcf_hdr->module_id);
+ module_vendor = le32_to_cpu(bcf_hdr->module_vendor);
+ date = le32_to_cpu(bcf_hdr->date);
+ size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
+
+ d_printf(1, dev, "firmware %s #%zd@%08zx: BCF header "
+ "type:vendor:id 0x%x:%x:%x v%u.%u (%u/%u B) built %08x\n",
+ i2400m->fw_name, index, offset,
+ module_type, module_vendor, module_id,
+ major_version, minor_version, header_len, size, date);
+
+ /* Hard errors */
+ if (major_version != 1) {
+ dev_err(dev, "firmware %s #%zd@%08zx: major header version "
+ "v%u.%u not supported\n",
+ i2400m->fw_name, index, offset,
+ major_version, minor_version);
+ return -EBADF;
+ }
+
+ if (module_type != 6) { /* built for the right hardware? */
+ dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
+ "type 0x%x; aborting\n",
+ i2400m->fw_name, index, offset,
+ module_type);
+ return -EBADF;
+ }
+
+ if (module_vendor != 0x8086) {
+ dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
+ "vendor 0x%x; aborting\n",
+ i2400m->fw_name, index, offset, module_vendor);
+ return -EBADF;
+ }
+
+ if (date < 0x20080300)
+ dev_warn(dev, "firmware %s #%zd@%08zx: build date %08x "
+ "too old; unsupported\n",
+ i2400m->fw_name, index, offset, date);
+ return 0;
+}
+
+
+/*
+ * Run consistency tests on the firmware file and load up headers
+ *
+ * Check for the firmware being made for the i2400m device,
+ * etc...These checks are mostly informative, as the device will make
+ * them too; but the driver's response is more informative on what
+ * went wrong.
+ *
+ * This will also look at all the headers present on the firmware
+ * file, and update i2400m->fw_hdrs to point to them.
+ */
+static
+int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ size_t headers = 0;
+ const struct i2400m_bcf_hdr *bcf_hdr;
+ const void *itr, *next, *top;
+ size_t slots = 0, used_slots = 0;
+
+ for (itr = bcf, top = itr + bcf_size;
+ itr < top;
+ headers++, itr = next) {
+ size_t leftover, offset, header_len, size;
+
+ leftover = top - itr;
+ offset = itr - bcf;
+ if (leftover <= sizeof(*bcf_hdr)) {
+ dev_err(dev, "firmware %s: %zu B left at @%zx, "
+ "not enough for BCF header\n",
+ i2400m->fw_name, leftover, offset);
+ break;
+ }
+ bcf_hdr = itr;
+ /* Only the first header is supposed to be followed by
+ * payload */
+ header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
+ size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
+ if (headers == 0)
+ next = itr + size;
+ else
+ next = itr + header_len;
+
+ result = i2400m_fw_hdr_check(i2400m, bcf_hdr, headers, offset);
+ if (result < 0)
+ continue;
+ if (used_slots + 1 >= slots) {
+ /* +1 -> we need to account for the one we'll
+ * occupy and at least an extra one for
+ * always being NULL */
+ result = i2400m_zrealloc_2x(
+ (void **) &i2400m->fw_hdrs, &slots,
+ sizeof(i2400m->fw_hdrs[0]),
+ GFP_KERNEL);
+ if (result < 0)
+ goto error_zrealloc;
+ }
+ i2400m->fw_hdrs[used_slots] = bcf_hdr;
+ used_slots++;
+ }
+ if (headers == 0) {
+ dev_err(dev, "firmware %s: no usable headers found\n",
+ i2400m->fw_name);
+ result = -EBADF;
+ } else
+ result = 0;
+error_zrealloc:
+ return result;
+}
+
+
+/*
+ * Match a barker to a BCF header module ID
+ *
+ * The device sends a barker which tells the firmware loader which
+ * header in the BCF file has to be used. This does the matching.
+ */
+static
+unsigned i2400m_bcf_hdr_match(struct i2400m *i2400m,
+ const struct i2400m_bcf_hdr *bcf_hdr)
+{
+ u32 barker = le32_to_cpu(i2400m->barker->data[0])
+ & 0x7fffffff;
+ u32 module_id = le32_to_cpu(bcf_hdr->module_id)
+ & 0x7fffffff; /* high bit used for something else */
+
+ /* special case for 5x50 */
+ if (barker == I2400M_SBOOT_BARKER && module_id == 0)
+ return 1;
+ if (module_id == barker)
+ return 1;
+ return 0;
+}
+
+static
+const struct i2400m_bcf_hdr *i2400m_bcf_hdr_find(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_bcf_hdr **bcf_itr, *bcf_hdr;
+ unsigned i = 0;
+ u32 barker = le32_to_cpu(i2400m->barker->data[0]);
+
+ d_printf(2, dev, "finding BCF header for barker %08x\n", barker);
+ if (barker == I2400M_NBOOT_BARKER) {
+ bcf_hdr = i2400m->fw_hdrs[0];
+ d_printf(1, dev, "using BCF header #%u/%08x for non-signed "
+ "barker\n", 0, le32_to_cpu(bcf_hdr->module_id));
+ return bcf_hdr;
+ }
+ for (bcf_itr = i2400m->fw_hdrs; *bcf_itr != NULL; bcf_itr++, i++) {
+ bcf_hdr = *bcf_itr;
+ if (i2400m_bcf_hdr_match(i2400m, bcf_hdr)) {
+ d_printf(1, dev, "hit on BCF hdr #%u/%08x\n",
+ i, le32_to_cpu(bcf_hdr->module_id));
+ return bcf_hdr;
+ } else
+ d_printf(1, dev, "miss on BCF hdr #%u/%08x\n",
+ i, le32_to_cpu(bcf_hdr->module_id));
+ }
+ dev_err(dev, "cannot find a matching BCF header for barker %08x\n",
+ barker);
+ return NULL;
+}
+
+
+/*
+ * Download the firmware to the device
+ *
+ * @i2400m: device descriptor
+ * @bcf: pointer to loaded (and minimally verified for consistency)
+ * firmware
+ * @fw_size: size of the @bcf buffer (header plus payloads)
+ *
+ * The process for doing this is described in this file's header.
+ *
+ * Note we only reinitialize boot-mode if the flags say so. Some hw
+ * iterations need it, some don't. In any case, if we loop, we always
+ * need to reinitialize the boot ROM, hence the flags modification.
+ */
+static
+int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf,
+ size_t fw_size, enum i2400m_bri flags)
+{
+ int ret = 0;
+ struct device *dev = i2400m_dev(i2400m);
+ int count = i2400m->bus_bm_retries;
+ const struct i2400m_bcf_hdr *bcf_hdr;
+ size_t bcf_size;
+
+ d_fnstart(5, dev, "(i2400m %p bcf %p fw size %zu)\n",
+ i2400m, bcf, fw_size);
+ i2400m->boot_mode = 1;
+ wmb(); /* Make sure other readers see it */
+hw_reboot:
+ if (count-- == 0) {
+ ret = -ERESTARTSYS;
+ dev_err(dev, "device rebooted too many times, aborting\n");
+ goto error_too_many_reboots;
+ }
+ if (flags & I2400M_BRI_MAC_REINIT) {
+ ret = i2400m_bootrom_init(i2400m, flags);
+ if (ret < 0) {
+ dev_err(dev, "bootrom init failed: %d\n", ret);
+ goto error_bootrom_init;
+ }
+ }
+ flags |= I2400M_BRI_MAC_REINIT;
+
+ /*
+ * Initialize the download, push the bytes to the device and
+ * then jump to the new firmware. Note @ret is passed with the
+ * offset of the jump instruction to _dnload_finalize()
+ *
+ * Note we need to use the BCF header in the firmware image
+ * that matches the barker that the device sent when it
+ * rebooted, so it has to be passed along.
+ */
+ ret = -EBADF;
+ bcf_hdr = i2400m_bcf_hdr_find(i2400m);
+ if (bcf_hdr == NULL)
+ goto error_bcf_hdr_find;
+
+ ret = i2400m_dnload_init(i2400m, bcf_hdr);
+ if (ret == -ERESTARTSYS)
+ goto error_dev_rebooted;
+ if (ret < 0)
+ goto error_dnload_init;
+
+ /*
+ * bcf_size refers to one header size plus the fw sections size
+ * indicated by the header, i.e. if there are other extended headers
+ * at the tail, they are not counted
+ */
+ bcf_size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
+ ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size);
+ if (ret == -ERESTARTSYS)
+ goto error_dev_rebooted;
+ if (ret < 0) {
+ dev_err(dev, "fw %s: download failed: %d\n",
+ i2400m->fw_name, ret);
+ goto error_dnload_bcf;
+ }
+
+ ret = i2400m_dnload_finalize(i2400m, bcf_hdr, bcf, ret);
+ if (ret == -ERESTARTSYS)
+ goto error_dev_rebooted;
+ if (ret < 0) {
+ dev_err(dev, "fw %s: "
+ "download finalization failed: %d\n",
+ i2400m->fw_name, ret);
+ goto error_dnload_finalize;
+ }
+
+ d_printf(2, dev, "fw %s successfully uploaded\n",
+ i2400m->fw_name);
+ i2400m->boot_mode = 0;
+ wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
+error_dnload_finalize:
+error_dnload_bcf:
+error_dnload_init:
+error_bcf_hdr_find:
+error_bootrom_init:
+error_too_many_reboots:
+ d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n",
+ i2400m, bcf, fw_size, ret);
+ return ret;
+
+error_dev_rebooted:
+ dev_err(dev, "device rebooted, %d tries left\n", count);
+ /* we got the notification already, no need to wait for it again */
+ flags |= I2400M_BRI_SOFT;
+ goto hw_reboot;
+}
+
+static
+int i2400m_fw_bootstrap(struct i2400m *i2400m, const struct firmware *fw,
+ enum i2400m_bri flags)
+{
+ int ret;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_bcf_hdr *bcf; /* Firmware data */
+
+ d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+ bcf = (void *) fw->data;
+ ret = i2400m_fw_check(i2400m, bcf, fw->size);
+ if (ret >= 0)
+ ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
+ if (ret < 0)
+ dev_err(dev, "%s: cannot use: %d, skipping\n",
+ i2400m->fw_name, ret);
+ kfree(i2400m->fw_hdrs);
+ i2400m->fw_hdrs = NULL;
+ d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
+ return ret;
+}
+
+
+/* Refcounted container for firmware data */
+struct i2400m_fw {
+ struct kref kref;
+ const struct firmware *fw;
+};
+
+
+static
+void i2400m_fw_destroy(struct kref *kref)
+{
+ struct i2400m_fw *i2400m_fw =
+ container_of(kref, struct i2400m_fw, kref);
+ release_firmware(i2400m_fw->fw);
+ kfree(i2400m_fw);
+}
+
+
+static
+struct i2400m_fw *i2400m_fw_get(struct i2400m_fw *i2400m_fw)
+{
+ if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
+ kref_get(&i2400m_fw->kref);
+ return i2400m_fw;
+}
+
+
+static
+void i2400m_fw_put(struct i2400m_fw *i2400m_fw)
+{
+ kref_put(&i2400m_fw->kref, i2400m_fw_destroy);
+}
+
+
+/**
+ * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware
+ *
+ * @i2400m: device descriptor
+ *
+ * Returns: >= 0 if ok, < 0 errno code on error.
+ *
+ * This sets up the firmware upload environment, loads the firmware
+ * file from disk, verifies it and then calls the firmware upload
+ * process per se.
+ *
+ * Can be called either from probe or after a warm reset. Cannot be
+ * called from within an interrupt. All the flow in this code is
+ * single-threaded; all I/Os are synchronous.
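+ *
+ * A minimal usage sketch from a bus-specific driver (the flag choice
+ * and the error label are just for illustration):
+ *
+ * result = i2400m_dev_bootstrap(i2400m, I2400M_BRI_MAC_REINIT);
+ * if (result < 0)
+ * goto error_bootstrap;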
+ */
+int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
+{
+ int ret, itr;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_fw *i2400m_fw;
+ const struct firmware *fw;
+ const char *fw_name;
+
+ d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
+
+ ret = -ENODEV;
+ spin_lock(&i2400m->rx_lock);
+ i2400m_fw = i2400m_fw_get(i2400m->fw_cached);
+ spin_unlock(&i2400m->rx_lock);
+ if (i2400m_fw == (void *) ~0) {
+ dev_err(dev, "can't load firmware now!");
+ goto out;
+ } else if (i2400m_fw != NULL) {
+ dev_info(dev, "firmware %s: loading from cache\n",
+ i2400m->fw_name);
+ ret = i2400m_fw_bootstrap(i2400m, i2400m_fw->fw, flags);
+ i2400m_fw_put(i2400m_fw);
+ goto out;
+ }
+
+ /* Load firmware files to memory. */
+ for (itr = 0, ret = -ENOENT; ; itr++) {
+ fw_name = i2400m->bus_fw_names[itr];
+ if (fw_name == NULL) {
+ dev_err(dev, "Could not find a usable firmware image\n");
+ break;
+ }
+ d_printf(1, dev, "trying firmware %s (%d)\n", fw_name, itr);
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret < 0) {
+ dev_err(dev, "fw %s: cannot load file: %d\n",
+ fw_name, ret);
+ continue;
+ }
+ i2400m->fw_name = fw_name;
+ ret = i2400m_fw_bootstrap(i2400m, fw, flags);
+ release_firmware(fw);
+ if (ret >= 0) /* firmware loaded successfully */
+ break;
+ i2400m->fw_name = NULL;
+ }
+out:
+ d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap);
+
+
+void i2400m_fw_cache(struct i2400m *i2400m)
+{
+ int result;
+ struct i2400m_fw *i2400m_fw;
+ struct device *dev = i2400m_dev(i2400m);
+
+ /* if there is anything there, free it -- now, this'd be weird */
+ spin_lock(&i2400m->rx_lock);
+ i2400m_fw = i2400m->fw_cached;
+ spin_unlock(&i2400m->rx_lock);
+ if (i2400m_fw != NULL && i2400m_fw != (void *) ~0) {
+ i2400m_fw_put(i2400m_fw);
+ WARN(1, "%s:%u: still cached fw still present?\n",
+ __func__, __LINE__);
+ }
+
+ if (i2400m->fw_name == NULL) {
+ dev_err(dev, "firmware n/a: can't cache\n");
+ i2400m_fw = (void *) ~0;
+ goto out;
+ }
+
+ i2400m_fw = kzalloc(sizeof(*i2400m_fw), GFP_ATOMIC);
+ if (i2400m_fw == NULL)
+ goto out;
+ kref_init(&i2400m_fw->kref);
+ result = request_firmware(&i2400m_fw->fw, i2400m->fw_name, dev);
+ if (result < 0) {
+ dev_err(dev, "firmware %s: failed to cache: %d\n",
+ i2400m->fw_name, result);
+ kfree(i2400m_fw);
+ i2400m_fw = (void *) ~0;
+ } else
+ dev_info(dev, "firmware %s: cached\n", i2400m->fw_name);
+out:
+ spin_lock(&i2400m->rx_lock);
+ i2400m->fw_cached = i2400m_fw;
+ spin_unlock(&i2400m->rx_lock);
+}
+
+
+void i2400m_fw_uncache(struct i2400m *i2400m)
+{
+ struct i2400m_fw *i2400m_fw;
+
+ spin_lock(&i2400m->rx_lock);
+ i2400m_fw = i2400m->fw_cached;
+ i2400m->fw_cached = NULL;
+ spin_unlock(&i2400m->rx_lock);
+
+ if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
+ i2400m_fw_put(i2400m_fw);
+}
+
diff --git a/drivers/staging/wimax/i2400m/i2400m-usb.h b/drivers/staging/wimax/i2400m/i2400m-usb.h
new file mode 100644
index 000000000000..eff4f464a23e
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/i2400m-usb.h
@@ -0,0 +1,275 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB-specific i2400m driver definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * - Initial implementation
+ *
+ *
+ * This driver implements the bus-specific part of the i2400m for
+ * USB. Check i2400m.h for a generic driver description.
+ *
+ * ARCHITECTURE
+ *
+ * This driver listens to notifications sent from the notification
+ * endpoint (in usb-notif.c); when data is ready to read, the code in
+ * there schedules a read from the device (usb-rx.c) and then passes
+ * the data to the generic RX code (rx.c).
+ *
+ * When the generic driver needs to send data (network or control), it
+ * queues up in the TX FIFO (tx.c) and that will notify the driver
+ * through the i2400m->bus_tx_kick() callback
+ * (usb-tx.c:i2400mu_bus_tx_kick) which will send the items in the
+ * FIFO queue.
+ *
+ * This driver also implements the USB-specific ops the generic
+ * driver uses to set up / tear down communication with the device
+ * [i2400m_bus_dev_start() and i2400m_bus_dev_stop()], resetting the
+ * device [i2400m_bus_reset()] and performing firmware upload
+ * [i2400m_bus_bm_cmd() and i2400m_bus_bm_wait_for_ack()].
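+ *
+ * Roughly, the data flow described above is:
+ *
+ * notification EP -> usb-notif.c -> kicks RX kthread (usb-rx.c)
+ * -> generic RX code (rx.c)
+ * TX FIFO (tx.c) -> i2400mu_bus_tx_kick() -> TX kthread (usb-tx.c)
+ * -> bulk-out EP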
+ */
+
+#ifndef __I2400M_USB_H__
+#define __I2400M_USB_H__
+
+#include "i2400m.h"
+#include <linux/kthread.h>
+
+
+/*
+ * Error Density Count: cheapo error density (over time) counter
+ *
+ * Originally by Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * Embed an 'struct edc' somewhere. Each time there is a soft or
+ * retryable error, call edc_inc() and check if the error top
+ * watermark has been reached.
+ */
+enum {
+ EDC_MAX_ERRORS = 10,
+ EDC_ERROR_TIMEFRAME = HZ,
+};
+
+/* error density counter */
+struct edc {
+ unsigned long timestart;
+ u16 errorcount;
+};
+
+struct i2400m_endpoint_cfg {
+ unsigned char bulk_out;
+ unsigned char notification;
+ unsigned char reset_cold;
+ unsigned char bulk_in;
+};
+
+static inline void edc_init(struct edc *edc)
+{
+ edc->timestart = jiffies;
+}
+
+/**
+ * edc_inc - report a soft error and check if we are over the watermark
+ *
+ * @edc: pointer to error density counter.
+ * @max_err: maximum number of errors we can accept over the timeframe
+ * @timeframe: length of the timeframe (in jiffies).
+ *
+ * Returns: 1 if the maximum number of acceptable errors per timeframe
+ * has been exceeded, 0 otherwise.
+ *
+ * This is a way to determine if the number of acceptable errors per time
+ * period has been exceeded. It is not accurate as there are cases in which
+ * this scheme will not work, for example if there are periodic occurrences
+ * of errors that straddle updates to the start time. This scheme is
+ * sufficient for our usage.
+ *
+ * To use, embed a 'struct edc' somewhere, initialize it with
+ * edc_init() and when an error hits:
+ *
+ * if (do_something_fails_with_a_soft_error) {
+ * if (edc_inc(&my->edc, MAX_ERRORS, MAX_TIMEFRAME))
+ * Oops, hard error, do something about it
+ * else
+ * Retry or ignore, depending on whatever
+ * }
+ */
+static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
+{
+ unsigned long now;
+
+ now = jiffies;
+ if (time_after(now, edc->timestart + timeframe)) {
+ edc->errorcount = 1;
+ edc->timestart = now;
+ } else if (++edc->errorcount > max_err) {
+ edc->errorcount = 0;
+ edc->timestart = now;
+ return 1;
+ }
+ return 0;
+}
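+/*
+ * A simplified sketch of how the USB completion paths use this with
+ * the urb_edc counter embedded in struct i2400mu (what "reset" means
+ * here is up to the caller):
+ *
+ * if (urb->status == -EPROTO) { soft, retryable error
+ * if (edc_inc(&i2400mu->urb_edc,
+ * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ * too many errors in too little time: reset the device
+ * else
+ * resubmit the URB and keep going
+ * }
+ */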
+
+/* Host-Device interface for USB */
+enum {
+ I2400M_USB_BOOT_RETRIES = 3,
+ I2400MU_MAX_NOTIFICATION_LEN = 256,
+ I2400MU_BLK_SIZE = 16,
+ I2400MU_PL_SIZE_MAX = 0x3EFF,
+
+ /* Device IDs */
+ USB_DEVICE_ID_I6050 = 0x0186,
+ USB_DEVICE_ID_I6050_2 = 0x0188,
+ USB_DEVICE_ID_I6150 = 0x07d6,
+ USB_DEVICE_ID_I6150_2 = 0x07d7,
+ USB_DEVICE_ID_I6150_3 = 0x07d9,
+ USB_DEVICE_ID_I6250 = 0x0187,
+};
+
+
+/**
+ * struct i2400mu - descriptor for a USB connected i2400m
+ *
+ * @i2400m: bus-generic i2400m implementation; has to be first (see
+ * its documentation in i2400m.h).
+ *
+ * @usb_dev: pointer to our USB device
+ *
+ * @usb_iface: pointer to our USB interface
+ *
+ * @urb_edc: error density counter; used to keep a density-on-time tab
+ * on how many soft (retryable or ignorable) errors we get. If we
+ * go over the threshold, we consider the bus transport is failing
+ * too much and reset.
+ *
+ * @notif_urb: URB for receiving notifications from the device.
+ *
+ * @tx_kthread: thread we use for data TX. We use a thread because in
+ * order to do deep power saving and put the device to sleep, we
+ * need to call usb_autopm_*() [blocking functions].
+ *
+ * @tx_wq: waitqueue for the TX kthread to sleep when there is no data
+ * to be sent; when more data is available, it is woken up by
+ * i2400mu_bus_tx_kick().
+ *
+ * @rx_kthread: thread we use for data RX. We use a thread because in
+ * order to do deep power saving and put the device to sleep, we
+ * need to call usb_autopm_*() [blocking functions].
+ *
+ * @rx_wq: waitqueue for the RX kthread to sleep when there is no data
+ * to receive. When data is available, it is woken up by
+ * usb-notif.c:i2400mu_notification_grok().
+ *
+ * @rx_pending_count: number of rx-data-ready notifications that were
+ * still not handled by the RX kthread.
+ *
+ * @rx_size: current RX buffer size that is being used.
+ *
+ * @rx_size_acc: accumulator of the sizes of the previous read
+ * transactions.
+ *
+ * @rx_size_cnt: number of read transactions accumulated in
+ * @rx_size_acc.
+ *
+ * @do_autopm: disable(0)/enable(>0) calling the
+ * usb_autopm_get/put_interface() barriers when executing
+ * commands. See doc in i2400mu_suspend() for more information.
+ *
+ * @rx_size_auto_shrink: if true, the rx_size is shrunk
+ * automatically based on the average size of the received
+ * transactions. This allows the receive code to allocate smaller
+ * chunks of memory and thus reduce pressure on the memory
+ * allocator by not wasting so much space. By default it is
+ * enabled.
+ *
+ * @debugfs_dentry: hookup for debugfs files.
+ * These have to be in a separate directory, a child of
+ * (wimax_dev->debugfs_dentry) so they can be removed when the
+ * module unloads, as we don't keep each dentry.
+ */
+struct i2400mu {
+ struct i2400m i2400m; /* FIRST! See doc */
+
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+ struct edc urb_edc; /* Error density counter */
+ struct i2400m_endpoint_cfg endpoint_cfg;
+
+ struct urb *notif_urb;
+ struct task_struct *tx_kthread;
+ wait_queue_head_t tx_wq;
+
+ struct task_struct *rx_kthread;
+ wait_queue_head_t rx_wq;
+ atomic_t rx_pending_count;
+ size_t rx_size, rx_size_acc, rx_size_cnt;
+ atomic_t do_autopm;
+ u8 rx_size_auto_shrink;
+
+ struct dentry *debugfs_dentry;
+ unsigned i6050:1; /* 1 if this is a 6050 based SKU */
+};
+
+
+static inline
+void i2400mu_init(struct i2400mu *i2400mu)
+{
+ i2400m_init(&i2400mu->i2400m);
+ edc_init(&i2400mu->urb_edc);
+ init_waitqueue_head(&i2400mu->tx_wq);
+ atomic_set(&i2400mu->rx_pending_count, 0);
+ init_waitqueue_head(&i2400mu->rx_wq);
+ i2400mu->rx_size = PAGE_SIZE - sizeof(struct skb_shared_info);
+ atomic_set(&i2400mu->do_autopm, 1);
+ i2400mu->rx_size_auto_shrink = 1;
+}
+
+int i2400mu_notification_setup(struct i2400mu *);
+void i2400mu_notification_release(struct i2400mu *);
+
+int i2400mu_rx_setup(struct i2400mu *);
+void i2400mu_rx_release(struct i2400mu *);
+void i2400mu_rx_kick(struct i2400mu *);
+
+int i2400mu_tx_setup(struct i2400mu *);
+void i2400mu_tx_release(struct i2400mu *);
+void i2400mu_bus_tx_kick(struct i2400m *);
+
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+ const struct i2400m_bootrom_header *, size_t,
+ int);
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+ struct i2400m_bootrom_header *, size_t);
+#endif /* #ifndef __I2400M_USB_H__ */
diff --git a/drivers/staging/wimax/i2400m/i2400m.h b/drivers/staging/wimax/i2400m/i2400m.h
new file mode 100644
index 000000000000..de22cc6f2c5c
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/i2400m.h
@@ -0,0 +1,970 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Declarations for bus-generic internal APIs
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * - Initial implementation
+ *
+ *
+ * GENERAL DRIVER ARCHITECTURE
+ *
+ * The i2400m driver is split in the following two major parts:
+ *
+ * - bus specific driver
+ * - bus generic driver (this part)
+ *
+ * The bus specific driver sets up stuff specific to the bus the
+ * device is connected to (USB, PCI, tam-tam...non-authoritative
+ * nor binding list) which is basically the device-model management
+ * (probe/disconnect, etc), moving data from device to kernel and
+ * back, doing the power saving details and resetting the device.
+ *
+ * For details on each bus-specific driver, see its include file,
+ * i2400m-BUSNAME.h
+ *
+ * The bus-generic functionality break up is:
+ *
+ * - Firmware upload: fw.c - takes care of uploading firmware to the
+ * device. The bus-specific driver just needs to provide a way to
+ * execute boot-mode commands and to reset the device.
+ *
+ * - RX handling: rx.c - receives data from the bus-specific code and
+ * feeds it to the network or WiMAX stack or uses it to modify
+ * the driver state. bus-specific driver only has to receive
+ * frames and pass them to this module.
+ *
+ * - TX handling: tx.c - manages the TX FIFO queue and provides means
+ * for the bus-specific TX code to pull data from the FIFO
+ * queue. bus-specific code just pulls frames from this module
+ * to send them to the device.
+ *
+ * - netdev glue: netdev.c - interface with Linux networking
+ * stack. Pass around data frames, and configure when the
+ * device is up and running or shutdown (through ifconfig up /
+ * down). Bus-generic only.
+ *
+ * - control ops: control.c - implements various commands for
+ * controlling the device. bus-generic only.
+ *
+ * - device model glue: driver.c - implements helpers for the
+ * device-model glue done by the bus-specific layer
+ * (setup/release the driver resources), turning the device on
+ * and off, handling the device reboots/resets and a few simple
+ * WiMAX stack ops.
+ *
+ * Code is also broken up in linux-glue / device-glue.
+ *
+ * Linux glue contains functions that deal mostly with gluing with the
+ * rest of the Linux kernel.
+ *
+ * Device-glue are functions that deal mostly with the way the device
+ * does things and talk the device's language.
+ *
+ * device-glue code is licensed BSD so other open source OSes can take
+ * it to implement their drivers.
+ *
+ *
+ * APIs AND HEADER FILES
+ *
+ * This bus generic code exports three APIs:
+ *
+ * - HDI (host-device interface) definitions common to all busses
+ * (linux-wimax-i2400m.h in this directory); these can also be used
+ * by user space code.
+ * - internal API for the bus-generic code
+ * - external API for the bus-specific drivers
+ *
+ *
+ * LIFE CYCLE:
+ *
+ * When the bus-specific driver probes, it allocates a network device
+ * with enough space for its data structure, which must contain a
+ * &struct i2400m at the top.
+ *
+ * On probe, it needs to fill the i2400m members marked as [fill], as
+ * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The
+ * i2400m driver will only register with the WiMAX and network stacks;
+ * the only access done to the device is to read the MAC address so we
+ * can register a network device.
+ *
+ * The high-level call flow is:
+ *
+ * bus_probe()
+ * i2400m_setup()
+ * i2400m->bus_setup()
+ * boot rom initialization / read mac addr
+ * network / WiMAX stacks registration
+ * i2400m_dev_start()
+ * i2400m->bus_dev_start()
+ * i2400m_dev_initialize()
+ *
+ * The reverse applies for a disconnect() call:
+ *
+ * bus_disconnect()
+ * i2400m_release()
+ * i2400m_dev_stop()
+ * i2400m_dev_shutdown()
+ * i2400m->bus_dev_stop()
+ * network / WiMAX stack unregistration
+ * i2400m->bus_release()
+ *
+ * Once the probe-time call flow has completed, control and data
+ * communications are possible.
+ *
+ * While the device is up, it might reset. The bus-specific driver has
+ * to catch that situation and call i2400m_dev_reset_handle() to deal
+ * with it (reset the internal driver structures and go back to square
+ * one).
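+ *
+ * A minimal sketch of a bus probe, assuming a hypothetical my_bus
+ * driver (allocation arguments and error handling omitted):
+ *
+ * net_dev = alloc_netdev(sizeof(*my), "wmx%d", ...);
+ * my = netdev_priv(net_dev);
+ * i2400m = &my->i2400m;
+ * i2400m_init(i2400m);
+ * i2400m->wimax_dev.net_dev = net_dev;
+ * ...fill the [fill] members of struct i2400m...
+ * result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);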
+ */
+
+#ifndef __I2400M_H__
+#define __I2400M_H__
+
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include "../net-wimax.h"
+#include "linux-wimax-i2400m.h"
+#include <asm/byteorder.h>
+
+enum {
+/* netdev interface */
+ /*
+ * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
+ *
+ * The MTU is 1400 or less
+ */
+ I2400M_MAX_MTU = 1400,
+};
+
+/* Misc constants */
+enum {
+ /* Size of the Boot Mode Command buffer */
+ I2400M_BM_CMD_BUF_SIZE = 16 * 1024,
+ I2400M_BM_ACK_BUF_SIZE = 256,
+};
+
+enum {
+ /* Maximum number of bus reset can be retried */
+ I2400M_BUS_RESET_RETRIES = 3,
+};
+
+/**
+ * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
+ *
+ * This structure will be used to create a device specific poke table
+ * to put the device in a consistent state at boot time.
+ *
+ * @address: The device address to poke
+ *
+ * @data: The data value to poke to the device address
+ *
+ */
+struct i2400m_poke_table{
+ __le32 address;
+ __le32 data;
+};
+
+#define I2400M_FW_POKE(a, d) { \
+ .address = cpu_to_le32(a), \
+ .data = cpu_to_le32(d) \
+}
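+/*
+ * For illustration only (the addresses and values below are made up),
+ * a bus-specific driver would declare its poke table like this and
+ * point i2400m->bus_bm_pokes_table at it:
+ *
+ * static const struct i2400m_poke_table my_pokes[] = {
+ * I2400M_FW_POKE(0x081A58, 0x00000005),
+ * I2400M_FW_POKE(0x080030, 0x00000028),
+ * I2400M_FW_POKE(0x0, 0x0), last entry MUST be all zeros
+ * };
+ */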
+
+
+/**
+ * i2400m_reset_type - methods to reset a device
+ *
+ * @I2400M_RT_WARM: Reset without device disconnection, device handles
+ * are kept valid but state is back to power on, with firmware
+ * re-uploaded.
+ * @I2400M_RT_COLD: Tell the device to disconnect itself from the bus
+ * and reconnect. Renders all device handles invalid.
+ * @I2400M_RT_BUS: Tells the bus to reset the device; last measure
+ * used when both types above don't work.
+ */
+enum i2400m_reset_type {
+ I2400M_RT_WARM, /* first measure */
+ I2400M_RT_COLD, /* second measure */
+ I2400M_RT_BUS, /* call in artillery */
+};
+
+struct i2400m_reset_ctx;
+struct i2400m_roq;
+struct i2400m_barker_db;
+
+/**
+ * struct i2400m - descriptor for an Intel 2400m
+ *
+ * Members marked with [fill] must be filled out/initialized before
+ * calling i2400m_setup().
+ *
+ * Note the @bus_setup/@bus_release and @bus_dev_start/@bus_dev_stop
+ * call pairs do almost the same thing, and depending on
+ * the underlying bus, some stuff has to be put in one or the
+ * other. The idea of setup/release is that they set up the minimal
+ * amount needed for loading firmware, whereas dev_start/stop set up
+ * the rest needed to do full data/control traffic.
+ *
+ * @bus_tx_block_size: [fill] USB imposes a 16 byte block size, but other
+ * busses will differ. So we have this variable, which the
+ * bus layer sets to tell the engine how much of that we need.
+ *
+ * @bus_tx_room_min: [fill] Minimum room required while allocating
+ * TX queue's buffer space for message header. USB requires
+ * 16 bytes. Refer to bus specific driver code for details.
+ *
+ * @bus_pl_size_max: [fill] Maximum payload size.
+ *
+ * @bus_setup: [optional fill] Function called by the bus-generic code
+ * [i2400m_setup()] to set up the basic bus-specific communications
+ * to the device needed to load firmware. See LIFE CYCLE above.
+ *
+ * NOTE: Doesn't need to upload the firmware, as that is taken
+ * care of by the bus-generic code.
+ *
+ * @bus_release: [optional fill] Function called by the bus-generic
+ * code [i2400m_release()] to shut down the basic bus-specific
+ * communications to the device needed to load firmware. See
+ * LIFE CYCLE above.
+ *
+ * This function does not need to reset the device, just tear down
+ * all the host resources created to handle communication with
+ * the device.
+ *
+ * @bus_dev_start: [optional fill] Function called by the bus-generic
+ * code [i2400m_dev_start()] to do things needed to start the
+ * device. See LIFE CYCLE above.
+ *
+ * NOTE: Doesn't need to upload the firmware, as that is taken
+ * care of by the bus-generic code.
+ *
+ * @bus_dev_stop: [optional fill] Function called by the bus-generic
+ * code [i2400m_dev_stop()] to do things needed for stopping the
+ * device. See LIFE CYCLE above.
+ *
+ * This function does not need to reset the device, just tear down
+ * all the host resources created to handle communication with
+ * the device.
+ *
+ * @bus_tx_kick: [fill] Function called by the bus-generic code to let
+ * the bus-specific code know that there is data available in the
+ * TX FIFO for transmission to the device.
+ *
+ * This function cannot sleep.
+ *
+ * @bus_reset: [fill] Function called by the bus-generic code to reset
+ * the device in various ways. Doesn't need to wait for the
+ * reset to finish.
+ *
+ * If warm or cold reset fail, this function is expected to do a
+ * bus-specific reset (eg: USB reset) to get the device to a
+ * working state (even if it implies device disconnection).
+ *
+ * Note the warm reset is used by the firmware uploader to
+ * reinitialize the device.
+ *
+ * IMPORTANT: this is called very early in the device setup
+ * process, so it cannot rely on common infrastructure being laid
+ * out.
+ *
+ * IMPORTANT: don't call reset on RT_BUS with i2400m->init_mutex
+ * held, as the .pre/.post reset handlers will deadlock.
+ *
+ * @bus_bm_retries: [fill] How many times shall a firmware upload /
+ * device initialization be retried? Different models of the same
+ * device might need different values, hence it is set by the
+ * bus-specific driver. Note this value is used in two places,
+ * i2400m_fw_dnload() and __i2400m_dev_start(); they won't become
+ * multiplicative (__i2400m_dev_start() calling N times
+ * i2400m_fw_dnload() and this trying N times to download the
+ * firmware), as __i2400m_dev_start() only retries if the
+ * firmware crashed while initializing the device (not in the
+ * general case).
+ *
+ * @bus_bm_cmd_send: [fill] Function called to send a boot-mode
+ * command. Flags are defined in 'enum i2400m_bm_cmd_flags'. This
+ * is synchronous and has to return 0 if ok or < 0 errno code in
+ * any error condition.
+ *
+ * @bus_bm_wait_for_ack: [fill] Function called to wait for a
+ * boot-mode notification (that can be a response to a previously
+ * issued command or an asynchronous one). Will read until all the
+ * indicated size is read or a timeout occurs. Reading more or less data
+ * than asked for is an error condition. Return 0 if ok, < 0 errno
+ * code on error.
+ *
+ * The caller to this function will check if the response is a
+ * barker that indicates the device going into reset mode.
+ *
+ * @bus_fw_names: [fill] a NULL-terminated array with the names of the
+ * firmware images to try loading. This is made a list so we can
+ * support backward compatibility of firmware releases (eg: if we
+ * can't find the default v1.4, we try v1.3). In general, the name
+ * should be i2400m-fw-X-VERSION.sbcf, where X is the bus name.
+ * The list is tried in order and the first one that loads is
+ * used. The fw loader will set i2400m->fw_name to point to the
+ * active firmware image.
+ *
+ * @bus_bm_mac_addr_impaired: [fill] Set to true if the device's MAC
+ * address provided in boot mode is kind of broken and needs to
+ * be re-read later on.
+ *
+ * @bus_bm_pokes_table: [fill/optional] A table of device addresses
+ * and values that will be poked at device init time to move the
+ * device to the correct state for the type of boot/firmware being
+ * used. This table MUST be terminated with (0x000000,
+ * 0x00000000) or bad things will happen.
+ *
+ *
+ * @wimax_dev: WiMAX generic device for linkage into the kernel WiMAX
+ * stack. Due to the way a net_device is allocated, we need to
+ * force this to be the first field so that we can get from
+ * netdev_priv() the right pointer.
+ *
+ * @updown: the device is up and ready for transmitting control and
+ * data packets. This implies @ready (communication infrastructure
+ * with the device is ready) and the device's firmware has been
+ * loaded and the device initialized.
+ *
+ * Write to it only inside a i2400m->init_mutex protected area
+ * followed with a wmb(); rmb() before accessing (unless locked
+ * inside i2400m->init_mutex). Read access can be loose like that
+ * [just using rmb()] because the paths that use this also do
+ * other error checks later on.
+ *
+ * @ready: Communication infrastructure with the device is ready, data
+ * frames can start to be passed around (this is lighter than
+ * using the WiMAX state for certain hot paths).
+ *
+ * Write to it only inside a i2400m->init_mutex protected area
+ * followed with a wmb(); rmb() before accessing (unless locked
+ * inside i2400m->init_mutex). Read access can be loose like that
+ * [just using rmb()] because the paths that use this also do
+ * other error checks later on.
+ *
+ * @rx_reorder: 1 if RX reordering is enabled; this can only be
+ * set at probe time.
+ *
+ * @state: device's state (as reported by it)
+ *
+ * @state_wq: waitqueue that is woken up whenever the state changes
+ *
+ * @tx_lock: spinlock to protect TX members
+ *
+ * @tx_buf: FIFO buffer for TX; we queue data here
+ *
+ * @tx_in: FIFO index for incoming data. Note this doesn't wrap around
+ * and it is always greater than @tx_out.
+ *
+ * @tx_out: FIFO index for outgoing data
+ *
+ * @tx_msg: current TX message that is active in the FIFO for
+ * appending payloads.
+ *
+ * @tx_sequence: current sequence number for TX messages from the
+ * host to the device.
+ *
+ * @tx_msg_size: size of the current message being transmitted by the
+ * bus-specific code.
+ *
+ * @tx_pl_num: total number of payloads sent
+ *
+ * @tx_pl_max: maximum number of payloads sent in a TX message
+ *
+ * @tx_pl_min: minimum number of payloads sent in a TX message
+ *
+ * @tx_num: number of TX messages sent
+ *
+ * @tx_size_acc: number of bytes in all TX messages sent
+ * (this is different to net_dev's statistics as it also counts
+ * control messages).
+ *
+ * @tx_size_min: smallest TX message sent.
+ *
+ * @tx_size_max: biggest TX message sent.
+ *
+ * @rx_lock: spinlock to protect RX members and rx_roq_refcount.
+ *
+ * @rx_pl_num: total number of payloads received
+ *
+ * @rx_pl_max: maximum number of payloads received in a RX message
+ *
+ * @rx_pl_min: minimum number of payloads received in a RX message
+ *
+ * @rx_num: number of RX messages received
+ *
+ * @rx_size_acc: number of bytes in all RX messages received
+ * (this is different to net_dev's statistics as it also counts
+ * control messages).
+ *
+ * @rx_size_min: smallest RX message received.
+ *
+ * @rx_size_max: biggest RX message received.
+ *
+ * @rx_roq: RX ReOrder queues. (fw >= v1.4) When packets are received
+ * out of order, the device will ask the driver to hold certain
+ * packets until they can be delivered in order; the driver then
+ * releases them up the stack. See rx.c in this directory for details.
+ *
+ * @rx_roq_refcount: refcount rx_roq. This refcounts any access to
+ * rx_roq thus preventing rx_roq being destroyed when rx_roq
+ * is being accessed. rx_roq_refcount is protected by rx_lock.
+ *
+ * @rx_reports: reports received from the device that couldn't be
+ * processed because the driver wasn't ready yet; when ready,
+ * they are pulled from here and processed.
+ *
+ * @rx_report_ws: Work struct used to kick a scan of the RX reports
+ * list and to process each.
+ *
+ * @src_mac_addr: MAC address the fake ethernet headers appear to come
+ * from. This is generated at i2400m_setup() time and used during
+ * the life cycle of the instance. See i2400m_fake_eth_header().
+ *
+ * @init_mutex: Mutex used for serializing the device bringup
+ * sequence; this way if the device reboots in the middle, we
+ * don't try to do a bringup again while we are tearing down the
+ * one that failed.
+ *
+ * Can't reuse @msg_mutex because from within the bringup sequence
+ * we need to send messages to the device and thus use @msg_mutex.
+ *
+ * @msg_mutex: mutex used to send control commands to the device (we
+ * only allow one at a time, per host-device interface design).
+ *
+ * @msg_completion: used to wait for an ack to a control command sent
+ * to the device.
+ *
+ * @ack_skb: used to store the actual ack to a control command if the
+ * reception of the command was successful. Otherwise, an ERR_PTR()
+ * errno code that indicates what failed with the ack reception.
+ *
+ * Only valid after @msg_completion is woken up. Only updateable
+ * if @msg_completion is armed. Only touched by
+ * i2400m_msg_to_dev().
+ *
+ * Protected by @rx_lock. In theory the command execution flow is
+ * sequential, but in case the device sends an out-of-phase or
+ * very delayed response, we need to avoid it trampling current
+ * execution.
+ *
+ * @bm_cmd_buf: boot mode command buffer for composing firmware upload
+ * commands.
+ *
+ * USB can't r/w to stack or vmalloc'd memory; also, we end up
+ * having to alloc/free a lot to compose commands, so we use these
+ * for staging instead of reallocating all the time.
+ *
+ * This assumes the code always runs serialized. Only one thread
+ * can call i2400m_bm_cmd() at the same time.
+ *
+ * @bm_ack_buf: boot mode acknowledge buffer for staging reception of
+ * responses to commands.
+ *
+ * See @bm_cmd_buf.
+ *
+ * @work_queue: work queue for processing device reports. This
+ * workqueue cannot be used for processing TX or RX to the device,
+ * as from it we'll process device reports, which might require
+ * further communication with the device.
+ *
+ * @debugfs_dentry: hookup for debugfs files.
+ * These have to be in a separate directory, a child of
+ * (wimax_dev->debugfs_dentry) so they can be removed when the
+ * module unloads, as we don't keep each dentry.
+ *
+ * @fw_name: name of the firmware image that is currently being used.
+ *
+ * @fw_version: version of the firmware interface, Major.minor,
+ * encoded in the high word and low word (major << 16 | minor).
+ *
+ * @fw_hdrs: NULL terminated array of pointers to the firmware
+ * headers. This is only available during firmware load time.
+ *
+ * @fw_cached: Used to cache firmware when the system goes to
+ * suspend/standby/hibernation (as on resume we can't read it). If
+ * NULL, no firmware was cached, so read it from disk. If ~0, no
+ * firmware files can be read (the system hasn't come out of suspend
+ * yet and caching one failed), so abort; otherwise, a valid cached
+ * firmware to be used. Access to this variable is protected by
+ * the spinlock i2400m->rx_lock.
+ *
+ * @barker: barker type that the device uses; this is initialized by
+ * i2400m_is_boot_barker() the first time it is called. Then it
+ * won't change during the life cycle of the device and every time
+ * a boot barker is received, it is just verified for it being the
+ * same.
+ *
+ * @pm_notifier: used to register for PM events
+ *
+ * @bus_reset_retries: counter for the number of bus resets attempted for
+ * this boot. It's not for tracking the number of bus resets during
+ * the whole driver life cycle (from insmod to rmmod) but for the
+ * number of dev_start() executed until dev_start() returns a success
+ * (i.e. a good boot means a dev_stop() followed by a successful
+ * dev_start()). dev_reset_handler() increments this counter whenever
+ * it is triggering a bus reset. It checks this counter to decide if a
+ * subsequent bus reset should be retried. dev_reset_handler() retries
+ * the bus reset until dev_start() succeeds or the counter reaches
+ * I2400M_BUS_RESET_RETRIES. The counter is cleared to 0 in
+ * dev_reset_handle() when dev_start() returns a success,
+ * i.e. a successful boot has completed.
+ *
+ * @alive: flag to denote if the device *should* be alive. This flag
+ * behaves just like @updown (see doc for @updown) except that it reflects
+ * the device state *we expect* rather than the actual state as denoted
+ * by @updown. It is set 1 whenever @updown is set 1 in dev_start().
+ * Then the device is expected to be alive all the time
+ * (i2400m->alive remains 1) until the driver is removed. Therefore
+ * all the device reboot events detected can be still handled properly
+ * by either dev_reset_handle() or .pre_reset/.post_reset as long as
+ * the driver is present. It is set 0 along with @updown in dev_stop().
+ *
+ * @error_recovery: flag to denote if we are ready to take an error recovery.
+ * 0 for ready to take an error recovery; 1 for not ready. It is
+ * initialized to 1 during probe() since we don't want to take any error
+ * recovery while probing. It is decremented by 1 whenever dev_start()
+ * succeeds, to indicate we are ready to take error recovery from then on.
+ * It is checked every time we want to schedule an error recovery. If an
+ * error recovery is already in place (error_recovery was set to 1), we
+ * should not schedule another one until the last one is done.
+ */
+struct i2400m {
+ struct wimax_dev wimax_dev; /* FIRST! See doc */
+
+ unsigned updown:1; /* Network device is up or down */
+ unsigned boot_mode:1; /* is the device in boot mode? */
+ unsigned sboot:1; /* signed or unsigned fw boot */
+ unsigned ready:1; /* Device comm infrastructure ready */
+ unsigned rx_reorder:1; /* RX reorder is enabled */
+ u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */
+ /* typed u8 so /sys/kernel/debug/u8 can tweak */
+ enum i2400m_system_state state;
+ wait_queue_head_t state_wq; /* Woken up when on state updates */
+
+ size_t bus_tx_block_size;
+ size_t bus_tx_room_min;
+ size_t bus_pl_size_max;
+ unsigned bus_bm_retries;
+
+ int (*bus_setup)(struct i2400m *);
+ int (*bus_dev_start)(struct i2400m *);
+ void (*bus_dev_stop)(struct i2400m *);
+ void (*bus_release)(struct i2400m *);
+ void (*bus_tx_kick)(struct i2400m *);
+ int (*bus_reset)(struct i2400m *, enum i2400m_reset_type);
+ ssize_t (*bus_bm_cmd_send)(struct i2400m *,
+ const struct i2400m_bootrom_header *,
+ size_t, int flags);
+ ssize_t (*bus_bm_wait_for_ack)(struct i2400m *,
+ struct i2400m_bootrom_header *, size_t);
+ const char **bus_fw_names;
+ unsigned bus_bm_mac_addr_impaired:1;
+ const struct i2400m_poke_table *bus_bm_pokes_table;
+
+ spinlock_t tx_lock; /* protect TX state */
+ void *tx_buf;
+ size_t tx_in, tx_out;
+ struct i2400m_msg_hdr *tx_msg;
+ size_t tx_sequence, tx_msg_size;
+ /* TX stats */
+ unsigned tx_pl_num, tx_pl_max, tx_pl_min,
+ tx_num, tx_size_acc, tx_size_min, tx_size_max;
+
+ /* RX stuff */
+ /* protect RX state and rx_roq_refcount */
+ spinlock_t rx_lock;
+ unsigned rx_pl_num, rx_pl_max, rx_pl_min,
+ rx_num, rx_size_acc, rx_size_min, rx_size_max;
+ struct i2400m_roq *rx_roq; /* access is refcounted */
+ struct kref rx_roq_refcount; /* refcount access to rx_roq */
+ u8 src_mac_addr[ETH_HLEN];
+ struct list_head rx_reports; /* under rx_lock! */
+ struct work_struct rx_report_ws;
+
+ struct mutex msg_mutex; /* serialize command execution */
+ struct completion msg_completion;
+ struct sk_buff *ack_skb; /* protected by rx_lock */
+
+ void *bm_ack_buf; /* for receiving acks over USB */
+ void *bm_cmd_buf; /* for issuing commands over USB */
+
+ struct workqueue_struct *work_queue;
+
+ struct mutex init_mutex; /* protect bringup seq */
+ struct i2400m_reset_ctx *reset_ctx; /* protected by init_mutex */
+
+ struct work_struct wake_tx_ws;
+ struct sk_buff *wake_tx_skb;
+
+ struct work_struct reset_ws;
+ const char *reset_reason;
+
+ struct work_struct recovery_ws;
+
+ struct dentry *debugfs_dentry;
+ const char *fw_name; /* name of the current firmware image */
+ unsigned long fw_version; /* version of the firmware interface */
+ const struct i2400m_bcf_hdr **fw_hdrs;
+ struct i2400m_fw *fw_cached; /* protected by rx_lock */
+ struct i2400m_barker_db *barker;
+
+ struct notifier_block pm_notifier;
+
+ /* counting bus reset retries in this boot */
+ atomic_t bus_reset_retries;
+
+ /* if the device is expected to be alive */
+ unsigned alive;
+
+ /* 0 if we are ready for error recovery; 1 if not ready */
+ atomic_t error_recovery;
+
+};
+
+
+/*
+ * Bus-generic internal APIs
+ * -------------------------
+ */
+
+static inline
+struct i2400m *wimax_dev_to_i2400m(struct wimax_dev *wimax_dev)
+{
+ return container_of(wimax_dev, struct i2400m, wimax_dev);
+}
+
+static inline
+struct i2400m *net_dev_to_i2400m(struct net_device *net_dev)
+{
+ return wimax_dev_to_i2400m(netdev_priv(net_dev));
+}
+
+/*
+ * Boot mode support
+ */
+
+/**
+ * i2400m_bm_cmd_flags - flags to i2400m_bm_cmd()
+ *
+ * @I2400M_BM_CMD_RAW: send the command block as-is, without doing any
+ * extra processing for adding CRC.
+ */
+enum i2400m_bm_cmd_flags {
+ I2400M_BM_CMD_RAW = 1 << 2,
+};
+
+/**
+ * i2400m_bri - Boot-ROM indicators
+ *
+ * Flags for i2400m_bootrom_init() and i2400m_dev_bootstrap() [which
+ * are passed from things like i2400m_setup()]. Can be combined with
+ * |.
+ *
+ * @I2400M_BRI_SOFT: The device rebooted already and a reboot
+ * barker has been received; proceed directly to ack the boot sequence.
+ * @I2400M_BRI_NO_REBOOT: Do not reboot the device and proceed
+ * directly to wait for a reboot barker from the device.
+ * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
+ * rom after reading the MAC address. This is quite a dirty hack,
+ * if you ask me -- the device requires the bootrom to be
+ * initialized after reading the MAC address.
+ */
+enum i2400m_bri {
+ I2400M_BRI_SOFT = 1 << 1,
+ I2400M_BRI_NO_REBOOT = 1 << 2,
+ I2400M_BRI_MAC_REINIT = 1 << 3,
+};
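+
+/*
+ * Illustrative usage sketch (not code from this driver): a bus glue
+ * driver passes one of these flags when setting up the device, e.g.:
+ *
+ *	result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
+ *
+ * with the flag chosen according to the quirks of the underlying
+ * bus / boot ROM.
+ */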
+
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+int i2400m_read_mac_addr(struct i2400m *);
+int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+static inline
+int i2400m_is_d2h_barker(const void *buf)
+{
+ const __le32 *barker = buf;
+ return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
+}
+void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
+
+/* Make/grok boot-rom header commands */
+
+static inline
+__le32 i2400m_brh_command(enum i2400m_brh_opcode opcode, unsigned use_checksum,
+ unsigned direct_access)
+{
+ return cpu_to_le32(
+ I2400M_BRH_SIGNATURE
+ | (direct_access ? I2400M_BRH_DIRECT_ACCESS : 0)
+ | I2400M_BRH_RESPONSE_REQUIRED /* response always required */
+ | (use_checksum ? I2400M_BRH_USE_CHECKSUM : 0)
+ | (opcode & I2400M_BRH_OPCODE_MASK));
+}
+
+static inline
+void i2400m_brh_set_opcode(struct i2400m_bootrom_header *hdr,
+ enum i2400m_brh_opcode opcode)
+{
+ hdr->command = cpu_to_le32(
+ (le32_to_cpu(hdr->command) & ~I2400M_BRH_OPCODE_MASK)
+ | (opcode & I2400M_BRH_OPCODE_MASK));
+}
+
+static inline
+unsigned i2400m_brh_get_opcode(const struct i2400m_bootrom_header *hdr)
+{
+ return le32_to_cpu(hdr->command) & I2400M_BRH_OPCODE_MASK;
+}
+
+static inline
+unsigned i2400m_brh_get_response(const struct i2400m_bootrom_header *hdr)
+{
+ return (le32_to_cpu(hdr->command) & I2400M_BRH_RESPONSE_MASK)
+ >> I2400M_BRH_RESPONSE_SHIFT;
+}
+
+static inline
+unsigned i2400m_brh_get_use_checksum(const struct i2400m_bootrom_header *hdr)
+{
+ return le32_to_cpu(hdr->command) & I2400M_BRH_USE_CHECKSUM;
+}
+
+static inline
+unsigned i2400m_brh_get_response_required(
+ const struct i2400m_bootrom_header *hdr)
+{
+ return le32_to_cpu(hdr->command) & I2400M_BRH_RESPONSE_REQUIRED;
+}
+
+static inline
+unsigned i2400m_brh_get_direct_access(const struct i2400m_bootrom_header *hdr)
+{
+ return le32_to_cpu(hdr->command) & I2400M_BRH_DIRECT_ACCESS;
+}
+
+static inline
+unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
+{
+ return (le32_to_cpu(hdr->command) & I2400M_BRH_SIGNATURE_MASK)
+ >> I2400M_BRH_SIGNATURE_SHIFT;
+}
+
+
+/*
+ * Driver / device setup and internal functions
+ */
+void i2400m_init(struct i2400m *);
+int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
+void i2400m_netdev_setup(struct net_device *net_dev);
+int i2400m_sysfs_setup(struct device_driver *);
+void i2400m_sysfs_release(struct device_driver *);
+int i2400m_tx_setup(struct i2400m *);
+void i2400m_wake_tx_work(struct work_struct *);
+void i2400m_tx_release(struct i2400m *);
+
+int i2400m_rx_setup(struct i2400m *);
+void i2400m_rx_release(struct i2400m *);
+
+void i2400m_fw_cache(struct i2400m *);
+void i2400m_fw_uncache(struct i2400m *);
+
+void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
+ int);
+void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
+void i2400m_net_wake_stop(struct i2400m *);
+enum i2400m_pt;
+int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+
+#ifdef CONFIG_DEBUG_FS
+void i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_rm(struct i2400m *);
+#else
+static inline void i2400m_debugfs_add(struct i2400m *i2400m) {}
+static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
+#endif
+
+/* Initialize/shutdown the device */
+int i2400m_dev_initialize(struct i2400m *);
+void i2400m_dev_shutdown(struct i2400m *);
+
+extern struct attribute_group i2400m_dev_attr_group;
+
+
+/* HDI message's payload description handling */
+
+static inline
+size_t i2400m_pld_size(const struct i2400m_pld *pld)
+{
+ return I2400M_PLD_SIZE_MASK & le32_to_cpu(pld->val);
+}
+
+static inline
+enum i2400m_pt i2400m_pld_type(const struct i2400m_pld *pld)
+{
+ return (I2400M_PLD_TYPE_MASK & le32_to_cpu(pld->val))
+ >> I2400M_PLD_TYPE_SHIFT;
+}
+
+static inline
+void i2400m_pld_set(struct i2400m_pld *pld, size_t size,
+ enum i2400m_pt type)
+{
+ pld->val = cpu_to_le32(
+ ((type << I2400M_PLD_TYPE_SHIFT) & I2400M_PLD_TYPE_MASK)
+ | (size & I2400M_PLD_SIZE_MASK));
+}
+
+
+/*
+ * API for the bus-specific drivers
+ * --------------------------------
+ */
+
+static inline
+struct i2400m *i2400m_get(struct i2400m *i2400m)
+{
+ dev_hold(i2400m->wimax_dev.net_dev);
+ return i2400m;
+}
+
+static inline
+void i2400m_put(struct i2400m *i2400m)
+{
+ dev_put(i2400m->wimax_dev.net_dev);
+}
+
+int i2400m_dev_reset_handle(struct i2400m *, const char *);
+int i2400m_pre_reset(struct i2400m *);
+int i2400m_post_reset(struct i2400m *);
+void i2400m_error_recovery(struct i2400m *);
+
+/*
+ * _setup()/_release() are called by the probe/disconnect functions of
+ * the bus-specific drivers.
+ */
+int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+void i2400m_release(struct i2400m *);
+
+int i2400m_rx(struct i2400m *, struct sk_buff *);
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+void i2400m_tx_msg_sent(struct i2400m *);
+
+
+/*
+ * Utility functions
+ */
+
+static inline
+struct device *i2400m_dev(struct i2400m *i2400m)
+{
+ return i2400m->wimax_dev.net_dev->dev.parent;
+}
+
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
+int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
+ size_t);
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
+ size_t);
+void i2400m_report_hook_work(struct work_struct *);
+int i2400m_cmd_enter_powersave(struct i2400m *);
+int i2400m_cmd_exit_idle(struct i2400m *);
+struct sk_buff *i2400m_get_device_info(struct i2400m *);
+int i2400m_firmware_check(struct i2400m *);
+int i2400m_set_idle_timeout(struct i2400m *, unsigned);
+
+static inline
+struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
+{
+ return &iface->cur_altsetting->endpoint[ep].desc;
+}
+
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
+void i2400m_report_tlv_rf_switches_status(struct i2400m *,
+ const struct i2400m_tlv_rf_switches_status *);
+
+/*
+ * Helpers for firmware backwards compatibility
+ *
+ * As we aim to support at least the firmware version that was
+ * released with the previous kernel/driver release, some code will be
+ * conditionally executed depending on the firmware version. On each
+ * release, the code to support fw releases past the last two ones
+ * will be purged.
+ *
+ * By making such code depend on these helpers, it is easier to keep
+ * tabs on what has to go and what does not (see the usage sketch
+ * after the helpers below).
+ */
+static inline
+unsigned i2400m_le_v1_3(struct i2400m *i2400m)
+{
+ /* running fw is v1.3 or lower */
+ return i2400m->fw_version <= 0x00090001;
+}
+
+static inline
+unsigned i2400m_ge_v1_4(struct i2400m *i2400m)
+{
+ /* running fw is v1.4 or higher */
+ return i2400m->fw_version >= 0x00090002;
+}
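+
+/*
+ * Illustrative usage sketch (not code from this driver): code that
+ * must behave differently per firmware generation typically branches
+ * on the helpers above, e.g.:
+ *
+ *	if (i2400m_ge_v1_4(i2400m))
+ *		handle_extended_rx(i2400m, skb);
+ *	else
+ *		handle_legacy_rx(i2400m, skb);
+ *
+ * handle_extended_rx()/handle_legacy_rx() are made-up names used
+ * only for this example.
+ */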
+
+
+/*
+ * Do a millisecond-sleep to allow wireshark to dump all the data
+ * packets. Used only for debugging; compiled out unless the #else
+ * branch below is enabled.
+ */
+static inline
+void __i2400m_msleep(unsigned ms)
+{
+#if 1
+#else
+ msleep(ms);
+#endif
+}
+
+
+/* module initialization helpers */
+int i2400m_barker_db_init(const char *);
+void i2400m_barker_db_exit(void);
+
+
+
+#endif /* #ifndef __I2400M_H__ */
diff --git a/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h b/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h
new file mode 100644
index 000000000000..fd198bc24a3c
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h
@@ -0,0 +1,572 @@
+/*
+ * Intel Wireless WiMax Connection 2400m
+ * Host-Device protocol interface definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ *
+ * This header defines the data structures and constants used to
+ * communicate with the device.
+ *
+ * BOOTMODE/BOOTROM/FIRMWARE UPLOAD PROTOCOL
+ *
+ * The firmware upload protocol is quite simple and only requires a
+ * handful of commands. See drivers/net/wimax/i2400m/fw.c for more
+ * details.
+ *
+ * The BCF data structure is for the firmware file header.
+ *
+ *
+ * THE DATA / CONTROL PROTOCOL
+ *
+ * This is the normal protocol spoken with the device once the
+ * firmware is uploaded. It transports data payloads and control
+ * messages back and forth.
+ *
+ * It consists of 'messages' that pack one or more payloads each. The
+ * format is described in detail in drivers/net/wimax/i2400m/rx.c and
+ * tx.c.
+ *
+ *
+ * THE L3L4 PROTOCOL
+ *
+ * The term L3L4 refers to Layer 3 (the device) and Layer 4 (the
+ * driver/host software).
+ *
+ * This is the control protocol used by the host to control the i2400m
+ * device (scan, connect, disconnect...). This is sent to / received
+ * as control frames. These frames consist of a header and zero or
+ * more TLVs with information. We call each control frame a "message".
+ *
+ * Each message is composed of:
+ *
+ * HEADER
+ * [TLV0 + PAYLOAD0]
+ * [TLV1 + PAYLOAD1]
+ * [...]
+ * [TLVN + PAYLOADN]
+ *
+ * The HEADER is defined by 'struct i2400m_l3l4_hdr'. The payloads are
+ * defined by a TLV structure (Type Length Value) which is a 'header'
+ * (struct i2400m_tlv_hdr) and then the payload.
+ *
+ * All integers are represented as Little Endian.
+ *
+ * - REQUESTS AND EVENTS
+ *
+ * The requests can be classified as follows:
+ *
+ * COMMAND: a request from the host to the device asking for an
+ * action to be performed. The device will reply with a
+ * message (with the same type as the command), a status and
+ * no (TLV) payload. Execution of a command might cause
+ * events (of different type) to be sent later on as the
+ * device's state changes.
+ *
+ * GET/SET: similar to COMMAND, but will not cause other
+ * EVENTs. The reply, in the case of GET, will contain
+ * TLVs with the requested information.
+ *
+ * EVENT: asynchronous messages sent from the device, maybe as a
+ * consequence of previous COMMANDs but disassociated from
+ * them.
+ *
+ * Only one request may be pending at a time (ie: don't parallelize,
+ * nor post another GET request before the previous COMMAND has been
+ * acknowledged by the device with its corresponding reply).
+ *
+ * The different requests and their formats are described below:
+ *
+ * I2400M_MT_* Message types
+ * I2400M_MS_* Message status (for replies, events)
+ * i2400m_tlv_* TLVs
+ *
+ * Data types are named 'struct i2400m_msg_OPNAME', with OPNAME
+ * matching the operation.
+ */
+
+#ifndef __LINUX__WIMAX__I2400M_H__
+#define __LINUX__WIMAX__I2400M_H__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/*
+ * Host Device Interface (HDI) common to all busses
+ */
+
+/* Boot-mode (firmware upload mode) commands */
+
+/* Header for the firmware file */
+struct i2400m_bcf_hdr {
+ __le32 module_type;
+ __le32 header_len;
+ __le32 header_version;
+ __le32 module_id;
+ __le32 module_vendor;
+ __le32 date; /* BCD YYYYMMDD */
+ __le32 size; /* in dwords */
+ __le32 key_size; /* in dwords */
+ __le32 modulus_size; /* in dwords */
+ __le32 exponent_size; /* in dwords */
+ __u8 reserved[88];
+} __attribute__ ((packed));
+
+/* Boot mode opcodes */
+enum i2400m_brh_opcode {
+ I2400M_BRH_READ = 1,
+ I2400M_BRH_WRITE = 2,
+ I2400M_BRH_JUMP = 3,
+ I2400M_BRH_SIGNED_JUMP = 8,
+ I2400M_BRH_HASH_PAYLOAD_ONLY = 9,
+};
+
+/* Boot mode command masks and stuff */
+enum i2400m_brh {
+ I2400M_BRH_SIGNATURE = 0xcbbc0000,
+ I2400M_BRH_SIGNATURE_MASK = 0xffff0000,
+ I2400M_BRH_SIGNATURE_SHIFT = 16,
+ I2400M_BRH_OPCODE_MASK = 0x0000000f,
+ I2400M_BRH_RESPONSE_MASK = 0x000000f0,
+ I2400M_BRH_RESPONSE_SHIFT = 4,
+ I2400M_BRH_DIRECT_ACCESS = 0x00000400,
+ I2400M_BRH_RESPONSE_REQUIRED = 0x00000200,
+ I2400M_BRH_USE_CHECKSUM = 0x00000100,
+};
+
+
+/**
+ * i2400m_bootrom_header - Header for a boot-mode command
+ *
+ * @command: the command descriptor (composed with the enum i2400m_brh bits)
+ * @target_addr: where in the device memory the action should be performed.
+ * @data_size: for read/write, amount of data to be read/written
+ * @block_checksum: checksum value (if applicable)
+ * @payload: the beginning of data attached to this header
+ */
+struct i2400m_bootrom_header {
+ __le32 command; /* Compose with enum i2400m_brh */
+ __le32 target_addr;
+ __le32 data_size;
+ __le32 block_checksum;
+ char payload[0];
+} __attribute__ ((packed));
+
+
+/*
+ * Data / control protocol
+ */
+
+/* Packet types for the host-device interface */
+enum i2400m_pt {
+ I2400M_PT_DATA = 0,
+ I2400M_PT_CTRL,
+ I2400M_PT_TRACE, /* For device debug */
+ I2400M_PT_RESET_WARM, /* device reset */
+ I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */
+ I2400M_PT_EDATA, /* Extended RX data */
+ I2400M_PT_ILLEGAL
+};
+
+
+/*
+ * Payload for a data packet
+ *
+ * This is prefixed to each and every outgoing DATA packet.
+ */
+struct i2400m_pl_data_hdr {
+ __le32 reserved;
+} __attribute__((packed));
+
+
+/*
+ * Payload for an extended data packet
+ *
+ * New in fw v1.4
+ *
+ * @reorder: whether this payload has to be reordered (and how); see
+ * the decoding sketch after enum i2400m_ro_type below.
+ * @cs: the type of data in the packet, as defined per (802.16e
+ * T11.13.19.1). Currently only 2 (IPv4 packet) is supported.
+ *
+ * This is prefixed to each and every INCOMING DATA packet.
+ */
+struct i2400m_pl_edata_hdr {
+ __le32 reorder; /* bits defined in i2400m_ro */
+ __u8 cs;
+ __u8 reserved[11];
+} __attribute__((packed));
+
+enum i2400m_cs {
+ I2400M_CS_IPV4_0 = 0,
+ I2400M_CS_IPV4 = 2,
+};
+
+enum i2400m_ro {
+ I2400M_RO_NEEDED = 0x01,
+ I2400M_RO_TYPE = 0x03,
+ I2400M_RO_TYPE_SHIFT = 1,
+ I2400M_RO_CIN = 0x0f,
+ I2400M_RO_CIN_SHIFT = 4,
+ I2400M_RO_FBN = 0x07ff,
+ I2400M_RO_FBN_SHIFT = 8,
+ I2400M_RO_SN = 0x07ff,
+ I2400M_RO_SN_SHIFT = 21,
+};
+
+enum i2400m_ro_type {
+ I2400M_RO_TYPE_RESET = 0,
+ I2400M_RO_TYPE_PACKET,
+ I2400M_RO_TYPE_WS,
+ I2400M_RO_TYPE_PACKET_WS,
+};
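+
+/*
+ * Illustrative decoding sketch (not code from this driver): the
+ * reorder word of struct i2400m_pl_edata_hdr is decoded by shifting
+ * and masking with the enum i2400m_ro values above, roughly:
+ *
+ *	u32 reorder = le32_to_cpu(hdr->reorder);
+ *
+ *	if (reorder & I2400M_RO_NEEDED) {
+ *		unsigned ro_type = (reorder >> I2400M_RO_TYPE_SHIFT)
+ *			& I2400M_RO_TYPE;
+ *		unsigned ro_cin = (reorder >> I2400M_RO_CIN_SHIFT)
+ *			& I2400M_RO_CIN;
+ *		unsigned ro_sn = (reorder >> I2400M_RO_SN_SHIFT)
+ *			& I2400M_RO_SN;
+ *		...
+ *	}
+ *
+ * where 'hdr' is assumed to point to the received struct
+ * i2400m_pl_edata_hdr; see i2400m_rx_edata() in rx.c for the real
+ * decoding.
+ */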
+
+
+/* Misc constants */
+enum {
+ I2400M_PL_ALIGN = 16, /* Payload data size alignment */
+ I2400M_PL_SIZE_MAX = 0x3EFF,
+ I2400M_MAX_PLS_IN_MSG = 60,
+ /* protocol barkers: sync sequences; for notifications they
+ * are sent in groups of four. */
+ I2400M_H2D_PREVIEW_BARKER = 0xcafe900d,
+ I2400M_COLD_RESET_BARKER = 0xc01dc01d,
+ I2400M_WARM_RESET_BARKER = 0x50f750f7,
+ I2400M_NBOOT_BARKER = 0xdeadbeef,
+ I2400M_SBOOT_BARKER = 0x0ff1c1a1,
+ I2400M_SBOOT_BARKER_6050 = 0x80000001,
+ I2400M_ACK_BARKER = 0xfeedbabe,
+ I2400M_D2H_MSG_BARKER = 0xbeefbabe,
+};
+
+
+/*
+ * Hardware payload descriptor
+ *
+ * Bitfields encoded in a struct to enforce typing semantics.
+ *
+ * Look in rx.c and tx.c for a full description of the format.
+ */
+struct i2400m_pld {
+ __le32 val;
+} __attribute__ ((packed));
+
+#define I2400M_PLD_SIZE_MASK 0x00003fff
+#define I2400M_PLD_TYPE_SHIFT 16
+#define I2400M_PLD_TYPE_MASK 0x000f0000
+
+/*
+ * Header for a TX message or RX message
+ *
+ * @barker: preamble
+ * @size: used for management of the FIFO queue buffer; before
+ * sending, this is converted to be a real preamble. This
+ * indicates the real size of the TX message that starts at this
+ * point. If the highest bit is set, then this message is to be
+ * skipped.
+ * @sequence: sequence number of this message
+ * @offset: offset where the message itself starts -- see the comments
+ * in the file header about message header and payload descriptor
+ * alignment.
+ * @num_pls: number of payloads in this message
+ * @padding: amount of padding bytes at the end of the message to make
+ * it block-size aligned
+ *
+ * Look in rx.c and tx.c for a full description of the format.
+ */
+struct i2400m_msg_hdr {
+ union {
+ __le32 barker;
+ __u32 size; /* same size type as barker!! */
+ };
+ union {
+ __le32 sequence;
+ __u32 offset; /* same size type as barker!! */
+ };
+ __le16 num_pls;
+ __le16 rsv1;
+ __le16 padding;
+ __le16 rsv2;
+ struct i2400m_pld pld[0];
+} __attribute__ ((packed));
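+
+/*
+ * Illustrative parsing sketch (not code from this driver): a
+ * receiver walks the num_pls payload descriptors that follow the
+ * header with the i2400m_pld_size()/i2400m_pld_type() accessors from
+ * i2400m.h, roughly:
+ *
+ *	unsigned i, num_pls = le16_to_cpu(msg_hdr->num_pls);
+ *
+ *	for (i = 0; i < num_pls; i++) {
+ *		size_t pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
+ *		enum i2400m_pt pl_type = i2400m_pld_type(&msg_hdr->pld[i]);
+ *		...
+ *	}
+ *
+ * See i2400m_rx() and i2400m_rx_pl_descr_check() in rx.c for the
+ * real thing, including the sanity checks on each descriptor.
+ */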
+
+
+
+/*
+ * L3/L4 control protocol
+ */
+
+enum {
+ /* Interface version */
+ I2400M_L3L4_VERSION = 0x0100,
+};
+
+/* Message types */
+enum i2400m_mt {
+ I2400M_MT_RESERVED = 0x0000,
+ I2400M_MT_INVALID = 0xffff,
+ I2400M_MT_REPORT_MASK = 0x8000,
+
+ I2400M_MT_GET_SCAN_RESULT = 0x4202,
+ I2400M_MT_SET_SCAN_PARAM = 0x4402,
+ I2400M_MT_CMD_RF_CONTROL = 0x4602,
+ I2400M_MT_CMD_SCAN = 0x4603,
+ I2400M_MT_CMD_CONNECT = 0x4604,
+ I2400M_MT_CMD_DISCONNECT = 0x4605,
+ I2400M_MT_CMD_EXIT_IDLE = 0x4606,
+ I2400M_MT_GET_LM_VERSION = 0x5201,
+ I2400M_MT_GET_DEVICE_INFO = 0x5202,
+ I2400M_MT_GET_LINK_STATUS = 0x5203,
+ I2400M_MT_GET_STATISTICS = 0x5204,
+ I2400M_MT_GET_STATE = 0x5205,
+ I2400M_MT_GET_MEDIA_STATUS = 0x5206,
+ I2400M_MT_SET_INIT_CONFIG = 0x5404,
+ I2400M_MT_CMD_INIT = 0x5601,
+ I2400M_MT_CMD_TERMINATE = 0x5602,
+ I2400M_MT_CMD_MODE_OF_OP = 0x5603,
+ I2400M_MT_CMD_RESET_DEVICE = 0x5604,
+ I2400M_MT_CMD_MONITOR_CONTROL = 0x5605,
+ I2400M_MT_CMD_ENTER_POWERSAVE = 0x5606,
+ I2400M_MT_GET_TLS_OPERATION_RESULT = 0x6201,
+ I2400M_MT_SET_EAP_SUCCESS = 0x6402,
+ I2400M_MT_SET_EAP_FAIL = 0x6403,
+ I2400M_MT_SET_EAP_KEY = 0x6404,
+ I2400M_MT_CMD_SEND_EAP_RESPONSE = 0x6602,
+ I2400M_MT_REPORT_SCAN_RESULT = 0xc002,
+ I2400M_MT_REPORT_STATE = 0xd002,
+ I2400M_MT_REPORT_POWERSAVE_READY = 0xd005,
+ I2400M_MT_REPORT_EAP_REQUEST = 0xe002,
+ I2400M_MT_REPORT_EAP_RESTART = 0xe003,
+ I2400M_MT_REPORT_ALT_ACCEPT = 0xe004,
+ I2400M_MT_REPORT_KEY_REQUEST = 0xe005,
+};
+
+
+/*
+ * Message Ack Status codes
+ *
+ * When a message is replied-to, this status is reported.
+ */
+enum i2400m_ms {
+ I2400M_MS_DONE_OK = 0,
+ I2400M_MS_DONE_IN_PROGRESS = 1,
+ I2400M_MS_INVALID_OP = 2,
+ I2400M_MS_BAD_STATE = 3,
+ I2400M_MS_ILLEGAL_VALUE = 4,
+ I2400M_MS_MISSING_PARAMS = 5,
+ I2400M_MS_VERSION_ERROR = 6,
+ I2400M_MS_ACCESSIBILITY_ERROR = 7,
+ I2400M_MS_BUSY = 8,
+ I2400M_MS_CORRUPTED_TLV = 9,
+ I2400M_MS_UNINITIALIZED = 10,
+ I2400M_MS_UNKNOWN_ERROR = 11,
+ I2400M_MS_PRODUCTION_ERROR = 12,
+ I2400M_MS_NO_RF = 13,
+ I2400M_MS_NOT_READY_FOR_POWERSAVE = 14,
+ I2400M_MS_THERMAL_CRITICAL = 15,
+ I2400M_MS_MAX
+};
+
+
+/**
+ * i2400m_tlv - enumeration of the different types of TLVs
+ *
+ * TLVs stand for type-length-value and are the header for a payload
+ * composed of almost anything. Each payload has a type assigned
+ * and a length.
+ */
+enum i2400m_tlv {
+ I2400M_TLV_L4_MESSAGE_VERSIONS = 129,
+ I2400M_TLV_SYSTEM_STATE = 141,
+ I2400M_TLV_MEDIA_STATUS = 161,
+ I2400M_TLV_RF_OPERATION = 162,
+ I2400M_TLV_RF_STATUS = 163,
+ I2400M_TLV_DEVICE_RESET_TYPE = 132,
+ I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601,
+ I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611,
+ I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614,
+ I2400M_TLV_CONFIG_DL_HOST_REORDER = 615,
+};
+
+
+struct i2400m_tlv_hdr {
+ __le16 type;
+ __le16 length; /* payload's */
+ __u8 pl[0];
+} __attribute__((packed));
+
+
+struct i2400m_l3l4_hdr {
+ __le16 type;
+ __le16 length; /* payload's */
+ __le16 version;
+ __le16 resv1;
+ __le16 status;
+ __le16 resv2;
+ struct i2400m_tlv_hdr pl[0];
+} __attribute__((packed));
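+
+/*
+ * Illustrative parsing sketch (not code from this driver): the TLVs
+ * in the payload of an L3L4 message are packed back to back, each
+ * one being its header plus 'length' bytes of payload, so walking
+ * them looks roughly like:
+ *
+ *	const struct i2400m_tlv_hdr *tlv = &l3l4_hdr->pl[0];
+ *	size_t left = payload_size;	(bytes of TLV data in the message)
+ *
+ *	while (left >= sizeof(*tlv)) {
+ *		size_t tlv_size = sizeof(*tlv) + le16_to_cpu(tlv->length);
+ *		... inspect tlv->type and the TLV payload here ...
+ *		left -= tlv_size;
+ *		tlv = (const void *) tlv + tlv_size;
+ *	}
+ *
+ * 'payload_size' is assumed to be the total size of the TLV area;
+ * control.c implements this walk (with the required bound checking)
+ * when parsing device replies and reports.
+ */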
+
+
+/**
+ * i2400m_system_state - different states of the device
+ */
+enum i2400m_system_state {
+ I2400M_SS_UNINITIALIZED = 1,
+ I2400M_SS_INIT,
+ I2400M_SS_READY,
+ I2400M_SS_SCAN,
+ I2400M_SS_STANDBY,
+ I2400M_SS_CONNECTING,
+ I2400M_SS_WIMAX_CONNECTED,
+ I2400M_SS_DATA_PATH_CONNECTED,
+ I2400M_SS_IDLE,
+ I2400M_SS_DISCONNECTING,
+ I2400M_SS_OUT_OF_ZONE,
+ I2400M_SS_SLEEPACTIVE,
+ I2400M_SS_PRODUCTION,
+ I2400M_SS_CONFIG,
+ I2400M_SS_RF_OFF,
+ I2400M_SS_RF_SHUTDOWN,
+ I2400M_SS_DEVICE_DISCONNECT,
+ I2400M_SS_MAX,
+};
+
+
+/**
+ * i2400m_tlv_system_state - report on the state of the system
+ *
+ * @state: see enum i2400m_system_state
+ */
+struct i2400m_tlv_system_state {
+ struct i2400m_tlv_hdr hdr;
+ __le32 state;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_l4_message_versions {
+ struct i2400m_tlv_hdr hdr;
+ __le16 major;
+ __le16 minor;
+ __le16 branch;
+ __le16 reserved;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_detailed_device_info {
+ struct i2400m_tlv_hdr hdr;
+ __u8 reserved1[400];
+ __u8 mac_address[ETH_ALEN];
+ __u8 reserved2[2];
+} __attribute__((packed));
+
+
+enum i2400m_rf_switch_status {
+ I2400M_RF_SWITCH_ON = 1,
+ I2400M_RF_SWITCH_OFF = 2,
+};
+
+struct i2400m_tlv_rf_switches_status {
+ struct i2400m_tlv_hdr hdr;
+ __u8 sw_rf_switch; /* 1 ON, 2 OFF */
+ __u8 hw_rf_switch; /* 1 ON, 2 OFF */
+ __u8 reserved[2];
+} __attribute__((packed));
+
+
+enum {
+ i2400m_rf_operation_on = 1,
+ i2400m_rf_operation_off = 2
+};
+
+struct i2400m_tlv_rf_operation {
+ struct i2400m_tlv_hdr hdr;
+ __le32 status; /* 1 ON, 2 OFF */
+} __attribute__((packed));
+
+
+enum i2400m_tlv_reset_type {
+ I2400M_RESET_TYPE_COLD = 1,
+ I2400M_RESET_TYPE_WARM
+};
+
+struct i2400m_tlv_device_reset_type {
+ struct i2400m_tlv_hdr hdr;
+ __le32 reset_type;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_config_idle_parameters {
+ struct i2400m_tlv_hdr hdr;
+ __le32 idle_timeout; /* 100 to 300000 ms [5min], 100 increments
+ * 0 disabled */
+ __le32 idle_paging_interval; /* frames */
+} __attribute__((packed));
+
+
+enum i2400m_media_status {
+ I2400M_MEDIA_STATUS_LINK_UP = 1,
+ I2400M_MEDIA_STATUS_LINK_DOWN,
+ I2400M_MEDIA_STATUS_LINK_RENEW,
+};
+
+struct i2400m_tlv_media_status {
+ struct i2400m_tlv_hdr hdr;
+ __le32 media_status;
+} __attribute__((packed));
+
+
+/* New in v1.4 */
+struct i2400m_tlv_config_idle_timeout {
+ struct i2400m_tlv_hdr hdr;
+ __le32 timeout; /* 100 to 300000 ms [5min], 100 increments
+ * 0 disabled */
+} __attribute__((packed));
+
+/* New in v1.4 -- for backward compat, will be removed */
+struct i2400m_tlv_config_d2h_data_format {
+ struct i2400m_tlv_hdr hdr;
+ __u8 format; /* 0 old format, 1 enhanced */
+ __u8 reserved[3];
+} __attribute__((packed));
+
+/* New in v1.4 */
+struct i2400m_tlv_config_dl_host_reorder {
+ struct i2400m_tlv_hdr hdr;
+ __u8 reorder; /* 0 disabled, 1 enabled */
+ __u8 reserved[3];
+} __attribute__((packed));
+
+
+#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */
diff --git a/drivers/staging/wimax/i2400m/netdev.c b/drivers/staging/wimax/i2400m/netdev.c
new file mode 100644
index 000000000000..a7fcbceb6e6b
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/netdev.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Glue with the networking stack
+ *
+ * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This implements an ethernet device for the i2400m.
+ *
+ * We fake being an ethernet device to simplify the support from user
+ * space and from the other side. The world is (sadly) configured to
+ * take in only Ethernet devices...
+ *
+ * Because of this, when using firmwares <= v1.3, there is a
+ * copy-each-rxed-packet overhead on the RX path. Each IP packet has
+ * to be reallocated to add an ethernet header (as there is no space
+ * in what we get from the device). This is a known drawback and
+ * firmwares >= 1.4 add header space that can be used to insert the
+ * ethernet header without having to reallocate and copy.
+ *
+ * TX error handling is tricky; because we have to FIFO/queue the
+ * buffers for transmission (as the hardware likes it aggregated), we
+ * just give the skb to the TX subsystem and by the time it is
+ * transmitted, we have long forgotten about it. So we just don't care
+ * too much about it.
+ *
+ * Note that when the device is in idle mode with the basestation, we
+ * need to negotiate coming back up online. That involves negotiation
+ * and possible user space interaction. Thus, we defer to a workqueue
+ * to do all that. By default, we only queue a single packet and drop
+ * the rest, as potentially the time to go back from idle to normal is
+ * long.
+ *
+ * ROADMAP
+ *
+ * i2400m_open Called on ifconfig up
+ * i2400m_stop Called on ifconfig down
+ *
+ * i2400m_hard_start_xmit Called by the network stack to send a packet
+ * i2400m_net_wake_tx Wake up device from basestation-IDLE & TX
+ * i2400m_wake_tx_work
+ * i2400m_cmd_exit_idle
+ * i2400m_tx
+ * i2400m_net_tx TX a data frame
+ * i2400m_tx
+ *
+ * i2400m_change_mtu Called on ifconfig mtu XXX
+ *
+ * i2400m_tx_timeout Called when the device times out
+ *
+ * i2400m_net_rx Called by the RX code when a data frame is
+ * available (firmware <= 1.3)
+ * i2400m_net_erx Called by the RX code when a data frame is
+ * available (firmware >= 1.4).
+ * i2400m_netdev_setup Called to setup all the netdev stuff from
+ * alloc_netdev.
+ */
+#include <linux/if_arp.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/export.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE netdev
+#include "debug-levels.h"
+
+enum {
+/* netdev interface */
+ /* 20 secs? yep, this is the maximum timeout that the device
+ * might take to get out of IDLE / negotiate it with the base
+ * station. We add 1sec for good measure. */
+ I2400M_TX_TIMEOUT = 21 * HZ,
+ /*
+ * Experimentation has determined 20 to be a good value for
+ * minimizing the jitter in the throughput.
+ */
+ I2400M_TX_QLEN = 20,
+};
+
+
+static
+int i2400m_open(struct net_device *net_dev)
+{
+ int result;
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
+ /* Make sure we wait until init is complete... */
+ mutex_lock(&i2400m->init_mutex);
+ if (i2400m->updown)
+ result = 0;
+ else
+ result = -EBUSY;
+ mutex_unlock(&i2400m->init_mutex);
+ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
+ net_dev, i2400m, result);
+ return result;
+}
+
+
+static
+int i2400m_stop(struct net_device *net_dev)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
+ i2400m_net_wake_stop(i2400m);
+ d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
+ return 0;
+}
+
+
+/*
+ * Wake up the device and transmit a held SKB, then restart the net queue
+ *
+ * When the device goes into basestation-idle mode, we need to tell it
+ * to exit that mode; it will negotiate with the base station, user
+ * space may have to intervene to rehandshake crypto and then tell us
+ * when it is ready to transmit the packet we have "queued". Still we
+ * need to give it sometime after it reports being ok.
+ *
+ * On error, there is not much we can do. If the error was on TX, we
+ * still wake the queue up to see if the next packet will be luckier.
+ *
+ * If _cmd_exit_idle() fails...well, it could be many things; most
+ * commonly it is that something else took the device out of IDLE mode
+ * (for example, the base station). In that case we get an -EILSEQ and
+ * we are just going to ignore that one. If the device is back to
+ * connected, then fine -- if it is in some other state, the packet will
+ * be dropped anyway.
+ */
+void i2400m_wake_tx_work(struct work_struct *ws)
+{
+ int result;
+ struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ skb = i2400m->wake_tx_skb;
+ i2400m->wake_tx_skb = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
+ result = -EINVAL;
+ if (skb == NULL) {
+ dev_err(dev, "WAKE&TX: skb disappeared!\n");
+ goto out_put;
+ }
+ /* If we have, somehow, lost the connection after this was
+ * queued, don't do anything; this might be the device got
+ * reset or just disconnected. */
+ if (unlikely(!netif_carrier_ok(net_dev)))
+ goto out_kfree;
+ result = i2400m_cmd_exit_idle(i2400m);
+ if (result == -EILSEQ)
+ result = 0;
+ if (result < 0) {
+ dev_err(dev, "WAKE&TX: device didn't get out of idle: "
+ "%d - resetting\n", result);
+ i2400m_reset(i2400m, I2400M_RT_BUS);
+ goto error;
+ }
+ result = wait_event_timeout(i2400m->state_wq,
+ i2400m->state != I2400M_SS_IDLE,
+ net_dev->watchdog_timeo - HZ/2);
+ if (result == 0)
+ result = -ETIMEDOUT;
+ if (result < 0) {
+ dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
+ "%d - resetting\n", result);
+ i2400m_reset(i2400m, I2400M_RT_BUS);
+ goto error;
+ }
+ msleep(20); /* device still needs some time or it drops it */
+ result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
+error:
+ netif_wake_queue(net_dev);
+out_kfree:
+ kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */
+out_put:
+ i2400m_put(i2400m);
+ d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n",
+ ws, i2400m, skb, result);
+}
+
+
+/*
+ * Prepare the data payload TX header
+ *
+ * The i2400m expects a 4 byte header in front of a data packet.
+ *
+ * Because we pretend to be an ethernet device, this packet comes with
+ * an ethernet header. Pull it and push our header.
+ */
+static
+void i2400m_tx_prep_header(struct sk_buff *skb)
+{
+ struct i2400m_pl_data_hdr *pl_hdr;
+ skb_pull(skb, ETH_HLEN);
+ pl_hdr = skb_push(skb, sizeof(*pl_hdr));
+ pl_hdr->reserved = 0;
+}
+
+
+
+/*
+ * Cleanup resources acquired during i2400m_net_wake_tx()
+ *
+ * This is called by __i2400m_dev_stop and means we have to make sure
+ * the workqueue is flushed from any pending work.
+ */
+void i2400m_net_wake_stop(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *wake_tx_skb;
+ unsigned long flags;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ /*
+ * See i2400m_hard_start_xmit(), references are taken there and
+ * here we release them if the packet was still pending.
+ */
+ cancel_work_sync(&i2400m->wake_tx_ws);
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ wake_tx_skb = i2400m->wake_tx_skb;
+ i2400m->wake_tx_skb = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ if (wake_tx_skb) {
+ i2400m_put(i2400m);
+ kfree_skb(wake_tx_skb);
+ }
+
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+/*
+ * TX an skb to an idle device
+ *
+ * When the device is in basestation-idle mode, we need to wake it up
+ * and then TX. So we queue a work_struct for doing so.
+ *
+ * We need to get an extra ref for the skb (so it is not dropped), as
+ * well as be careful not to queue more than one request (won't help
+ * at all). If more than one request comes or there are errors, we
+ * just drop the packets (see i2400m_hard_start_xmit()).
+ */
+static
+int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
+ struct sk_buff *skb)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned long flags;
+
+ d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
+ if (net_ratelimit()) {
+ d_printf(3, dev, "WAKE&NETTX: "
+ "skb %p sending %d bytes to radio\n",
+ skb, skb->len);
+ d_dump(4, dev, skb->data, skb->len);
+ }
+ /* We hold a ref count for i2400m and skb, so when
+ * stopping() the device, we need to cancel that work
+ * and if pending, release those resources. */
+ result = 0;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ if (!i2400m->wake_tx_skb) {
+ netif_stop_queue(net_dev);
+ i2400m_get(i2400m);
+ i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */
+ i2400m_tx_prep_header(skb);
+ result = schedule_work(&i2400m->wake_tx_ws);
+ WARN_ON(result == 0);
+ }
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ if (result == 0) {
+ /* Yes, this happens even if we stopped the
+ * queue -- blame the queue disciplines that
+ * queue without looking -- I guess there is a reason
+ * for that. */
+ if (net_ratelimit())
+ d_printf(1, dev, "NETTX: device exiting idle, "
+ "dropping skb %p, queue running %d\n",
+ skb, netif_queue_stopped(net_dev));
+ result = -EBUSY;
+ }
+ d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
+ return result;
+}
+
+
+/*
+ * Transmit a packet to the base station on behalf of the network stack.
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * We need to pull the ethernet header and add the hardware header,
+ * which is currently set to all zeroes and reserved.
+ */
+static
+int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
+ struct sk_buff *skb)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
+ i2400m, net_dev, skb);
+ /* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
+ netif_trans_update(net_dev);
+ i2400m_tx_prep_header(skb);
+ d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
+ skb, skb->len);
+ d_dump(4, dev, skb->data, skb->len);
+ result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
+ d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
+ i2400m, net_dev, skb, result);
+ return result;
+}
+
+
+/*
+ * Transmit a packet to the base station on behalf of the network stack
+ *
+ *
+ * Returns: NETDEV_TX_OK (always, even in case of error)
+ *
+ * In case of error, we just drop it. Reasons:
+ *
+ * - we add a hw header to each skb, and if the network stack
+ * retries, we have no way to know if that skb has it or not.
+ *
+ * - network protocols have their own drop-recovery mechanisms
+ *
+ * - there is not much else we can do
+ *
+ * If the device is idle, we need to wake it up; that is an operation
+ * that will sleep. See i2400m_net_wake_tx() for details.
+ */
+static
+netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct device *dev = i2400m_dev(i2400m);
+ int result = -1;
+
+ d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
+
+ if (skb_cow_head(skb, 0))
+ goto drop;
+
+ if (i2400m->state == I2400M_SS_IDLE)
+ result = i2400m_net_wake_tx(i2400m, net_dev, skb);
+ else
+ result = i2400m_net_tx(i2400m, net_dev, skb);
+ if (result < 0) {
+drop:
+ net_dev->stats.tx_dropped++;
+ } else {
+ net_dev->stats.tx_packets++;
+ net_dev->stats.tx_bytes += skb->len;
+ }
+ dev_kfree_skb(skb);
+ d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
+ return NETDEV_TX_OK;
+}
+
+
+static
+void i2400m_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
+{
+ /*
+ * We might want to kick the device
+ *
+ * There is not much we can do though, as the device requires
+ * that we send the data aggregated. By the time we receive
+ * this, there might be data pending to be sent or not...
+ */
+ net_dev->stats.tx_errors++;
+}
+
+
+/*
+ * Create a fake ethernet header
+ *
+ * For emulating an ethernet device, every received IP header has to
+ * be prefixed with an ethernet header. Fake it with the given
+ * protocol.
+ */
+static
+void i2400m_rx_fake_eth_header(struct net_device *net_dev,
+ void *_eth_hdr, __be16 protocol)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct ethhdr *eth_hdr = _eth_hdr;
+
+ memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest));
+ memcpy(eth_hdr->h_source, i2400m->src_mac_addr,
+ sizeof(eth_hdr->h_source));
+ eth_hdr->h_proto = protocol;
+}
+
+
+/*
+ * i2400m_net_rx - pass a network packet to the stack
+ *
+ * @i2400m: device instance
+ * @skb_rx: the skb that contains the buffer pointed to by @buf
+ * @i: 1 if this payload is the last (or only) one in the message
+ * @buf: pointer to the buffer containing the data
+ * @buf_len: buffer's length
+ *
+ * This is only used now for the v1.3 firmware. It will be deprecated
+ * in >= 2.6.31.
+ *
+ * Note that due to firmware limitations, we don't have space to add
+ * an ethernet header, so we need to copy each packet. Firmware
+ * versions >= v1.4 fix this [see i2400m_net_erx()].
+ *
+ * We just clone the skb and set it up so that its skb->data pointer
+ * points to "buf" and its length matches the buffer's.
+ *
+ * Note that if the payload is the last (or the only one) in a
+ * multi-payload message, we don't clone the SKB but just reuse it.
+ *
+ * This function is normally run from a thread context. However, we
+ * still use netif_rx() instead of netif_receive_skb(), which was
+ * recommended on the mailing list. The reason is that in some stress
+ * tests, when sending/receiving a lot of data, we seem to hit a
+ * softlock in the kernel's TCP implementation [around
+ * tcp_delay_timer()]. Using netif_rx() took care of the issue.
+ *
+ * This is, of course, still open to more research on why running
+ * with netif_receive_skb() hits this softlock. FIXME.
+ *
+ * FIXME: currently we make no effort to distinguish whether what we
+ * got was an IPv4 or IPv6 header, so as to set up the protocol field
+ * correctly.
+ */
+void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
+ unsigned i, const void *buf, int buf_len)
+{
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *skb;
+
+ d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
+ i2400m, buf, buf_len);
+ if (i) {
+ skb = skb_get(skb_rx);
+ d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
+ skb_pull(skb, buf - (void *) skb->data);
+ skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
+ } else {
+ /* Yes, this is bad -- a lot of overhead -- see
+ * comments at the top of the file */
+ skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
+ if (skb == NULL) {
+ dev_err(dev, "NETRX: no memory to realloc skb\n");
+ net_dev->stats.rx_dropped++;
+ goto error_skb_realloc;
+ }
+ skb_put_data(skb, buf, buf_len);
+ }
+ i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
+ skb->data - ETH_HLEN,
+ cpu_to_be16(ETH_P_IP));
+ skb_set_mac_header(skb, -ETH_HLEN);
+ skb->dev = i2400m->wimax_dev.net_dev;
+ skb->protocol = htons(ETH_P_IP);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += buf_len;
+ d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
+ buf_len);
+ d_dump(4, dev, buf, buf_len);
+ netif_rx_ni(skb); /* see notes in function header */
+error_skb_realloc:
+ d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
+ i2400m, buf, buf_len);
+}
+
+
+/*
+ * i2400m_net_erx - pass a network packet to the stack (extended version)
+ *
+ * @i2400m: device descriptor
+ * @skb: the skb where the packet is - the skb should be set to point
+ * at the IP packet; this function will add ethernet headers if
+ * needed.
+ * @cs: packet type
+ *
+ * This is only used now for firmware >= v1.4. Note it is quite
+ * similar to i2400m_net_rx() (used only for v1.3 firmware).
+ *
+ * This function is normally run from a thread context. However, we
+ * still use netif_rx() instead of netif_receive_skb(), which was
+ * recommended on the mailing list. The reason is that in some stress
+ * tests, when sending/receiving a lot of data, we seem to hit a
+ * softlock in the kernel's TCP implementation [around
+ * tcp_delay_timer()]. Using netif_rx() took care of the issue.
+ *
+ * This is, of course, still open to more research on why running
+ * with netif_receive_skb() hits this softlock. FIXME.
+ */
+void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb,
+ enum i2400m_cs cs)
+{
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n",
+ i2400m, skb, skb->len, cs);
+ switch(cs) {
+ case I2400M_CS_IPV4_0:
+ case I2400M_CS_IPV4:
+ i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
+ skb->data - ETH_HLEN,
+ cpu_to_be16(ETH_P_IP));
+ skb_set_mac_header(skb, -ETH_HLEN);
+ skb->dev = i2400m->wimax_dev.net_dev;
+ skb->protocol = htons(ETH_P_IP);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += skb->len;
+ break;
+ default:
+ dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs);
+ goto error;
+
+ }
+ d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n",
+ skb->len);
+ d_dump(4, dev, skb->data, skb->len);
+ netif_rx_ni(skb); /* see notes in function header */
+error:
+ d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n",
+ i2400m, skb, skb->len, cs);
+}
+
+static const struct net_device_ops i2400m_netdev_ops = {
+ .ndo_open = i2400m_open,
+ .ndo_stop = i2400m_stop,
+ .ndo_start_xmit = i2400m_hard_start_xmit,
+ .ndo_tx_timeout = i2400m_tx_timeout,
+};
+
+static void i2400m_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->fw_version, i2400m->fw_name ? : "",
+ sizeof(info->fw_version));
+ if (net_dev->dev.parent)
+ strlcpy(info->bus_info, dev_name(net_dev->dev.parent),
+ sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops i2400m_ethtool_ops = {
+ .get_drvinfo = i2400m_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/**
+ * i2400m_netdev_setup - Set up @net_dev's i2400m private data
+ *
+ * Called by alloc_netdev()
+ */
+void i2400m_netdev_setup(struct net_device *net_dev)
+{
+ d_fnstart(3, NULL, "(net_dev %p)\n", net_dev);
+ ether_setup(net_dev);
+ net_dev->mtu = I2400M_MAX_MTU;
+ net_dev->min_mtu = 0;
+ net_dev->max_mtu = I2400M_MAX_MTU;
+ net_dev->tx_queue_len = I2400M_TX_QLEN;
+ net_dev->features =
+ NETIF_F_VLAN_CHALLENGED
+ | NETIF_F_HIGHDMA;
+ net_dev->flags =
+ IFF_NOARP /* i2400m is a pure IP device */
+ & (~IFF_BROADCAST /* i2400m is P2P */
+ & ~IFF_MULTICAST);
+ net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
+ net_dev->netdev_ops = &i2400m_netdev_ops;
+ net_dev->ethtool_ops = &i2400m_ethtool_ops;
+ d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
+}
+EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
+
diff --git a/drivers/staging/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
new file mode 100644
index 000000000000..fbddf2e18c14
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/op-rfkill.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Implement backend for the WiMAX stack rfkill support
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * The WiMAX kernel stack integrates into RF-Kill and keeps the
+ * switches' status. We just need to:
+ *
+ * - report changes in the HW RF Kill switch [with
+ * wimax_report_rfkill_{sw,hw}(), which happens when we detect those
+ * indications coming through hardware reports]. We also do it on
+ * initialization to let the stack know the initial HW state.
+ *
+ * - implement indications from the stack to change the SW RF Kill
+ * switch (coming from sysfs, the wimax stack or user space).
+ */
+#include "i2400m.h"
+#include "linux-wimax-i2400m.h"
+#include <linux/slab.h>
+
+
+
+#define D_SUBMODULE rfkill
+#include "debug-levels.h"
+
+/*
+ * Return true if the i2400m radio is in the requested wimax_rf_state state
+ *
+ */
+static
+int i2400m_radio_is(struct i2400m *i2400m, enum wimax_rf_state state)
+{
+ if (state == WIMAX_RF_OFF)
+ return i2400m->state == I2400M_SS_RF_OFF
+ || i2400m->state == I2400M_SS_RF_SHUTDOWN;
+ else if (state == WIMAX_RF_ON)
+ /* state == WIMAX_RF_ON */
+ return i2400m->state != I2400M_SS_RF_OFF
+ && i2400m->state != I2400M_SS_RF_SHUTDOWN;
+ else {
+ BUG();
+ return -EINVAL; /* shut gcc warnings on certain arches */
+ }
+}
+
+
+/*
+ * WiMAX stack operation: implement SW RFKill toggling
+ *
+ * @wimax_dev: device descriptor
+ * @state: requested software RF-Kill switch state
+ *
+ * The WiMAX stack calls this function when a request arrives (over
+ * Generic Netlink, from user space) to change the software RF-Kill
+ * switch status.
+ *
+ * This function will set the device's software RF-Kill switch state to
+ * match what is requested.
+ *
+ * NOTE: the i2400m has a strict state machine; we can only set the
+ * RF-Kill switch when it is on, the HW RF-Kill is on and the
+ * device is initialized. So we ignore errors stemming from not
+ * being in the right state (-EILSEQ).
+ */
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state state)
+{
+ int result;
+ struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev);
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *ack_skb;
+ struct {
+ struct i2400m_l3l4_hdr hdr;
+ struct i2400m_tlv_rf_operation sw_rf;
+ } __packed *cmd;
+ char strerr[32];
+
+ d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state);
+
+ result = -ENOMEM;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ goto error_alloc;
+ cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
+ cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
+ cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
+ cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
+ cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
+ switch (state) {
+ case WIMAX_RF_OFF: /* RFKILL ON, radio OFF */
+ cmd->sw_rf.status = cpu_to_le32(2);
+ break;
+ case WIMAX_RF_ON: /* RFKILL OFF, radio ON */
+ cmd->sw_rf.status = cpu_to_le32(1);
+ break;
+ default:
+ BUG();
+ }
+
+ ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd));
+ result = PTR_ERR(ack_skb);
+ if (IS_ERR(ack_skb)) {
+ dev_err(dev, "Failed to issue 'RF Control' command: %d\n",
+ result);
+ goto error_msg_to_dev;
+ }
+ result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
+ strerr, sizeof(strerr));
+ if (result < 0) {
+ dev_err(dev, "'RF Control' (0x%04x) command failed: %d - %s\n",
+ I2400M_MT_CMD_RF_CONTROL, result, strerr);
+ goto error_cmd;
+ }
+
+ /* Now we wait for the state to change to RADIO_OFF or RADIO_ON */
+ result = wait_event_timeout(
+ i2400m->state_wq, i2400m_radio_is(i2400m, state),
+ 5 * HZ);
+ if (result == 0)
+ result = -ETIMEDOUT;
+ if (result < 0)
+ dev_err(dev, "Error waiting for device to toggle RF state: "
+ "%d\n", result);
+ result = 0;
+error_cmd:
+ kfree_skb(ack_skb);
+error_msg_to_dev:
+error_alloc:
+ d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
+ wimax_dev, state, result);
+ kfree(cmd);
+ return result;
+}
+
+
+/*
+ * Inform the WiMAX stack of changes in the RF Kill switches reported
+ * by the device
+ *
+ * @i2400m: device descriptor
+ * @rfss: TLV for RF Switches status; already validated
+ *
+ * NOTE: the reports on RF switch status cannot be trusted
+ * or used until the device is in a state of RADIO_OFF
+ * or greater.
+ */
+void i2400m_report_tlv_rf_switches_status(
+ struct i2400m *i2400m,
+ const struct i2400m_tlv_rf_switches_status *rfss)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ enum i2400m_rf_switch_status hw, sw;
+ enum wimax_st wimax_state;
+
+ sw = rfss->sw_rf_switch; /* fields are __u8; no endian conversion */
+ hw = rfss->hw_rf_switch;
+
+ d_fnstart(3, dev, "(i2400m %p rfss %p [hw %u sw %u])\n",
+ i2400m, rfss, hw, sw);
+ /* We only process RF switch events when the device has been
+ * fully initialized */
+ wimax_state = wimax_state_get(&i2400m->wimax_dev);
+ if (wimax_state < WIMAX_ST_RADIO_OFF) {
+ d_printf(3, dev, "ignoring RF switches report, state %u\n",
+ wimax_state);
+ goto out;
+ }
+ switch (sw) {
+ case I2400M_RF_SWITCH_ON: /* RF Kill disabled (radio on) */
+ wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_ON);
+ break;
+ case I2400M_RF_SWITCH_OFF: /* RF Kill enabled (radio off) */
+ wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_OFF);
+ break;
+ default:
+ dev_err(dev, "HW BUG? Unknown RF SW state 0x%x\n", sw);
+ }
+
+ switch (hw) {
+ case I2400M_RF_SWITCH_ON: /* RF Kill disabled (radio on) */
+ wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_ON);
+ break;
+ case I2400M_RF_SWITCH_OFF: /* RF Kill enabled (radio off) */
+ wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_OFF);
+ break;
+ default:
+ dev_err(dev, "HW BUG? Unknown RF HW state 0x%x\n", hw);
+ }
+out:
+ d_fnend(3, dev, "(i2400m %p rfss %p [hw %u sw %u]) = void\n",
+ i2400m, rfss, hw, sw);
+}
diff --git a/drivers/staging/wimax/i2400m/rx.c b/drivers/staging/wimax/i2400m/rx.c
new file mode 100644
index 000000000000..c9fb619a9e01
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/rx.c
@@ -0,0 +1,1395 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Handle incoming traffic and deliver it to the control or data planes
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * - Initial implementation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Use skb_clone(), break up processing in chunks
+ * - Split transport/device specific
+ * - Make buffer size dynamic to exert less memory pressure
+ * - RX reorder support
+ *
+ * This handles the RX path.
+ *
+ * We receive an RX message from the bus-specific driver, which
+ * contains one or more payloads that have potentially different
+ * destinations (the data or control paths).
+ *
+ * So we just take that payload from the transport specific code in
+ * the form of an skb, break it up in chunks (a cloned skb each in the
+ * case of network packets) and pass it to netdev or to the
+ * command/ack handler (and from there to the WiMAX stack).
+ *
+ * PROTOCOL FORMAT
+ *
+ * The format of the buffer is:
+ *
+ * HEADER (struct i2400m_msg_hdr)
+ * PAYLOAD DESCRIPTOR 0 (struct i2400m_pld)
+ * PAYLOAD DESCRIPTOR 1
+ * ...
+ * PAYLOAD DESCRIPTOR N
+ * PAYLOAD 0 (raw bytes)
+ * PAYLOAD 1
+ * ...
+ * PAYLOAD N
+ *
+ * See tx.c for a deeper description on alignment requirements and
+ * other fun facts of it.
+ *
+ * DATA PACKETS
+ *
+ * In firmwares <= v1.3, data packets have no header for RX, but they
+ * do for TX (currently unused).
+ *
+ * In firmware >= 1.4, RX packets have an extended header (16
+ * bytes). This header conveys information for management of host
+ * reordering of packets (the device offloads storage of the packets
+ * for reordering to the host). Read below for more information.
+ *
+ * The header is used as dummy space to emulate an ethernet header,
+ * which lets the driver act as an ethernet device without having to
+ * reallocate.
+ *
+ * DATA RX REORDERING
+ *
+ * Starting in firmware v1.4, the device can deliver packets with
+ * special reordering information; this allows it to manage packets
+ * more effectively when some frames were lost in the radio traffic.
+ *
+ * Thus, for RX packets that come out of order, the device gives the
+ * driver enough information to queue them properly and then at some
+ * point, the signal to deliver the whole (or part) of the queued
+ * packets to the networking stack. There are 16 such queues.
+ *
+ * This only happens when a packet comes in with the "need reorder"
+ * flag set in the RX header. When that bit is set, one of the
+ * following operations might be indicated:
+ *
+ * - reset queue: send all queued packets to the OS
+ *
+ * - queue: queue a packet
+ *
+ * - update ws: update the queue's window start and deliver queued
+ * packets that meet the criteria
+ *
+ * - queue & update ws: queue a packet, update the window start and
+ * deliver queued packets that meet the criteria
+ *
+ * (delivery criteria: the packet's [normalized] sequence number is
+ * lower than the new [normalized] window start).
+ *
+ * See the i2400m_roq_*() functions for details.
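+ *
+ * A small worked example (numbers are illustrative): say a queue has
+ * ws = 10 and packets with sn = 11, 12 and 14 arrive as "queue"
+ * operations (sn 13 was lost in the air). They are kept, sorted by
+ * their normalized sn (1, 2 and 4). If the device then sends an
+ * "update ws" operation with sn = 13, packets 11 and 12 (normalized
+ * sn 1 and 2, lower than the new normalized ws of 3) are delivered
+ * to the networking stack and the window start becomes 13.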
+ *
+ * ROADMAP
+ *
+ * i2400m_rx
+ * i2400m_rx_msg_hdr_check
+ * i2400m_rx_pl_descr_check
+ * i2400m_rx_payload
+ * i2400m_net_rx
+ * i2400m_rx_edata
+ * i2400m_net_erx
+ * i2400m_roq_reset
+ * i2400m_net_erx
+ * i2400m_roq_queue
+ * __i2400m_roq_queue
+ * i2400m_roq_update_ws
+ * __i2400m_roq_update_ws
+ * i2400m_net_erx
+ * i2400m_roq_queue_update_ws
+ * __i2400m_roq_queue
+ * __i2400m_roq_update_ws
+ * i2400m_net_erx
+ * i2400m_rx_ctl
+ * i2400m_msg_size_check
+ * i2400m_report_hook_work [in a workqueue]
+ * i2400m_report_hook
+ * wimax_msg_to_user
+ * i2400m_rx_ctl_ack
+ * wimax_msg_to_user_alloc
+ * i2400m_rx_trace
+ * i2400m_msg_size_check
+ * wimax_msg
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE rx
+#include "debug-levels.h"
+
+static int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
+module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
+MODULE_PARM_DESC(rx_reorder_disabled,
+ "If true, RX reordering will be disabled.");
+
+struct i2400m_report_hook_args {
+ struct sk_buff *skb_rx;
+ const struct i2400m_l3l4_hdr *l3l4_hdr;
+ size_t size;
+ struct list_head list_node;
+};
+
+
+/*
+ * Execute i2400m_report_hook in a workqueue
+ *
+ * Goes over the list of queued reports in i2400m->rx_reports and
+ * processes them.
+ *
+ * NOTE: refcounts on i2400m are not needed because we flush the
+ * workqueue this runs on (i2400m->work_queue) before destroying
+ * i2400m.
+ */
+void i2400m_report_hook_work(struct work_struct *ws)
+{
+ struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_report_hook_args *args, *args_next;
+ LIST_HEAD(list);
+ unsigned long flags;
+
+ while (1) {
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ list_splice_init(&i2400m->rx_reports, &list);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ if (list_empty(&list))
+ break;
+ else
+ d_printf(1, dev, "processing queued reports\n");
+ list_for_each_entry_safe(args, args_next, &list, list_node) {
+ d_printf(2, dev, "processing queued report %p\n", args);
+ i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
+ kfree_skb(args->skb_rx);
+ list_del(&args->list_node);
+ kfree(args);
+ }
+ }
+}
+
+
+/*
+ * Flush the list of queued reports
+ */
+static
+void i2400m_report_hook_flush(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_report_hook_args *args, *args_next;
+ LIST_HEAD(list);
+ unsigned long flags;
+
+ d_printf(1, dev, "flushing queued reports\n");
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ list_splice_init(&i2400m->rx_reports, &list);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ list_for_each_entry_safe(args, args_next, &list, list_node) {
+ d_printf(2, dev, "flushing queued report %p\n", args);
+ kfree_skb(args->skb_rx);
+ list_del(&args->list_node);
+ kfree(args);
+ }
+}
+
+
+/*
+ * Queue a report for later processing
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the payload (for reference counting)
+ * @l3l4_hdr: pointer to the control message
+ * @size: size of the message
+ */
+static
+void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
+ const void *l3l4_hdr, size_t size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned long flags;
+ struct i2400m_report_hook_args *args;
+
+ args = kzalloc(sizeof(*args), GFP_NOIO);
+ if (args) {
+ args->skb_rx = skb_get(skb_rx);
+ args->l3l4_hdr = l3l4_hdr;
+ args->size = size;
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ list_add_tail(&args->list_node, &i2400m->rx_reports);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ d_printf(2, dev, "queued report %p\n", args);
+ rmb(); /* see i2400m->ready's documentation */
+ if (likely(i2400m->ready)) /* only send if up */
+ queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
+ } else {
+ if (printk_ratelimit())
+ dev_err(dev, "%s:%u: Can't allocate %zu B\n",
+ __func__, __LINE__, sizeof(*args));
+ }
+}
+
+
+/*
+ * Process an ack to a command
+ *
+ * @i2400m: device descriptor
+ * @payload: pointer to message
+ * @size: size of the message
+ *
+ * Pass the acknowledgment (in an skb) to the thread that is waiting
+ * for it in i2400m->msg_completion.
+ *
+ * We need to coordinate properly with the thread waiting for the
+ * ack. Check if it is waiting or if it is gone. We drop the spinlock
+ * to avoid allocating in atomic context (yeah, we could use GFP_ATOMIC,
+ * but this is not so speed critical).
+ */
+static
+void i2400m_rx_ctl_ack(struct i2400m *i2400m,
+ const void *payload, size_t size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ unsigned long flags;
+ struct sk_buff *ack_skb;
+
+ /* Anyone waiting for an answer? */
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
+ dev_err(dev, "Huh? reply to command with no waiters\n");
+ goto error_no_waiter;
+ }
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
+ ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);
+
+ /* Check waiter didn't time out waiting for the answer... */
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
+ d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
+ goto error_waiter_cancelled;
+ }
+ if (IS_ERR(ack_skb))
+ dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
+ i2400m->ack_skb = ack_skb;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ complete(&i2400m->msg_completion);
+ return;
+
+error_waiter_cancelled:
+ if (!IS_ERR(ack_skb))
+ kfree_skb(ack_skb);
+error_no_waiter:
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+}
+
+
+/*
+ * Receive and process a control payload
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the payload (for reference counting)
+ * @payload: pointer to message
+ * @size: size of the message
+ *
+ * There are two types of control RX messages: reports (asynchronous,
+ * like your everyday interrupts) and 'acks' (responses to a command,
+ * get or set request).
+ *
+ * If it is a report, we run hooks on it (to extract information for
+ * things we need to do in the driver) and then pass it over to the
+ * WiMAX stack to send it to user space.
+ *
+ * NOTE: report processing is done in a workqueue specific to the
+ * generic driver, to avoid deadlocks in the system.
+ *
+ * If it is not a report, it is an ack to a previously executed
+ * command, set or get, so wake up whoever is waiting for it from
+ * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
+ *
+ * Note that the sizes we pass to other functions from here are the
+ * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we have
+ * verified in _msg_size_check() that they are congruent.
+ *
+ * For reports: We can't clone the original skb where the data is
+ * because we need to send this up via netlink; netlink has to add
+ * headers and we can't overwrite what's preceding the payload...as
+ * it is another message. So we just dup them.
+ */
+static
+void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
+ const void *payload, size_t size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
+ unsigned msg_type;
+
+ result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
+ if (result < 0) {
+ dev_err(dev, "HW BUG? device sent a bad message: %d\n",
+ result);
+ goto error_check;
+ }
+ msg_type = le16_to_cpu(l3l4_hdr->type);
+ d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
+ msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
+ msg_type, size);
+ d_dump(2, dev, l3l4_hdr, size);
+ if (msg_type & I2400M_MT_REPORT_MASK) {
+ /*
+ * Process each report
+ *
+ * - has to be run serialized as well
+ *
+ * - the handling might force the execution of
+ * commands. That might cause reentrancy issues with
+ * bus-specific subdrivers and workqueues, so we
+ * run it in a separate workqueue.
+ *
+ * - when the driver is not yet ready to handle them,
+ * they are queued and at some point the queue is
+ * restarted [NOTE: we can't queue SKBs directly, as
+ * this might be a piece of a SKB, not the whole
+ * thing, and this is cheaper than cloning the
+ * SKB].
+ *
+ * Note we don't do refcounting for the device
+ * structure; this is because before destroying
+ * 'i2400m', we make sure to flush the
+ * i2400m->work_queue, so there are no issues.
+ */
+ i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
+ if (unlikely(i2400m->trace_msg_from_user))
+ wimax_msg(&i2400m->wimax_dev, "echo",
+ l3l4_hdr, size, GFP_KERNEL);
+ result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
+ GFP_KERNEL);
+ if (result < 0)
+ dev_err(dev, "error sending report to userspace: %d\n",
+ result);
+ } else /* an ack to a CMD, GET or SET */
+ i2400m_rx_ctl_ack(i2400m, payload, size);
+error_check:
+ return;
+}
+
+
+/*
+ * Receive and send up a trace
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the trace (for reference counting)
+ * @payload: pointer to trace message inside the skb
+ * @size: size of the message
+ *
+ * The i2400m might produce trace information (diagnostics) and we
+ * send them through a different kernel-to-user pipe (to avoid
+ * clogging it).
+ *
+ * As in i2400m_rx_ctl(), we can't clone the original skb where the
+ * data is because we need to send this up via netlink; netlink has to
+ * add headers and we can't overwrite what's preceding the
+ * payload...as it is another message. So we just dup them.
+ */
+static
+void i2400m_rx_trace(struct i2400m *i2400m,
+ const void *payload, size_t size)
+{
+ int result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
+ unsigned msg_type;
+
+ result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
+ if (result < 0) {
+ dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
+ result);
+ goto error_check;
+ }
+ msg_type = le16_to_cpu(l3l4_hdr->type);
+ d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
+ msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
+ msg_type, size);
+ d_dump(2, dev, l3l4_hdr, size);
+ result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
+ if (result < 0)
+ dev_err(dev, "error sending trace to userspace: %d\n",
+ result);
+error_check:
+ return;
+}
+
+
+/*
+ * Reorder queue data stored on skb->cb while the skb is queued in the
+ * reorder queues.
+ */
+struct i2400m_roq_data {
+ unsigned sn; /* Serial number for the skb */
+ enum i2400m_cs cs; /* packet type for the skb */
+};
+
+
+/*
+ * ReOrder Queue
+ *
+ * @ws: Window Start; sequence number where the current window start
+ * is for this queue
+ * @queue: the skb queue itself
+ * @log: circular ring buffer used to log information about the
+ * reorder process in this queue that can be displayed in case of
+ * error to help diagnose it.
+ *
+ * This is the head for a list of skbs. The skb->cb member of each
+ * skb queued here contains a 'struct i2400m_roq_data' where we
+ * store the sequence number (sn) and the cs (packet type) coming from
+ * the RX payload header from the device.
+ */
+struct i2400m_roq
+{
+ unsigned ws;
+ struct sk_buff_head queue;
+ struct i2400m_roq_log *log;
+};
+
+
+static
+void __i2400m_roq_init(struct i2400m_roq *roq)
+{
+ roq->ws = 0;
+ skb_queue_head_init(&roq->queue);
+}
+
+
+static
+unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
+{
+ return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
+ / sizeof(*roq);
+}
+
+
+/*
+ * Normalize a sequence number based on the queue's window start
+ *
+ * nsn = (sn - ws) % 2048
+ *
+ * Note that if @sn < @roq->ws, we still need a positive number; the
+ * result of % can be negative in that case, so we normalize it by
+ * adding 2048 to make it positive.
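+ *
+ * For example (illustrative values): with ws = 2040 and sn = 5,
+ * (5 - 2040) % 2048 = -2035, which after adding 2048 gives nsn = 13;
+ * i.e. sn 5 is 13 slots ahead of the window start, modulo 2048.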
+ */
+static
+unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn)
+{
+ int r;
+ r = ((int) sn - (int) roq->ws) % 2048;
+ if (r < 0)
+ r += 2048;
+ return r;
+}
+
+
+/*
+ * Circular buffer to keep the last N reorder operations
+ *
+ * In case something fails, dump them to try to come up with what
+ * happened.
+ */
+enum {
+ I2400M_ROQ_LOG_LENGTH = 32,
+};
+
+struct i2400m_roq_log {
+ struct i2400m_roq_log_entry {
+ enum i2400m_ro_type type;
+ unsigned ws, count, sn, nsn, new_ws;
+ } entry[I2400M_ROQ_LOG_LENGTH];
+ unsigned in, out;
+};
+
+
+/* Print a log entry */
+static
+void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
+ unsigned e_index,
+ struct i2400m_roq_log_entry *e)
+{
+ struct device *dev = i2400m_dev(i2400m);
+
+ switch(e->type) {
+ case I2400M_RO_TYPE_RESET:
+ dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u"
+ " - new nws %u\n",
+ index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
+ break;
+ case I2400M_RO_TYPE_PACKET:
+ dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n",
+ index, e->ws, e->count, e->sn, e->nsn);
+ break;
+ case I2400M_RO_TYPE_WS:
+ dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u"
+ " - new nws %u\n",
+ index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
+ break;
+ case I2400M_RO_TYPE_PACKET_WS:
+ dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u"
+ " - new nws %u\n",
+ index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
+ break;
+ default:
+ dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n",
+ index, e_index, e->type);
+ break;
+ }
+}
+
+
+static
+void i2400m_roq_log_add(struct i2400m *i2400m,
+ struct i2400m_roq *roq, enum i2400m_ro_type type,
+ unsigned ws, unsigned count, unsigned sn,
+ unsigned nsn, unsigned new_ws)
+{
+ struct i2400m_roq_log_entry *e;
+ unsigned cnt_idx;
+ int index = __i2400m_roq_index(i2400m, roq);
+
+ /* if we run out of space, we eat from the end */
+ if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH)
+ roq->log->out++;
+ cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH;
+ e = &roq->log->entry[cnt_idx];
+
+ e->type = type;
+ e->ws = ws;
+ e->count = count;
+ e->sn = sn;
+ e->nsn = nsn;
+ e->new_ws = new_ws;
+
+ if (d_test(1))
+ i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
+}
+
+
+/* Dump all the entries in the FIFO and reinitialize it */
+static
+void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
+{
+ unsigned cnt, cnt_idx;
+ struct i2400m_roq_log_entry *e;
+ int index = __i2400m_roq_index(i2400m, roq);
+
+ BUG_ON(roq->log->out > roq->log->in);
+ for (cnt = roq->log->out; cnt < roq->log->in; cnt++) {
+ cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH;
+ e = &roq->log->entry[cnt_idx];
+ i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
+ memset(e, 0, sizeof(*e));
+ }
+ roq->log->in = roq->log->out = 0;
+}
+
+
+/*
+ * Backbone for the queuing of an skb (by normalized sequence number)
+ *
+ * @i2400m: device descriptor
+ * @roq: reorder queue where to add
+ * @skb: the skb to add
+ * @sn: the sequence number of the skb
+ * @nsn: the normalized sequence number of the skb (pre-computed by the
+ * caller from the @sn and @roq->ws).
+ *
+ * We try first a couple of quick cases:
+ *
+ * - the queue is empty
+ * - the skb would be appended to the queue
+ *
+ * These will be the most common operations.
+ *
+ * If these fail, then we have to do a sorted insertion in the queue,
+ * which is the slowest path.
+ *
+ * We don't have to acquire a reference count as we are going to own it.
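+ *
+ * For instance (illustrative): if the queue already holds packets
+ * with normalized sn 2, 5 and 9 and a packet with nsn 7 arrives, the
+ * empty and append fast paths fail and the walk inserts it before
+ * the nsn 9 entry, keeping the queue sorted as 2, 5, 7, 9.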
+ */
+static
+void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
+ struct sk_buff *skb, unsigned sn, unsigned nsn)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *skb_itr;
+ struct i2400m_roq_data *roq_data_itr, *roq_data;
+ unsigned nsn_itr;
+
+ d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
+ i2400m, roq, skb, sn, nsn);
+
+ roq_data = (struct i2400m_roq_data *) &skb->cb;
+ BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
+ roq_data->sn = sn;
+ d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n",
+ roq, roq->ws, nsn, roq_data->sn);
+
+ /* Queues will be empty on not-so-bad environments, so try
+ * that first */
+ if (skb_queue_empty(&roq->queue)) {
+ d_printf(2, dev, "ERX: roq %p - first one\n", roq);
+ __skb_queue_head(&roq->queue, skb);
+ goto out;
+ }
+ /* Now try append, as most of the operations will be that */
+ skb_itr = skb_peek_tail(&roq->queue);
+ roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+ nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
+ /* NSN bounds assumed correct (checked when it was queued) */
+ if (nsn >= nsn_itr) {
+ d_printf(2, dev, "ERX: roq %p - appended after %p (nsn %d sn %u)\n",
+ roq, skb_itr, nsn_itr, roq_data_itr->sn);
+ __skb_queue_tail(&roq->queue, skb);
+ goto out;
+ }
+ /* None of the fast path options worked. Iterate to find the
+ * right spot to insert the packet; we know the queue is
+ * not empty, so we are not the first ones; we also know we
+ * are not going to be the last ones. The list is sorted, so
+ * we have to insert before the first entry with an nsn_itr
+ * greater than our nsn. */
+ skb_queue_walk(&roq->queue, skb_itr) {
+ roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+ nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
+ /* NSN bounds assumed correct (checked when it was queued) */
+ if (nsn_itr > nsn) {
+ d_printf(2, dev, "ERX: roq %p - queued before %p "
+ "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr,
+ roq_data_itr->sn);
+ __skb_queue_before(&roq->queue, skb_itr, skb);
+ goto out;
+ }
+ }
+ /* If we get here, that is VERY bad -- print info to help
+ * diagnose and crash it */
+ dev_err(dev, "SW BUG? failed to insert packet\n");
+ dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n",
+ roq, roq->ws, skb, nsn, roq_data->sn);
+ skb_queue_walk(&roq->queue, skb_itr) {
+ roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+ nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
+ /* NSN bounds assumed correct (checked when it was queued) */
+ dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n",
+ roq, skb_itr, nsn_itr, roq_data_itr->sn);
+ }
+ BUG();
+out:
+ d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
+ i2400m, roq, skb, sn, nsn);
+}
+
+
+/*
+ * Backbone for the update window start operation
+ *
+ * @i2400m: device descriptor
+ * @roq: Reorder queue
+ * @sn: New sequence number
+ *
+ * Updates the window start of a queue; when doing so, it must deliver
+ * to the networking stack all the queued skb's whose normalized
+ * sequence number is lower than the new normalized window start.
+ */
+static
+unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
+ unsigned sn)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *skb_itr, *tmp_itr;
+ struct i2400m_roq_data *roq_data_itr;
+ unsigned new_nws, nsn_itr;
+
+ new_nws = __i2400m_roq_nsn(roq, sn);
+ /*
+ * For type 2 (update_window_start) rx messages, there is no
+ * need to check if the normalized sequence number is greater
+ * than 1023. Simply deliver all queued packets to the host up
+ * to the new window start.
+ */
+ skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
+ roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+ nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
+ /* NSN bounds assumed correct (checked when it was queued) */
+ if (nsn_itr < new_nws) {
+ d_printf(2, dev, "ERX: roq %p - release skb %p "
+ "(nsn %u/%u new nws %u)\n",
+ roq, skb_itr, nsn_itr, roq_data_itr->sn,
+ new_nws);
+ __skb_unlink(skb_itr, &roq->queue);
+ i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
+ }
+ else
+ break; /* rest of packets all nsn_itr > nws */
+ }
+ roq->ws = sn;
+ return new_nws;
+}
+
+
+/*
+ * Reset a queue
+ *
+ * @i2400m: device descriptor
+ * @cin: Queue Index
+ *
+ * Deliver all the packets and reset the window-start to zero. The
+ * name is kind of misleading.
+ */
+static
+void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct sk_buff *skb_itr, *tmp_itr;
+ struct i2400m_roq_data *roq_data_itr;
+
+ d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
+ i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
+ roq->ws, skb_queue_len(&roq->queue),
+ ~0, ~0, 0);
+ skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
+ roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
+ d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n",
+ roq, skb_itr, roq_data_itr->sn);
+ __skb_unlink(skb_itr, &roq->queue);
+ i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
+ }
+ roq->ws = 0;
+ d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
+}
+
+
+/*
+ * Queue a packet
+ *
+ * @i2400m: device descriptor
+ * @cin: Queue Index
+ * @skb: containing the packet data
+ * @fbn: First block number of the packet in @skb
+ * @lbn: Last block number of the packet in @skb
+ *
+ * The hardware is asking the driver to queue a packet for later
+ * delivery to the networking stack.
+ */
+static
+void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
+ struct sk_buff * skb, unsigned lbn)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned nsn, len;
+
+ d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
+ i2400m, roq, skb, lbn);
+ len = skb_queue_len(&roq->queue);
+ nsn = __i2400m_roq_nsn(roq, lbn);
+ if (unlikely(nsn >= 1024)) {
+ dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
+ nsn, lbn, roq->ws);
+ i2400m_roq_log_dump(i2400m, roq);
+ i2400m_reset(i2400m, I2400M_RT_WARM);
+ } else {
+ __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
+ i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
+ roq->ws, len, lbn, nsn, ~0);
+ }
+ d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
+ i2400m, roq, skb, lbn);
+}
+
+
+/*
+ * Update the window start in a reorder queue and deliver all skbs
+ * with a lower window start
+ *
+ * @i2400m: device descriptor
+ * @roq: Reorder queue
+ * @sn: New sequence number
+ */
+static
+void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
+ unsigned sn)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned old_ws, nsn, len;
+
+ d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
+ old_ws = roq->ws;
+ len = skb_queue_len(&roq->queue);
+ nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
+ i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
+ old_ws, len, sn, nsn, roq->ws);
+ d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
+}
+
+
+/*
+ * Queue a packet and update the window start
+ *
+ * @i2400m: device descriptor
+ * @cin: Queue Index
+ * @skb: containing the packet data
+ * @fbn: First block number of the packet in @skb
+ * @sn: Last block number of the packet in @skb
+ *
+ * Note that unlike i2400m_roq_update_ws(), which sets the new window
+ * start to @sn, in here we'll set it to @sn + 1.
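+ *
+ * Example (illustrative): with ws = 10, a "queue & update ws" for a
+ * packet with sn = 12 queues (or directly delivers, if the queue was
+ * empty) that packet and then moves the window start to 13, flushing
+ * any queued packet whose normalized sn is lower than nsn(13) = 3.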
+ */
+static
+void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
+ struct sk_buff * skb, unsigned sn)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned nsn, old_ws, len;
+
+ d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
+ i2400m, roq, skb, sn);
+ len = skb_queue_len(&roq->queue);
+ nsn = __i2400m_roq_nsn(roq, sn);
+ /*
+ * For type 3 (queue_update_window_start) rx messages, there is no
+ * need to check if the normalized sequence number is greater
+ * than 1023. Simply insert the packet and deliver all queued
+ * packets to the host up to the new window start.
+ */
+ old_ws = roq->ws;
+ /* If the queue is empty, don't bother as we'd queue
+ * it and immediately unqueue it -- just deliver it.
+ */
+ if (len == 0) {
+ struct i2400m_roq_data *roq_data;
+ roq_data = (struct i2400m_roq_data *) &skb->cb;
+ i2400m_net_erx(i2400m, skb, roq_data->cs);
+ } else
+ __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
+
+ __i2400m_roq_update_ws(i2400m, roq, sn + 1);
+ i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
+ old_ws, len, sn, nsn, roq->ws);
+
+ d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
+ i2400m, roq, skb, sn);
+}
+
+
+/*
+ * This routine destroys the memory allocated for rx_roq, when no
+ * other thread is accessing it. Access to rx_roq is refcounted by
+ * rx_roq_refcount, hence memory allocated must be destroyed when
+ * rx_roq_refcount becomes zero. This routine gets executed when
+ * rx_roq_refcount becomes zero.
+ */
+static void i2400m_rx_roq_destroy(struct kref *ref)
+{
+ unsigned itr;
+ struct i2400m *i2400m
+ = container_of(ref, struct i2400m, rx_roq_refcount);
+ for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+ __skb_queue_purge(&i2400m->rx_roq[itr].queue);
+ kfree(i2400m->rx_roq[0].log);
+ kfree(i2400m->rx_roq);
+ i2400m->rx_roq = NULL;
+}
+
+/*
+ * Receive and send up an extended data packet
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the extended data packet
+ * @single_last: 1 if the payload is the only one or the last one of
+ * the skb.
+ * @payload: pointer to the packet's data inside the skb
+ * @size: size of the payload
+ *
+ * Starting in v1.4 of the i2400m's firmware, the device can send data
+ * packets to the host in an extended format; this includes a 16
+ * byte header (struct i2400m_pl_edata_hdr). Using this header's space
+ * we can fake ethernet headers for ethernet device emulation without
+ * having to copy packets around.
+ *
+ * This function handles said path.
+ *
+ *
+ * Receive and send up an extended data packet that requires no reordering
+ *
+ * @i2400m: device descriptor
+ * @skb_rx: skb that contains the extended data packet
+ * @single_last: 1 if the payload is the only one or the last one of
+ * the skb.
+ * @payload: pointer to the packet's data (past the actual extended
+ * data payload header).
+ * @size: size of the payload
+ *
+ * Pass over to the networking stack a data packet that might have
+ * reordering requirements.
+ *
+ * This needs to decide if the skb in which the packet is
+ * contained can be reused or if it needs to be cloned. Then it has to
+ * be trimmed at the edges so that the beginning is the space for the
+ * eth header, and then passed to i2400m_net_erx() for the stack.
+ *
+ * Assumes the caller has verified the sanity of the payload (size,
+ * etc) already.
+ */
+static
+void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
+ unsigned single_last, const void *payload, size_t size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_pl_edata_hdr *hdr = payload;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct sk_buff *skb;
+ enum i2400m_cs cs;
+ u32 reorder;
+ unsigned ro_needed, ro_type, ro_cin, ro_sn;
+ struct i2400m_roq *roq;
+ struct i2400m_roq_data *roq_data;
+ unsigned long flags;
+
+ BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
+
+ d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
+ "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
+ if (size < sizeof(*hdr)) {
+ dev_err(dev, "ERX: HW BUG? message with short header (%zu "
+ "vs %zu bytes expected)\n", size, sizeof(*hdr));
+ goto error;
+ }
+
+ if (single_last) {
+ skb = skb_get(skb_rx);
+ d_printf(3, dev, "ERX: skb %p reusing\n", skb);
+ } else {
+ skb = skb_clone(skb_rx, GFP_KERNEL);
+ if (skb == NULL) {
+ dev_err(dev, "ERX: no memory to clone skb\n");
+ net_dev->stats.rx_dropped++;
+ goto error_skb_clone;
+ }
+ d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx);
+ }
+ /* now we have to pull and trim so that the skb points to the
+ * beginning of the IP packet; the netdev part will add the
+ * ethernet header as needed - we know there is enough space
+ * because we checked it at the top of this function. */
+ skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data);
+ skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr));
+
+ reorder = le32_to_cpu(hdr->reorder);
+ ro_needed = reorder & I2400M_RO_NEEDED;
+ cs = hdr->cs;
+ if (ro_needed) {
+ ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
+ ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
+ ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ if (i2400m->rx_roq == NULL) {
+ kfree_skb(skb); /* rx_roq is already destroyed */
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ goto error;
+ }
+ roq = &i2400m->rx_roq[ro_cin];
+ kref_get(&i2400m->rx_roq_refcount);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
+ roq_data = (struct i2400m_roq_data *) &skb->cb;
+ roq_data->sn = ro_sn;
+ roq_data->cs = cs;
+ d_printf(2, dev, "ERX: reorder needed: "
+ "type %u cin %u [ws %u] sn %u/%u len %zuB\n",
+ ro_type, ro_cin, roq->ws, ro_sn,
+ __i2400m_roq_nsn(roq, ro_sn), size);
+ d_dump(2, dev, payload, size);
+ switch(ro_type) {
+ case I2400M_RO_TYPE_RESET:
+ i2400m_roq_reset(i2400m, roq);
+ kfree_skb(skb); /* no data here */
+ break;
+ case I2400M_RO_TYPE_PACKET:
+ i2400m_roq_queue(i2400m, roq, skb, ro_sn);
+ break;
+ case I2400M_RO_TYPE_WS:
+ i2400m_roq_update_ws(i2400m, roq, ro_sn);
+ kfree_skb(skb); /* no data here */
+ break;
+ case I2400M_RO_TYPE_PACKET_WS:
+ i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn);
+ break;
+ default:
+ dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
+ }
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ }
+ else
+ i2400m_net_erx(i2400m, skb, cs);
+error_skb_clone:
+error:
+ d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
+ "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
+}
+
+
+/*
+ * Act on a received payload
+ *
+ * @i2400m: device instance
+ * @skb_rx: skb where the transaction was received
+ * @single_last: 1 if this is the only payload or the last one (so the
+ * skb can be reused instead of cloned).
+ * @pld: payload descriptor
+ * @payload: payload data
+ *
+ * Upon reception of a payload, look at its guts in the payload
+ * descriptor and decide what to do with it. If it is the only payload
+ * in the skb, or the last one, and it is a data packet, the skb will
+ * be referenced and modified (so it doesn't have to be cloned).
+ */
+static
+void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
+ unsigned single_last, const struct i2400m_pld *pld,
+ const void *payload)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ size_t pl_size = i2400m_pld_size(pld);
+ enum i2400m_pt pl_type = i2400m_pld_type(pld);
+
+ d_printf(7, dev, "RX: received payload type %u, %zu bytes\n",
+ pl_type, pl_size);
+ d_dump(8, dev, payload, pl_size);
+
+ switch (pl_type) {
+ case I2400M_PT_DATA:
+ d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
+ i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size);
+ break;
+ case I2400M_PT_CTRL:
+ i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
+ break;
+ case I2400M_PT_TRACE:
+ i2400m_rx_trace(i2400m, payload, pl_size);
+ break;
+ case I2400M_PT_EDATA:
+ d_printf(3, dev, "ERX: data payload %zu bytes\n", pl_size);
+ i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size);
+ break;
+ default: /* Anything else shouldn't come to the host */
+ if (printk_ratelimit())
+ dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
+ pl_type);
+ }
+}
+
+
+/*
+ * Check a received transaction's message header
+ *
+ * @i2400m: device descriptor
+ * @msg_hdr: message header
+ * @buf_size: size of the received buffer
+ *
+ * Check that the declarations done by a RX buffer message header are
+ * sane and consistent with the amount of data that was received.
+ */
+static
+int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
+ const struct i2400m_msg_hdr *msg_hdr,
+ size_t buf_size)
+{
+ int result = -EIO;
+ struct device *dev = i2400m_dev(i2400m);
+ if (buf_size < sizeof(*msg_hdr)) {
+ dev_err(dev, "RX: HW BUG? message with short header (%zu "
+ "vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
+ goto error;
+ }
+ if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
+ dev_err(dev, "RX: HW BUG? message received with unknown "
+ "barker 0x%08x (buf_size %zu bytes)\n",
+ le32_to_cpu(msg_hdr->barker), buf_size);
+ goto error;
+ }
+ if (msg_hdr->num_pls == 0) {
+ dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
+ goto error;
+ }
+ if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
+ dev_err(dev, "RX: HW BUG? message contains more payload "
+ "than maximum; ignoring.\n");
+ goto error;
+ }
+ result = 0;
+error:
+ return result;
+}
+
+
+/*
+ * Check a payload descriptor against the received data
+ *
+ * @i2400m: device descriptor
+ * @pld: payload descriptor
+ * @pl_itr: offset (in bytes) in the received buffer the payload is
+ * located
+ * @buf_size: size of the received buffer
+ *
+ * Given a payload descriptor (part of a RX buffer), check it is sane
+ * and that the data it declares fits in the buffer.
+ */
+static
+int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
+ const struct i2400m_pld *pld,
+ size_t pl_itr, size_t buf_size)
+{
+ int result = -EIO;
+ struct device *dev = i2400m_dev(i2400m);
+ size_t pl_size = i2400m_pld_size(pld);
+ enum i2400m_pt pl_type = i2400m_pld_type(pld);
+
+ if (pl_size > i2400m->bus_pl_size_max) {
+ dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
+ "bigger than maximum %zu; ignoring message\n",
+ pl_itr, pl_size, i2400m->bus_pl_size_max);
+ goto error;
+ }
+ if (pl_itr + pl_size > buf_size) { /* enough? */
+ dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
+ "goes beyond the received buffer "
+ "size (%zu bytes); ignoring message\n",
+ pl_itr, pl_size, buf_size);
+ goto error;
+ }
+ if (pl_type >= I2400M_PT_ILLEGAL) {
+ dev_err(dev, "RX: HW BUG? illegal payload type %u; "
+ "ignoring message\n", pl_type);
+ goto error;
+ }
+ result = 0;
+error:
+ return result;
+}
+
+
+/**
+ * i2400m_rx - Receive a buffer of data from the device
+ *
+ * @i2400m: device descriptor
+ * @skb: skbuff where the data has been received
+ *
+ * Parse in a buffer of data that contains an RX message sent from the
+ * device. See the file header for the format. Run all checks on the
+ * buffer header, then run over each payload's descriptors, verify
+ * their consistency and act on each payload's contents. If
+ * everything is successful, update the device's statistics.
+ *
+ * Note: You need to set the skb to contain only the length of the
+ * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
+ *
+ * Returns:
+ *
+ * 0 if ok, < 0 errno on error
+ *
+ * If ok, this function now owns the skb and the caller DOESN'T have
+ * to run kfree_skb() on it. However, on error, the caller still owns
+ * the skb and it is responsible for releasing it.
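+ *
+ * A bus-specific RX path would do, roughly (sketch; 'rx_size' stands
+ * for whatever size the transport reported as received):
+ *
+ *   skb_put(skb, rx_size);            [or skb_trim() down to rx_size]
+ *   result = i2400m_rx(i2400m, skb);
+ *   if (result < 0)
+ *           kfree_skb(skb);           [on error the skb is still ours]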
+ */
+int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
+{
+ int i, result;
+ struct device *dev = i2400m_dev(i2400m);
+ const struct i2400m_msg_hdr *msg_hdr;
+ size_t pl_itr, pl_size;
+ unsigned long flags;
+ unsigned num_pls, single_last, skb_len;
+
+ skb_len = skb->len;
+ d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
+ i2400m, skb, skb_len);
+ msg_hdr = (void *) skb->data;
+ result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
+ if (result < 0)
+ goto error_msg_hdr_check;
+ result = -EIO;
+ num_pls = le16_to_cpu(msg_hdr->num_pls);
+ /* Check payload descriptor(s) */
+ pl_itr = struct_size(msg_hdr, pld, num_pls);
+ pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
+ if (pl_itr > skb_len) { /* got all the payload descriptors? */
+ dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
+ "%u payload descriptors (%zu each, total %zu)\n",
+ skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+ goto error_pl_descr_short;
+ }
+ /* Walk each payload--check we really got it */
+ for (i = 0; i < num_pls; i++) {
+ /* work around old gcc warnings */
+ pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
+ result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
+ pl_itr, skb_len);
+ if (result < 0)
+ goto error_pl_descr_check;
+ single_last = num_pls == 1 || i == num_pls - 1;
+ i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
+ skb->data + pl_itr);
+ pl_itr += ALIGN(pl_size, I2400M_PL_ALIGN);
+ cond_resched(); /* Don't monopolize */
+ }
+ kfree_skb(skb);
+ /* Update device statistics */
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ i2400m->rx_pl_num += i;
+ if (i > i2400m->rx_pl_max)
+ i2400m->rx_pl_max = i;
+ if (i < i2400m->rx_pl_min)
+ i2400m->rx_pl_min = i;
+ i2400m->rx_num++;
+ i2400m->rx_size_acc += skb_len;
+ if (skb_len < i2400m->rx_size_min)
+ i2400m->rx_size_min = skb_len;
+ if (skb_len > i2400m->rx_size_max)
+ i2400m->rx_size_max = skb_len;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+error_pl_descr_check:
+error_pl_descr_short:
+error_msg_hdr_check:
+ d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
+ i2400m, skb, skb_len, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_rx);
+
+
+void i2400m_unknown_barker(struct i2400m *i2400m,
+ const void *buf, size_t size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ char prefix[64];
+ const __le32 *barker = buf;
+ dev_err(dev, "RX: HW BUG? unknown barker %08x, "
+ "dropping %zu bytes\n", le32_to_cpu(*barker), size);
+ snprintf(prefix, sizeof(prefix), "%s %s: ",
+ dev_driver_string(dev), dev_name(dev));
+ if (size > 64) {
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 8, 4, buf, 64, 0);
+ printk(KERN_ERR "%s... (only first 64 bytes "
+ "dumped)\n", prefix);
+ } else
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 8, 4, buf, size, 0);
+}
+EXPORT_SYMBOL(i2400m_unknown_barker);
+
+
+/*
+ * Initialize the RX queue and infrastructure
+ *
+ * This sets up all the RX reordering infrastructure, which will not
+ * be used if reordering is not enabled or if the firmware does not
+ * support it. The device is told to do reordering in
+ * i2400m_dev_initialize(), where it also looks at the value of the
+ * i2400m->rx_reorder switch before taking a decision.
+ *
+ * Note we allocate the roq queues in one chunk and the logging
+ * support for them in another one, and then we set up the
+ * pointers from the first to the second.
+ */
+int i2400m_rx_setup(struct i2400m *i2400m)
+{
+ int result = 0;
+
+ i2400m->rx_reorder = i2400m_rx_reorder_disabled ? 0 : 1;
+ if (i2400m->rx_reorder) {
+ unsigned itr;
+ struct i2400m_roq_log *rd;
+
+ result = -ENOMEM;
+
+ i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1,
+ sizeof(i2400m->rx_roq[0]), GFP_KERNEL);
+ if (i2400m->rx_roq == NULL)
+ goto error_roq_alloc;
+
+ rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
+ GFP_KERNEL);
+ if (rd == NULL) {
+ result = -ENOMEM;
+ goto error_roq_log_alloc;
+ }
+
+ for(itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
+ __i2400m_roq_init(&i2400m->rx_roq[itr]);
+ i2400m->rx_roq[itr].log = &rd[itr];
+ }
+ kref_init(&i2400m->rx_roq_refcount);
+ }
+ return 0;
+
+error_roq_log_alloc:
+ kfree(i2400m->rx_roq);
+error_roq_alloc:
+ return result;
+}
+
+
+/* Tear down the RX queue and infrastructure */
+void i2400m_rx_release(struct i2400m *i2400m)
+{
+ unsigned long flags;
+
+ if (i2400m->rx_reorder) {
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ }
+ /* at this point, nothing can be received... */
+ i2400m_report_hook_flush(i2400m);
+}
diff --git a/drivers/staging/wimax/i2400m/sysfs.c b/drivers/staging/wimax/i2400m/sysfs.c
new file mode 100644
index 000000000000..895ee265909b
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/sysfs.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Sysfs interfaces to show driver and device information
+ *
+ * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE sysfs
+#include "debug-levels.h"
+
+
+/*
+ * Set the idle timeout (msecs)
+ *
+ * FIXME: eventually this should be a common WiMAX stack method, but
+ * we would like to wait and see how other devices manage it.
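+ *
+ * From user space this is driven with something like the following
+ * (sketch only; the exact path depends on the interface name -- wmx0
+ * here -- and the value must pass the checks done below):
+ *
+ *   echo 5000 > /sys/class/net/wmx0/i2400m_idle_timeout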
+ */
+static
+ssize_t i2400m_idle_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct i2400m *i2400m = net_dev_to_i2400m(to_net_dev(dev));
+ unsigned val;
+
+ result = -EINVAL;
+ if (sscanf(buf, "%u\n", &val) != 1)
+ goto error_no_unsigned;
+ if (val != 0 && (val < 100 || val > 300000 || val % 100 != 0)) {
+ dev_err(dev, "idle_timeout: %u: invalid msecs specification; "
+ "valid values are 0, 100-300000 in 100 increments\n",
+ val);
+ goto error_bad_value;
+ }
+ result = i2400m_set_idle_timeout(i2400m, val);
+ if (result >= 0)
+ result = size;
+error_no_unsigned:
+error_bad_value:
+ return result;
+}
+
+static
+DEVICE_ATTR_WO(i2400m_idle_timeout);
+
+static
+struct attribute *i2400m_dev_attrs[] = {
+ &dev_attr_i2400m_idle_timeout.attr,
+ NULL,
+};
+
+struct attribute_group i2400m_dev_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = i2400m_dev_attrs,
+};
diff --git a/drivers/staging/wimax/i2400m/tx.c b/drivers/staging/wimax/i2400m/tx.c
new file mode 100644
index 000000000000..1255302e251e
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/tx.c
@@ -0,0 +1,1011 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Generic (non-bus specific) TX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * - Initial implementation
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Rewritten to use a single FIFO to lower the memory allocation
+ * pressure and optimize cache hits when copying to the queue, as
+ * well as splitting out bus-specific code.
+ *
+ *
+ * Implements data transmission to the device; this is done through a
+ * software FIFO, as data/control frames can be coalesced (while the
+ * device is reading the previous tx transaction, others accumulate).
+ *
+ * A FIFO is used because in the end it is resource-cheaper than trying
+ * to implement scatter/gather over USB. Also, most traffic is going
+ * to be download (vs. upload).
+ *
+ * The format for sending/receiving data to/from the i2400m is
+ * described in detail in rx.c:PROTOCOL FORMAT. In here we implement
+ * the transmission of that. This is split between a bus-independent
+ * part that just prepares everything and a bus-specific part that
+ * does the actual transmission over the bus to the device (in the
+ * bus-specific driver).
+ *
+ *
+ * The general format of a device-host transaction is MSG-HDR, PLD1,
+ * PLD2...PLDN, PL1, PL2,...PLN, PADDING.
+ *
+ * Because we need to send payload descriptors and then payloads, and
+ * because it is kind of expensive to do scatterlists in USB (one URB
+ * per node), it becomes cheaper to append all the data to a FIFO
+ * (copying to a FIFO potentially in cache is cheaper).
+ *
+ * Then the bus-specific code takes the parts of that FIFO that are
+ * written and passes them to the device.
+ *
+ * So the concepts to keep in mind there are:
+ *
+ * We use a FIFO to queue the data in a linear buffer. We first append
+ * a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then
+ * go appending payloads until we run out of space or of payload
+ * descriptors. Then we append padding to make the whole transaction a
+ * multiple of i2400m->bus_tx_block_size (as defined by the bus layer).
+ *
+ * - A TX message: a combination of a message header, payload
+ * descriptors and payloads.
+ *
+ * Open: it is marked as active (i2400m->tx_msg is valid) and we
+ * can keep adding payloads to it.
+ *
+ * Closed: we are not appending more payloads to this TX message
+ * (exhausted space in the queue, too many payloads or
+ * whichever). We have appended padding so the whole message
+ * length is aligned to i2400m->bus_tx_block_size (as set by the
+ * bus/transport layer).
+ *
+ * - Most of the time we keep a TX message open to which we append
+ * payloads.
+ *
+ * - If we are going to append and there is no more space (we are at
+ * the end of the FIFO), we close the message, mark the rest of the
+ * FIFO space unusable (skip_tail), create a new message at the
+ * beginning of the FIFO (if there is space) and append the payload
+ * there.
+ *
+ * This is because we need to give linear TX messages to the bus
+ * engine. So we don't write a message into the remaining FIFO space
+ * up to the tail; instead we continue at the head of the buffer.
+ *
+ * - We overload one of the fields in the message header to use it as
+ * 'size' of the TX message, so we can iterate over them. It also
+ * contains a flag that indicates if we have to skip it or not.
+ * When we send the buffer, we update that to its real on-the-wire
+ * value.
+ *
+ * - The MSG-HDR PLD1...PLDN stuff has to be a size multiple of 16.
+ *
+ * It follows that if MSG-HDR says we have N payloads, the whole
+ * header + descriptors is 16 + 4*N; for those to be a multiple of
+ * 16, it follows that N can be 4, 8, 12, ... (32, 48, 64, 80...
+ * bytes).
+ *
+ * So if we have only 1 payload, we have to submit a header that in
+ * all truth has space for 4.
+ *
+ * The implication is that we reserve space for 12 (64 bytes); but
+ * if we fill up only (eg) 2, our header becomes 32 bytes only. So
+ * the TX engine has to shift those 32 bytes of msg header and 2
+ * payloads and padding so that right after it the payloads start
+ * and the TX engine has to know about that.
+ *
+ * It is cheaper to move the header up than the whole payloads down.
+ *
+ * We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset'.
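+ *
+ * A quick worked example: with 2 payloads the header plus
+ * descriptors would be 16 + 4 * 2 = 24 bytes, which is not a
+ * multiple of 16, so the header is sized as if it had 4
+ * descriptors (16 + 4 * 4 = 32 bytes) and, at close time, moved
+ * up so the payloads start right after those 32 bytes.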
+ *
+ * - Each payload has to be size-padded to 16 bytes; before appending
+ * it, we just do it.
+ *
+ * - The whole message has to be padded to i2400m->bus_tx_block_size;
+ * we do this at close time. Thus, when reserving space for the
+ * payload, we always make sure there is also free space for this
+ * padding that sooner or later will happen.
+ *
+ * When we append a message, we tell the bus specific code to kick in
+ * TXs. It will TX (in parallel) until the buffer is exhausted--hence
+ * the locking we do. The TX code will only send one TX message at a
+ * time (which, remember, might contain more than one payload). Of
+ * course, when the bus-specific driver attempts to TX a message that
+ * is still open, it gets closed first.
+ *
+ * Gee, this is messy; well, here is a picture. In the example below we have a
+ * partially full FIFO, with a closed message ready to be delivered
+ * (with a moved message header to make sure it is size-aligned to
+ * 16), TAIL room that was unusable (and thus is marked with a message
+ * header that says 'skip this') and at the head of the buffer, an
+ * incomplete message with a couple of payloads.
+ *
+ * N ___________________________________________________
+ * | |
+ * | TAIL room |
+ * | |
+ * | msg_hdr to skip (size |= 0x80000) |
+ * |---------------------------------------------------|-------
+ * | | /|\
+ * | | |
+ * | TX message padding | |
+ * | | |
+ * | | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
+ * | | |
+ * | payload 1 | |
+ * | | N * tx_block_size
+ * | | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
+ * | | |
+ * | payload 1 | |
+ * | | |
+ * | | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - -
+ * | padding 3 /|\ | | /|\
+ * | padding 2 | | | |
+ * | pld 1 32 bytes (2 * 16) | | |
+ * | pld 0 | | | |
+ * | moved msg_hdr \|/ | \|/ |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - - |
+ * | | _PLD_SIZE
+ * | unused | |
+ * | | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
+ * | msg_hdr (size X) [this message is closed] | \|/
+ * |===================================================|========== <=== OUT
+ * | |
+ * | |
+ * | |
+ * |                     Free room                     |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * |===================================================|========== <=== IN
+ * | |
+ * | |
+ * | |
+ * | |
+ * | payload 1 |
+ * | |
+ * | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | |
+ * | payload 0 |
+ * | |
+ * | |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | pld 11 /|\ |
+ * | ... | |
+ * | pld 1 64 bytes (2 * 16) |
+ * | pld 0 | |
+ * | msg_hdr (size X) \|/ [message is open] |
+ * 0 ---------------------------------------------------
+ *
+ *
+ * ROADMAP
+ *
+ * i2400m_tx_setup() Called by i2400m_setup
+ * i2400m_tx_release() Called by i2400m_release()
+ *
+ * i2400m_tx() Called to send data or control frames
+ * i2400m_tx_fifo_push() Allocates append-space in the FIFO
+ * i2400m_tx_new() Opens a new message in the FIFO
+ * i2400m_tx_fits() Checks if a new payload fits in the message
+ * i2400m_tx_close() Closes an open message in the FIFO
+ * i2400m_tx_skip_tail() Marks unusable FIFO tail space
+ * i2400m->bus_tx_kick()
+ *
+ * Now i2400m->bus_tx_kick() is the bus-specific driver backend
+ * implementation; that would do:
+ *
+ * i2400m->bus_tx_kick()
+ * i2400m_tx_msg_get() Gets first message ready to go
+ * ...sends it...
+ * i2400m_tx_msg_sent() Ack the message is sent; repeat from
+ * _tx_msg_get() until it returns NULL
+ * (FIFO empty).
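+ *
+ * In other words, a bus backend's kick routine would look roughly
+ * like this (sketch; error handling and the actual bus transfer are
+ * omitted):
+ *
+ *   while ((tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size)) != NULL) {
+ *           ... transmit tx_msg_size bytes starting at tx_msg
+ *               over the bus ...
+ *           i2400m_tx_msg_sent(i2400m);
+ *   }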
+ */
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "i2400m.h"
+
+
+#define D_SUBMODULE tx
+#include "debug-levels.h"
+
+enum {
+ /**
+ * TX Buffer size
+ *
+ * Doc says maximum transaction is 16KiB. If we had 16KiB en
+ * route and 16KiB being queued, it boils down to needing
+ * 32KiB.
+ * 32KiB is insufficient for 1400 MTU, hence increasing
+ * tx buffer size to 64KiB.
+ */
+ I2400M_TX_BUF_SIZE = 65536,
+ /**
+ * Message header and payload descriptors have to be 16
+ * aligned (16 + 4 * N = 16 * M). If we take into account that average
+ * sent packets are MTU size (~1400-~1500), it follows that we could
+ * fit at most 10-11 payloads in one transaction. To meet the
+ * alignment requirement, that means we need to leave space
+ * for 12 (64 bytes). To simplify, we leave space for that. If
+ * at the end there are less, we pad up to the nearest
+ * multiple of 16.
+ */
+ /*
+ * According to Intel Wimax i3200, i5x50 and i6x50 specification
+ * documents, the maximum number of payloads per message can be
+ * up to 60. Increasing the number of payloads to 60 per message
+ * helps to accommodate smaller payloads in a single transaction.
+ */
+ I2400M_TX_PLD_MAX = 60,
+ I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
+ + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
+ I2400M_TX_SKIP = 0x80000000,
+ /*
+ * According to Intel Wimax i3200, i5x50 and i6x50 specification
+ * documents, the maximum size of each message can be up to 16KiB.
+ */
+ I2400M_TX_MSG_SIZE = 16384,
+};
+
+#define TAIL_FULL ((void *)~(unsigned long)NULL)
+
+/*
+ * Calculate how much tail room is available
+ *
+ * Note the trick here. This path is ONLY called for Case A (see
+ * i2400m_tx_fifo_push() below), where we have:
+ *
+ * Case A
+ * N ___________
+ * | tail room |
+ * | |
+ * |<- IN ->|
+ * | |
+ * | data |
+ * | |
+ * |<- OUT ->|
+ * | |
+ * | head room |
+ * 0 -----------
+ *
+ * When calculating the tail_room, tx_in might come out as zero if
+ * i2400m->tx_in is right at the end of the buffer (a really full
+ * buffer with no head room). In this case, tail_room would compute to
+ * I2400M_TX_BUF_SIZE although it is actually zero; hence the final
+ * mod (%) operation. However, with that same math, i2400m->tx_in
+ * being zero (nothing queued yet) would also yield zero, so we treat
+ * it as a special case.
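+ *
+ * Quick example (illustrative numbers): with a 64KiB buffer, if
+ * tx_in == 65536 (exactly one full wrap), tx_in % size == 0, so
+ * size - 0 would say 64KiB of tail room; the final mod turns that
+ * into the correct 0. If instead tx_in == 0 (nothing ever queued),
+ * the same math would also give 0, which is why that case returns
+ * the full buffer size up front.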
+ */
+static inline
+size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
+{
+ size_t tail_room;
+ size_t tx_in;
+
+ if (unlikely(i2400m->tx_in == 0))
+ return I2400M_TX_BUF_SIZE;
+ tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ tail_room = I2400M_TX_BUF_SIZE - tx_in;
+ tail_room %= I2400M_TX_BUF_SIZE;
+ return tail_room;
+}
+
+
+/*
+ * Allocate @size bytes in the TX fifo, return a pointer to it
+ *
+ * @i2400m: device descriptor
+ * @size: size of the buffer we need to allocate
+ * @padding: ensure that there is at least this many bytes of free
+ * contiguous space in the fifo. This is needed because later on
+ * we might need to add padding.
+ * @try_head: specify either to allocate head room or tail room space
+ * in the TX FIFO. This boolean is required to avoid a system hang
+ * due to an infinite loop caused by i2400m_tx_fifo_push().
+ * The caller must always try to allocate tail room space first by
+ * calling this routine with try_head = 0. If there is not enough
+ * tail room space but there is enough head room space
+ * (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head
+ * room space by calling this routine again with try_head = 1.
+ *
+ * Returns:
+ *
+ * Pointer to the allocated space. NULL if there is no
+ * space. TAIL_FULL if there is no space at the tail but there is at
+ * the head (Case B below).
+ *
+ * These are the two basic cases we need to keep an eye for -- it is
+ * much better explained in linux/kernel/kfifo.c, but this code
+ * basically does the same. No rocket science here.
+ *
+ * Case A Case B
+ * N ___________ ___________
+ * | tail room | | data |
+ * | | | |
+ * |<- IN ->| |<- OUT ->|
+ * | | | |
+ * | data | | room |
+ * | | | |
+ * |<- OUT ->| |<- IN ->|
+ * | | | |
+ * | head room | | data |
+ * 0 ----------- -----------
+ *
+ * We allocate only *contiguous* space.
+ *
+ * We can allocate only from 'room'. In Case B, it is simple; in case
+ * A, we only try from the tail room; if it is not enough, we just
+ * fail and return TAIL_FULL and let the caller figure out if it wants to
+ * skip the tail room and try to allocate from the head.
+ *
+ * There is a corner case, wherein i2400m_tx_new() can get into
+ * an infinite loop calling i2400m_tx_fifo_push().
+ * In certain situations, tx_in may have reached the top of the TX FIFO
+ * and __i2400m_tx_tail_room() returns 0, as described below:
+ *
+ * N ___________ tail room is zero
+ * |<- IN ->|
+ * | |
+ * | |
+ * | |
+ * | data |
+ * |<- OUT ->|
+ * | |
+ * | |
+ * | head room |
+ * 0 -----------
+ * At such a time, when the tail room in the TX FIFO is zero and there
+ * is a request to add a payload to the TX FIFO, the call chain is:
+ * i2400m_tx()
+ * ->calls i2400m_tx_close()
+ * ->calls i2400m_tx_skip_tail()
+ * goto try_new;
+ * ->calls i2400m_tx_new()
+ * |----> [try_head:]
+ * infinite loop | ->calls i2400m_tx_fifo_push()
+ * | if (tail_room < needed)
+ * | if (head_room => needed)
+ * | return TAIL_FULL;
+ * |<---- goto try_head;
+ *
+ * i2400m_tx() calls i2400m_tx_close() to close the message, since
+ * there is no tail room to accommodate the payload, and then calls
+ * i2400m_tx_skip_tail() to skip the tail space. Next, i2400m_tx()
+ * calls i2400m_tx_new() to allocate space for a new message header,
+ * which calls i2400m_tx_fifo_push(); that returns TAIL_FULL, since
+ * there is no tail space for the message header but there is enough
+ * head space. i2400m_tx_new() then keeps retrying
+ * i2400m_tx_fifo_push(), ending up in a loop that freezes the system.
+ *
+ * This corner case is avoided by using a try_head boolean,
+ * as an argument to i2400m_tx_fifo_push().
+ *
+ * Note:
+ *
+ * Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ *
+ * The indexes keep increasing and we reset them to zero when we
+ * pop data off the queue
+ */
+static
+void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
+ size_t padding, bool try_head)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ size_t room, tail_room, needed_size;
+ void *ptr;
+
+ needed_size = size + padding;
+ room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
+ if (room < needed_size) { /* this takes care of Case B */
+ d_printf(2, dev, "fifo push %zu/%zu: no space\n",
+ size, padding);
+ return NULL;
+ }
+ /* Is there space at the tail? */
+ tail_room = __i2400m_tx_tail_room(i2400m);
+ if (!try_head && tail_room < needed_size) {
+ /*
+ * If the tail room space is not enough to push the message
+ * in the TX FIFO, then there are two possibilities:
+ * 1. There is enough head room space to accommodate
+ * this message in the TX FIFO.
+ * 2. There is not enough space in the head room and
+ * in tail room of the TX FIFO to accommodate the message.
+ * In case (1), return TAIL_FULL so that the caller
+ * can decide whether it wants to push the message
+ * into the head room space.
+ * In case (2), return NULL, indicating that the TX FIFO
+ * cannot accommodate the message.
+ */
+ if (room - tail_room >= needed_size) {
+ d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
+ size, padding);
+ return TAIL_FULL; /* There might be head space */
+ } else {
+ d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
+ size, padding);
+ return NULL; /* There is no space */
+ }
+ }
+ ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
+ i2400m->tx_in % I2400M_TX_BUF_SIZE);
+ i2400m->tx_in += size;
+ return ptr;
+}
+
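+/*
+ * Worked example of the allocation decisions above (all numbers are
+ * hypothetical, chosen only to illustrate the checks):
+ *
+ *   assume I2400M_TX_BUF_SIZE == 1024, tx_out == 200, tx_in == 900
+ *
+ *   room      = 1024 - (900 - 200) = 324   (total free space)
+ *   tail_room = 1024 - (900 % 1024) = 124  (free space up to the top)
+ *
+ *   needed_size <= 124       -> allocated from the tail (offset 900)
+ *   124 < needed_size <= 200 -> TAIL_FULL; the caller may skip the
+ *                               tail and retry with try_head = 1 to
+ *                               use the 200 bytes of head room
+ *   needed_size > 200        -> NULL; even though up to 324 bytes are
+ *                               free in total, they are not contiguous
+ */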
+
+/*
+ * Mark the tail of the FIFO buffer as 'to-skip'
+ *
+ * We should never hit the BUG_ON() because all the sizes we push to
+ * the FIFO are padded to be a multiple of 16 -- the size of *msg
+ * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
+ * header).
+ *
+ * Tail room can get to be zero if a message was opened when there was
+ * space only for a header. _tx_close() will mark it as to-skip (as it
+ * will have no payloads) and there will be no more space to flush, so
+ * nothing has to be done here. This is probably cheaper than ensuring
+ * in _tx_new() that there is some space for payloads...as we could
+ * always possibly hit the same problem if the payload wouldn't fit.
+ *
+ * Note:
+ *
+ * Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ *
+ * This path is only taken for Case A FIFO situations [see
+ * i2400m_tx_fifo_push()]
+ */
+static
+void i2400m_tx_skip_tail(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
+ size_t tail_room = __i2400m_tx_tail_room(i2400m);
+ struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
+ if (unlikely(tail_room == 0))
+ return;
+ BUG_ON(tail_room < sizeof(*msg));
+ msg->size = tail_room | I2400M_TX_SKIP;
+ d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
+ tail_room, tx_in);
+ i2400m->tx_in += tail_room;
+}
+
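+/*
+ * Illustration of a skipped tail (48 is a hypothetical amount of tail
+ * room): i2400m_tx_skip_tail() writes a skip marker at tx_in,
+ *
+ *   msg->size = 48 | I2400M_TX_SKIP;
+ *   i2400m->tx_in += 48;
+ *
+ * and i2400m_tx_msg_get() later sees the I2400M_TX_SKIP bit, advances
+ * tx_out by (msg->size & ~I2400M_TX_SKIP) == 48 and moves on without
+ * handing those bytes to the bus layer.
+ */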
+
+/*
+ * Check if a skb will fit in the TX queue's current active TX
+ * message (if there are still descriptors left unused).
+ *
+ * Returns:
+ * 0 if the message won't fit, 1 if it will.
+ *
+ * Note:
+ *
+ * Assumes a TX message is active (i2400m->tx_msg).
+ *
+ * Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ */
+static
+unsigned i2400m_tx_fits(struct i2400m *i2400m)
+{
+ struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg;
+ return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX;
+
+}
+
+
+/*
+ * Start a new TX message header in the queue.
+ *
+ * Reserve memory from the base FIFO engine and then just initialize
+ * the message header.
+ *
+ * We allocate the biggest TX message header we might need (one that'd
+ * fit I2400M_TX_PLD_MAX payloads) -- when it is closed it will be
+ * 'ironed out' and the unneeded parts removed.
+ *
+ * NOTE:
+ *
+ * Assumes that the previous message is CLOSED (i.e., either
+ * there was none or 'i2400m_tx_close()' was called on it).
+ *
+ * Assumes i2400m->tx_lock is taken, and we use that as a barrier
+ */
+static
+void i2400m_tx_new(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_msg_hdr *tx_msg;
+ bool try_head = false;
+ BUG_ON(i2400m->tx_msg != NULL);
+ /*
+ * In certain situations, TX queue might have enough space to
+ * accommodate the new message header I2400M_TX_PLD_SIZE, but
+ * might not have enough space to accommodate the payloads.
+ * Adding bus_tx_room_min padding while allocating a new TX message
+ * increases the chances of including at least one payload of
+ * size <= bus_tx_room_min.
+ */
+try_head:
+ tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
+ i2400m->bus_tx_room_min, try_head);
+ if (tx_msg == NULL)
+ goto out;
+ else if (tx_msg == TAIL_FULL) {
+ i2400m_tx_skip_tail(i2400m);
+ d_printf(2, dev, "new TX message: tail full, trying head\n");
+ try_head = true;
+ goto try_head;
+ }
+ memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
+ tx_msg->size = I2400M_TX_PLD_SIZE;
+out:
+ i2400m->tx_msg = tx_msg;
+ d_printf(2, dev, "new TX message: %p @%zu\n",
+ tx_msg, (void *) tx_msg - i2400m->tx_buf);
+}
+
+
+/*
+ * Finalize the current TX message header
+ *
+ * Sets the message header to be at the proper location depending on
+ * how many descriptors we have (check documentation at the file's
+ * header for more info on that).
+ *
+ * Appends padding bytes to make sure the whole TX message (counting
+ * from the 'relocated' message header) is aligned to
+ * tx_block_size. We assume the _append() code has left enough space
+ * in the FIFO for that. If there are no payloads, just pass, as it
+ * won't be transferred.
+ *
+ * The amount of padding bytes depends on how many payloads are in the
+ * TX message, as the "msg header and payload descriptors" will be
+ * shifted up in the buffer.
+ */
+static
+void i2400m_tx_close(struct i2400m *i2400m)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
+ struct i2400m_msg_hdr *tx_msg_moved;
+ size_t aligned_size, padding, hdr_size;
+ void *pad_buf;
+ unsigned num_pls;
+
+ if (tx_msg->size & I2400M_TX_SKIP) /* a skipper? nothing to do */
+ goto out;
+ num_pls = le16_to_cpu(tx_msg->num_pls);
+ /* We can get this situation when a new message was started
+ * and there was no space to add payloads before hitting the
+ * tail (and taking padding into consideration). */
+ if (num_pls == 0) {
+ tx_msg->size |= I2400M_TX_SKIP;
+ goto out;
+ }
+ /* Relocate the message header
+ *
+ * Find the current header size, align it to 16 and if we need
+ * to move it so the tail is next to the payloads, move it and
+ * set the offset.
+ *
+ * If it moved, this header is good only for transmission; the
+ * original one (it is kept if we moved) is still used to
+ * figure out where the next TX message starts (and where the
+ * offset to the moved header is).
+ */
+ hdr_size = struct_size(tx_msg, pld, le16_to_cpu(tx_msg->num_pls));
+ hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
+ tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
+ tx_msg_moved = (void *) tx_msg + tx_msg->offset;
+ memmove(tx_msg_moved, tx_msg, hdr_size);
+ tx_msg_moved->size -= tx_msg->offset;
+ /*
+ * Now figure out how much we have to add to the (moved!)
+ * message so the size is a multiple of i2400m->bus_tx_block_size.
+ */
+ aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
+ padding = aligned_size - tx_msg_moved->size;
+ if (padding > 0) {
+ pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
+ if (WARN_ON(pad_buf == NULL || pad_buf == TAIL_FULL)) {
+ /* This should not happen -- append should verify
+ * there is always space left at least to append
+ * tx_block_size */
+ dev_err(dev,
+ "SW BUG! Possible data leakage from memory the "
+ "device should not read for padding - "
+ "size %lu aligned_size %zu tx_buf %p in "
+ "%zu out %zu\n",
+ (unsigned long) tx_msg_moved->size,
+ aligned_size, i2400m->tx_buf, i2400m->tx_in,
+ i2400m->tx_out);
+ } else
+ memset(pad_buf, 0xad, padding);
+ }
+ tx_msg_moved->padding = cpu_to_le16(padding);
+ tx_msg_moved->size += padding;
+ if (tx_msg != tx_msg_moved)
+ tx_msg->size += padding;
+out:
+ i2400m->tx_msg = NULL;
+}
+
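+/*
+ * Worked example of the header relocation and padding above (all
+ * sizes are hypothetical, chosen only to keep the arithmetic short):
+ *
+ *   I2400M_TX_PLD_SIZE == 256, fixed header part == 16 bytes, payload
+ *   descriptor == 4 bytes, num_pls == 3, bus_tx_block_size == 256
+ *
+ *   hdr_size       = ALIGN(16 + 3 * 4, 16) = 32
+ *   tx_msg->offset = 256 - 32 = 224
+ *
+ * so the 32 byte header is copied 224 bytes up the FIFO and its end
+ * lands right where the payloads start. If the moved message then
+ * measures 1500 bytes, it is padded with 0xad bytes up to
+ * ALIGN(1500, 256) = 1536, i.e. padding == 36.
+ */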
+
+/**
+ * i2400m_tx - send the data in a buffer to the device
+ *
+ * @i2400m: device descriptor
+ *
+ * @buf: pointer to the buffer to transmit
+ *
+ * @buf_len: buffer size
+ *
+ * @pl_type: type of the payload we are sending.
+ *
+ * Returns:
+ * 0 if ok, < 0 errno code on error (-ENOSPC, if there is no more
+ * room for the message in the queue).
+ *
+ * Appends the buffer to the TX FIFO and notifies the bus-specific
+ * part of the driver that there is new data ready to transmit.
+ * Once this function returns, the buffer has been copied, so it can
+ * be reused.
+ *
+ * The steps followed to append are explained in detail in the file
+ * header.
+ *
+ * Whenever we write to a message, we increase msg->size, so it
+ * reflects exactly how big the message is. This is needed so that if
+ * we concatenate two messages before they can be sent, the code that
+ * sends the messages can find the boundaries (and it will replace the
+ * size with the real barker before sending).
+ *
+ * Note:
+ *
+ * Cold and warm reset payloads need to be sent as a single
+ * payload, so we handle that.
+ */
+int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
+ enum i2400m_pt pl_type)
+{
+ int result = -ENOSPC;
+ struct device *dev = i2400m_dev(i2400m);
+ unsigned long flags;
+ size_t padded_len;
+ void *ptr;
+ bool try_head = false;
+ unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
+ || pl_type == I2400M_PT_RESET_COLD;
+
+ d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
+ i2400m, buf, buf_len, pl_type);
+ padded_len = ALIGN(buf_len, I2400M_PL_ALIGN);
+ d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
+ /* If there is no current TX message, create one; if the
+ * current one is out of payload slots or we have a singleton,
+ * close it and start a new one */
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ /* If tx_buf is NULL, device is shutdown */
+ if (i2400m->tx_buf == NULL) {
+ result = -ESHUTDOWN;
+ goto error_tx_new;
+ }
+try_new:
+ if (unlikely(i2400m->tx_msg == NULL))
+ i2400m_tx_new(i2400m);
+ else if (unlikely(!i2400m_tx_fits(i2400m)
+ || (is_singleton && i2400m->tx_msg->num_pls != 0))) {
+ d_printf(2, dev, "closing TX message (fits %u singleton "
+ "%u num_pls %u)\n", i2400m_tx_fits(i2400m),
+ is_singleton, i2400m->tx_msg->num_pls);
+ i2400m_tx_close(i2400m);
+ i2400m_tx_new(i2400m);
+ }
+ if (i2400m->tx_msg == NULL)
+ goto error_tx_new;
+ /*
+ * Check if this skb will fit in the TX queue's current active
+ * TX message. The total message size must not exceed the maximum
+ * size of each message I2400M_TX_MSG_SIZE. If it exceeds,
+ * close the current message and push this skb into the new message.
+ */
+ if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
+ d_printf(2, dev, "TX: message too big, going new\n");
+ i2400m_tx_close(i2400m);
+ i2400m_tx_new(i2400m);
+ }
+ if (i2400m->tx_msg == NULL)
+ goto error_tx_new;
+ /* So we have a current message header; now append space for
+ * the message -- if there is not enough, try the head */
+ ptr = i2400m_tx_fifo_push(i2400m, padded_len,
+ i2400m->bus_tx_block_size, try_head);
+ if (ptr == TAIL_FULL) { /* Tail is full, try head */
+ d_printf(2, dev, "pl append: tail full\n");
+ i2400m_tx_close(i2400m);
+ i2400m_tx_skip_tail(i2400m);
+ try_head = true;
+ goto try_new;
+ } else if (ptr == NULL) { /* All full */
+ result = -ENOSPC;
+ d_printf(2, dev, "pl append: all full\n");
+ } else { /* Got space, copy it, set padding */
+ struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
+ unsigned num_pls = le16_to_cpu(tx_msg->num_pls);
+ memcpy(ptr, buf, buf_len);
+ memset(ptr + buf_len, 0xad, padded_len - buf_len);
+ i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type);
+ d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx\n",
+ le32_to_cpu(tx_msg->pld[num_pls].val),
+ pl_type, buf_len);
+ tx_msg->num_pls = le16_to_cpu(num_pls+1);
+ tx_msg->size += padded_len;
+ d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
+ padded_len, tx_msg->size, num_pls+1);
+ d_printf(2, dev,
+ "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
+ (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size,
+ num_pls+1, ptr - i2400m->tx_buf, buf_len, padded_len);
+ result = 0;
+ if (is_singleton)
+ i2400m_tx_close(i2400m);
+ }
+error_tx_new:
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ /* kick in most cases, except when the TX subsys is down, as
+ * it might free space */
+ if (likely(result != -ESHUTDOWN))
+ i2400m->bus_tx_kick(i2400m);
+ d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
+ i2400m, buf, buf_len, pl_type, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(i2400m_tx);
+
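+/*
+ * Usage sketch (illustration only, not part of the driver): a caller
+ * queueing a network frame would do something along the lines of
+ *
+ *	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
+ *	if (result == -ENOSPC)
+ *		stop the TX queue and retry once space is freed
+ *	else if (result < 0)
+ *		drop the frame, nothing else can be done
+ *
+ * On success the data has already been copied into the FIFO, so the
+ * skb can be freed right away.
+ */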
+
+/**
+ * i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it
+ *
+ * @i2400m: device descriptors
+ * @bus_size: where to place the size of the TX message
+ *
+ * Called by the bus-specific driver to get the first TX message at
+ * the FIFO that is ready for transmission.
+ *
+ * It sets the state in @i2400m to indicate the bus-specific driver is
+ * transferring that message (i2400m->tx_msg_size).
+ *
+ * Once the transfer is completed, call i2400m_tx_msg_sent().
+ *
+ * Notes:
+ *
+ * The size of the TX message to be transmitted might be smaller than
+ * that of the TX message in the FIFO (in case the header was
+ * shorter). Hence, we copy it in @bus_size, for the bus layer to
+ * use. We keep the message's size in i2400m->tx_msg_size so that
+ * when the bus later is done transferring we know how much to
+ * advance the fifo.
+ *
+ * We collect statistics here as all the data is available and we
+ * assume it is going to work [see i2400m_tx_msg_sent()].
+ */
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
+ size_t *bus_size)
+{
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400m_msg_hdr *tx_msg, *tx_msg_moved;
+ unsigned long flags, pls;
+
+ d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ tx_msg_moved = NULL;
+ if (i2400m->tx_buf == NULL)
+ goto out_unlock;
+skip:
+ tx_msg_moved = NULL;
+ if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */
+ i2400m->tx_in = 0;
+ i2400m->tx_out = 0;
+ d_printf(2, dev, "TX: FIFO empty: resetting\n");
+ goto out_unlock;
+ }
+ tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE;
+ if (tx_msg->size & I2400M_TX_SKIP) { /* skip? */
+ d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n",
+ i2400m->tx_out % I2400M_TX_BUF_SIZE,
+ (size_t) tx_msg->size & ~I2400M_TX_SKIP);
+ i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
+ goto skip;
+ }
+
+ if (tx_msg->num_pls == 0) { /* No payloads? */
+ if (tx_msg == i2400m->tx_msg) { /* open, we are done */
+ d_printf(2, dev,
+ "TX: FIFO empty: open msg w/o payloads @%zu\n",
+ (void *) tx_msg - i2400m->tx_buf);
+ tx_msg = NULL;
+ goto out_unlock;
+ } else { /* closed, skip it */
+ d_printf(2, dev,
+ "TX: skip msg w/o payloads @%zu (%zu b)\n",
+ (void *) tx_msg - i2400m->tx_buf,
+ (size_t) tx_msg->size);
+ i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
+ goto skip;
+ }
+ }
+ if (tx_msg == i2400m->tx_msg) /* open msg? */
+ i2400m_tx_close(i2400m);
+
+ /* Now we have a valid TX message (with payloads) to TX */
+ tx_msg_moved = (void *) tx_msg + tx_msg->offset;
+ i2400m->tx_msg_size = tx_msg->size;
+ *bus_size = tx_msg_moved->size;
+ d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu "
+ "size %zu bus_size %zu\n",
+ current->pid, (void *) tx_msg - i2400m->tx_buf,
+ (size_t) tx_msg->offset, (size_t) tx_msg->size,
+ (size_t) tx_msg_moved->size);
+ tx_msg_moved->barker = le32_to_cpu(I2400M_H2D_PREVIEW_BARKER);
+ tx_msg_moved->sequence = le32_to_cpu(i2400m->tx_sequence++);
+
+ pls = le32_to_cpu(tx_msg_moved->num_pls);
+ i2400m->tx_pl_num += pls; /* Update stats */
+ if (pls > i2400m->tx_pl_max)
+ i2400m->tx_pl_max = pls;
+ if (pls < i2400m->tx_pl_min)
+ i2400m->tx_pl_min = pls;
+ i2400m->tx_num++;
+ i2400m->tx_size_acc += *bus_size;
+ if (*bus_size < i2400m->tx_size_min)
+ i2400m->tx_size_min = *bus_size;
+ if (*bus_size > i2400m->tx_size_max)
+ i2400m->tx_size_max = *bus_size;
+out_unlock:
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ d_fnstart(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n",
+ i2400m, bus_size, *bus_size, tx_msg_moved);
+ return tx_msg_moved;
+}
+EXPORT_SYMBOL_GPL(i2400m_tx_msg_get);
+
+
+/**
+ * i2400m_tx_msg_sent - indicate the transmission of a TX message
+ *
+ * @i2400m: device descriptor
+ *
+ * Called by the bus-specific driver when a message has been sent;
+ * this pops the message from the FIFO; and as space has been freed,
+ * restarts the queue in case it was stopped.
+ *
+ * Should be called even if the message send failed and we are
+ * dropping this TX message.
+ */
+void i2400m_tx_msg_sent(struct i2400m *i2400m)
+{
+ unsigned n;
+ unsigned long flags;
+ struct device *dev = i2400m_dev(i2400m);
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ if (i2400m->tx_buf == NULL)
+ goto out_unlock;
+ i2400m->tx_out += i2400m->tx_msg_size;
+ d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
+ i2400m->tx_msg_size = 0;
+ BUG_ON(i2400m->tx_out > i2400m->tx_in);
+ /* level them FIFO markers off */
+ n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
+ i2400m->tx_out %= I2400M_TX_BUF_SIZE;
+ i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
+out_unlock:
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
+
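+/*
+ * Sketch of how a bus layer consumes the FIFO with the two calls
+ * above (illustration only; see usb-tx.c:i2400mu_txd() for the real
+ * user):
+ *
+ *	while ((tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
+ *		send tx_msg_size bytes at tx_msg over the bus
+ *		i2400m_tx_msg_sent(i2400m);
+ *	}
+ *
+ * i2400m_tx_msg_sent() must be called even if the bus transfer fails,
+ * so the message is popped and the FIFO keeps moving.
+ */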
+
+/**
+ * i2400m_tx_setup - Initialize the TX queue and infrastructure
+ *
+ * @i2400m: device descriptor
+ *
+ * Make sure we reset the TX sequence to zero, as when this function
+ * is called, the firmware has just been restarted. Same rationale
+ * for tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
+ * the memory for the TX queue is reallocated.
+ */
+int i2400m_tx_setup(struct i2400m *i2400m)
+{
+ int result = 0;
+ void *tx_buf;
+ unsigned long flags;
+
+ /* Do this here only once -- can't do on
+ * i2400m_hard_start_xmit() as we'll cause race conditions if
+ * the WS was scheduled on another CPU */
+ INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
+
+ tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
+ if (tx_buf == NULL) {
+ result = -ENOMEM;
+ goto error_kmalloc;
+ }
+
+ /*
+ * Fail the build if we can't fit at least two maximum size messages
+ * on the TX FIFO [one being delivered while one is constructed].
+ */
+ BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400m->tx_sequence = 0;
+ i2400m->tx_in = 0;
+ i2400m->tx_out = 0;
+ i2400m->tx_msg_size = 0;
+ i2400m->tx_msg = NULL;
+ i2400m->tx_buf = tx_buf;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ /* Huh? the bus layer has to define this... */
+ BUG_ON(i2400m->bus_tx_block_size == 0);
+error_kmalloc:
+ return result;
+
+}
+
+
+/**
+ * i2400m_tx_release - Tear down the TX queue and infrastructure
+ *
+ * @i2400m: device descriptor
+ */
+void i2400m_tx_release(struct i2400m *i2400m)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ kfree(i2400m->tx_buf);
+ i2400m->tx_buf = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+}
diff --git a/drivers/staging/wimax/i2400m/usb-debug-levels.h b/drivers/staging/wimax/i2400m/usb-debug-levels.h
new file mode 100644
index 000000000000..8fd0111560f6
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/usb-debug-levels.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Debug levels control file for the i2400m-usb module
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME i2400m_usb
+#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
+
+#include "../linux-wimax-debug.h"
+
+/* List of all the enabled modules */
+enum d_module {
+ D_SUBMODULE_DECLARE(usb),
+ D_SUBMODULE_DECLARE(fw),
+ D_SUBMODULE_DECLARE(notif),
+ D_SUBMODULE_DECLARE(rx),
+ D_SUBMODULE_DECLARE(tx),
+};
+
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/staging/wimax/i2400m/usb-fw.c b/drivers/staging/wimax/i2400m/usb-fw.c
new file mode 100644
index 000000000000..27ab233650d5
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/usb-fw.c
@@ -0,0 +1,365 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Firmware uploader's USB specifics
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - bus generic/specific split
+ *
+ * THE PROCEDURE
+ *
+ * See fw.c for the generic description of this procedure.
+ *
+ * This file implements only the USB specifics. It boils down to how
+ * to send a command and waiting for an acknowledgement from the
+ * device.
+ *
+ * This code (and process) is single threaded. It assumes it is the
+ * only thread poking around (guaranteed by fw.c).
+ *
+ * COMMAND EXECUTION
+ *
+ * A write URB is posted with the buffer to the bulk output endpoint.
+ *
+ * ACK RECEPTION
+ *
+ * We just post a URB to the notification endpoint and wait for
+ * data. We repeat until we get all the data we expect (as indicated
+ * by the call from the bus generic code).
+ *
+ * The data is not read from the bulk in endpoint for boot mode.
+ *
+ * ROADMAP
+ *
+ * i2400mu_bus_bm_cmd_send
+ * i2400m_bm_cmd_prepare...
+ * i2400mu_tx_bulk_out
+ *
+ * i2400mu_bus_bm_wait_for_ack
+ * i2400mu_notif_submit
+ */
+#include <linux/usb.h>
+#include <linux/gfp.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE fw
+#include "usb-debug-levels.h"
+
+
+/*
+ * Synchronous write to the device
+ *
+ * Takes care of updating the EDC counts and thus handles device errors.
+ */
+static
+ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
+{
+ int result;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ int len;
+ struct usb_endpoint_descriptor *epd;
+ int pipe, do_autopm = 1;
+
+ result = usb_autopm_get_interface(i2400mu->usb_iface);
+ if (result < 0) {
+ dev_err(dev, "BM-CMD: can't get autopm: %d\n", result);
+ do_autopm = 0;
+ }
+ epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
+ pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+ result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, 200);
+ switch (result) {
+ case 0:
+ if (len != buf_size) {
+ dev_err(dev, "BM-CMD: short write (%u B vs %zu "
+ "expected)\n", len, buf_size);
+ result = -EIO;
+ break;
+ }
+ result = len;
+ break;
+ case -EPIPE:
+ /*
+ * Stall -- maybe the device is choking with our
+ * requests. Clear it and give it some time. If they
+ * happen too often, it might be another symptom, so we
+ * reset.
+ *
+ * No error handling for usb_clear_halt(); if it
+ * works, the retry works; if it fails, this switch
+ * does the error handling for us.
+ */
+ if (edc_inc(&i2400mu->urb_edc,
+ 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "BM-CMD: too many stalls in "
+ "URB; resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ } else {
+ usb_clear_halt(i2400mu->usb_dev, pipe);
+ msleep(10); /* give the device some time */
+ goto retry;
+ }
+ fallthrough;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN: /* and exit */
+ case -ECONNRESET:
+ result = -ESHUTDOWN;
+ break;
+ case -ETIMEDOUT: /* bah... */
+ break;
+ default: /* any other? */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "BM-CMD: maximum errors in "
+ "URB exceeded; resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ result = -ENODEV;
+ break;
+ }
+ dev_err(dev, "BM-CMD: URB error %d, retrying\n",
+ result);
+ goto retry;
+ }
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ return result;
+}
+
+
+/*
+ * Send a boot-mode command over the bulk-out pipe
+ *
+ * Command can be a raw command, which requires no preparation (and
+ * which might not even be following the command format). Checks that
+ * the right amount of data was transferred.
+ *
+ * To satisfy USB requirements (no onstack, vmalloc or in data segment
+ * buffers), we copy the command to i2400m->bm_cmd_buf and send it from
+ * there.
+ *
+ * @flags: pass thru from i2400m_bm_cmd()
+ * @return: cmd_size if ok, < 0 errno code on error.
+ */
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
+ const struct i2400m_bootrom_header *_cmd,
+ size_t cmd_size, int flags)
+{
+ ssize_t result;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
+ struct i2400m_bootrom_header *cmd;
+ size_t cmd_size_a = ALIGN(cmd_size, 16); /* USB restriction */
+
+ d_fnstart(8, dev, "(i2400m %p cmd %p size %zu)\n",
+ i2400m, _cmd, cmd_size);
+ result = -E2BIG;
+ if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
+ goto error_too_big;
+ if (_cmd != i2400m->bm_cmd_buf)
+ memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
+ cmd = i2400m->bm_cmd_buf;
+ if (cmd_size_a > cmd_size) /* Zero pad space */
+ memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
+ if ((flags & I2400M_BM_CMD_RAW) == 0) {
+ if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
+ dev_warn(dev, "SW BUG: response_required == 0\n");
+ i2400m_bm_cmd_prepare(cmd);
+ }
+ result = i2400mu_tx_bulk_out(i2400mu, i2400m->bm_cmd_buf, cmd_size);
+ if (result < 0) {
+ dev_err(dev, "boot-mode cmd %d: cannot send: %zd\n",
+ opcode, result);
+ goto error_cmd_send;
+ }
+ if (result != cmd_size) { /* all was transferred? */
+ dev_err(dev, "boot-mode cmd %d: incomplete transfer "
+ "(%zd vs %zu submitted)\n", opcode, result, cmd_size);
+ result = -EIO;
+ goto error_cmd_size;
+ }
+error_cmd_size:
+error_cmd_send:
+error_too_big:
+ d_fnend(8, dev, "(i2400m %p cmd %p size %zu) = %zd\n",
+ i2400m, _cmd, cmd_size, result);
+ return result;
+}
+
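+/*
+ * Example of the alignment above (command size is hypothetical): a
+ * 20 byte boot-mode command gets cmd_size_a = ALIGN(20, 16) = 32, so
+ * the 12 bytes following the command in bm_cmd_buf are zeroed to
+ * satisfy the 16 byte USB alignment restriction.
+ */
+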
+
+static
+void __i2400mu_bm_notif_cb(struct urb *urb)
+{
+ complete(urb->context);
+}
+
+
+/*
+ * submit a read to the notification endpoint
+ *
+ * @i2400m: device descriptor
+ * @urb: urb to use
+ * @completion: completion variable to complete when done
+ *
+ * Data is always read to i2400m->bm_ack_buf
+ */
+static
+int i2400mu_notif_submit(struct i2400mu *i2400mu, struct urb *urb,
+ struct completion *completion)
+{
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct usb_endpoint_descriptor *epd;
+ int pipe;
+
+ epd = usb_get_epd(i2400mu->usb_iface,
+ i2400mu->endpoint_cfg.notification);
+ pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+ usb_fill_int_urb(urb, i2400mu->usb_dev, pipe,
+ i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE,
+ __i2400mu_bm_notif_cb, completion,
+ epd->bInterval);
+ return usb_submit_urb(urb, GFP_KERNEL);
+}
+
+
+/*
+ * Read an ack from the notification endpoint
+ *
+ * @i2400m: device descriptor
+ * @_ack: pointer to where to store the read data
+ * @ack_size: how many bytes we should read
+ *
+ * Returns: < 0 errno code on error; otherwise, amount of received bytes.
+ *
+ * Submits a notification read, appends the read data to the given ack
+ * buffer and then repeats (until @ack_size bytes have been
+ * received).
+ */
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
+ struct i2400m_bootrom_header *_ack,
+ size_t ack_size)
+{
+ ssize_t result = -ENOMEM;
+ struct device *dev = i2400m_dev(i2400m);
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct urb notif_urb;
+ void *ack = _ack;
+ size_t offset, len;
+ long val;
+ int do_autopm = 1;
+ DECLARE_COMPLETION_ONSTACK(notif_completion);
+
+ d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
+ i2400m, ack, ack_size);
+ BUG_ON(_ack == i2400m->bm_ack_buf);
+ result = usb_autopm_get_interface(i2400mu->usb_iface);
+ if (result < 0) {
+ dev_err(dev, "BM-ACK: can't get autopm: %d\n", (int) result);
+ do_autopm = 0;
+ }
+ usb_init_urb(&notif_urb); /* ready notifications */
+ usb_get_urb(&notif_urb);
+ offset = 0;
+ while (offset < ack_size) {
+ init_completion(&notif_completion);
+ result = i2400mu_notif_submit(i2400mu, &notif_urb,
+ &notif_completion);
+ if (result < 0)
+ goto error_notif_urb_submit;
+ val = wait_for_completion_interruptible_timeout(
+ &notif_completion, HZ);
+ if (val == 0) {
+ result = -ETIMEDOUT;
+ usb_kill_urb(&notif_urb); /* Timedout */
+ goto error_notif_wait;
+ }
+ if (val == -ERESTARTSYS) {
+ result = -EINTR; /* Interrupted */
+ usb_kill_urb(&notif_urb);
+ goto error_notif_wait;
+ }
+ result = notif_urb.status; /* How was the ack? */
+ switch (result) {
+ case 0:
+ break;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN: /* and exit */
+ case -ECONNRESET:
+ result = -ESHUTDOWN;
+ goto error_dev_gone;
+ default: /* any other? */
+ usb_kill_urb(&notif_urb); /* Timedout */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ goto error_exceeded;
+ dev_err(dev, "BM-ACK: URB error %d, "
+ "retrying\n", notif_urb.status);
+ continue; /* retry */
+ }
+ if (notif_urb.actual_length == 0) {
+ d_printf(6, dev, "ZLP received, retrying\n");
+ continue;
+ }
+ /* Got data, append it to the buffer */
+ len = min(ack_size - offset, (size_t) notif_urb.actual_length);
+ memcpy(ack + offset, i2400m->bm_ack_buf, len);
+ offset += len;
+ }
+ result = offset;
+error_notif_urb_submit:
+error_notif_wait:
+error_dev_gone:
+out:
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n",
+ i2400m, ack, ack_size, (long) result);
+ usb_put_urb(&notif_urb);
+ return result;
+
+error_exceeded:
+ dev_err(dev, "bm: maximum errors in notification URB exceeded; "
+ "resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ goto out;
+}
diff --git a/drivers/staging/wimax/i2400m/usb-notif.c b/drivers/staging/wimax/i2400m/usb-notif.c
new file mode 100644
index 000000000000..5d429f816125
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/usb-notif.c
@@ -0,0 +1,258 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m over USB
+ * Notification handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ *
+ * The notification endpoint is active when the device is not in boot
+ * mode; in here we just read and get notifications; based on those,
+ * we act to either reinitialize the device after a reboot or to
+ * submit a RX request.
+ *
+ * ROADMAP
+ *
+ * i2400mu_notification_setup()
+ *
+ * i2400mu_notification_release()
+ *
+ * i2400mu_notification_cb() Called when a URB is ready
+ * i2400mu_notification_grok()
+ * i2400m_is_boot_barker()
+ * i2400m_dev_reset_handle()
+ * i2400mu_rx_kick()
+ */
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE notif
+#include "usb-debug-levels.h"
+
+
+static const
+__le32 i2400m_ZERO_BARKER[4] = { 0, 0, 0, 0 };
+
+
+/*
+ * Process a received notification
+ *
+ * In normal operation mode, we can only receive two types of payloads
+ * on the notification endpoint:
+ *
+ * - a reboot barker: we do a bootstrap (the device has reset).
+ *
+ * - a block of zeroes: there is pending data in the IN endpoint
+ */
+static
+int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf,
+ size_t buf_len)
+{
+ int ret;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+
+ d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n",
+ i2400mu, buf, buf_len);
+ ret = -EIO;
+ if (buf_len < sizeof(i2400m_ZERO_BARKER))
+ /* Not a bug, just ignore */
+ goto error_bad_size;
+ ret = 0;
+ if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
+ i2400mu_rx_kick(i2400mu);
+ goto out;
+ }
+ ret = i2400m_is_boot_barker(i2400m, buf, buf_len);
+ if (unlikely(ret >= 0))
+ ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
+ else /* Unknown or unexpected data in the notif message */
+ i2400m_unknown_barker(i2400m, buf, buf_len);
+error_bad_size:
+out:
+ d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n",
+ i2400mu, buf, buf_len, ret);
+ return ret;
+}
+
+
+/*
+ * URB callback for the notification endpoint
+ *
+ * @urb: the urb received from the notification endpoint
+ *
+ * This function will just process the USB side of the transaction,
+ * checking everything is fine, pass the processing to
+ * i2400m_notification_grok() and resubmit the URB.
+ */
+static
+void i2400mu_notification_cb(struct urb *urb)
+{
+ int ret;
+ struct i2400mu *i2400mu = urb->context;
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(4, dev, "(urb %p status %d actual_length %d)\n",
+ urb, urb->status, urb->actual_length);
+ ret = urb->status;
+ switch (ret) {
+ case 0:
+ ret = i2400mu_notification_grok(i2400mu, urb->transfer_buffer,
+ urb->actual_length);
+ if (ret == -EIO && edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME))
+ goto error_exceeded;
+ if (ret == -ENOMEM) /* uff...power cycle? shutdown? */
+ goto error_exceeded;
+ break;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* ditto */
+ case -ESHUTDOWN: /* URB killed */
+ case -ECONNRESET: /* disconnection */
+ goto out; /* Notify around */
+ default: /* Some error? */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ goto error_exceeded;
+ dev_err(dev, "notification: URB error %d, retrying\n",
+ urb->status);
+ }
+ usb_mark_last_busy(i2400mu->usb_dev);
+ ret = usb_submit_urb(i2400mu->notif_urb, GFP_ATOMIC);
+ switch (ret) {
+ case 0:
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* ditto */
+ case -ESHUTDOWN: /* URB killed */
+ case -ECONNRESET: /* disconnection */
+ break; /* just ignore */
+ default: /* Some error? */
+ dev_err(dev, "notification: cannot submit URB: %d\n", ret);
+ goto error_submit;
+ }
+ d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
+ urb, urb->status, urb->actual_length);
+ return;
+
+error_exceeded:
+ dev_err(dev, "maximum errors in notification URB exceeded; "
+ "resetting device\n");
+error_submit:
+ usb_queue_reset_device(i2400mu->usb_iface);
+out:
+ d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
+ urb, urb->status, urb->actual_length);
+}
+
+
+/*
+ * setup the notification endpoint
+ *
+ * @i2400m: device descriptor
+ *
+ * This procedure prepares the notification urb and handler for receiving
+ * unsolicited barkers from the device.
+ */
+int i2400mu_notification_setup(struct i2400mu *i2400mu)
+{
+ struct device *dev = &i2400mu->usb_iface->dev;
+ int usb_pipe, ret = 0;
+ struct usb_endpoint_descriptor *epd;
+ char *buf;
+
+ d_fnstart(4, dev, "(i2400m %p)\n", i2400mu);
+ buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto error_buf_alloc;
+ }
+
+ i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!i2400mu->notif_urb) {
+ ret = -ENOMEM;
+ goto error_alloc_urb;
+ }
+ epd = usb_get_epd(i2400mu->usb_iface,
+ i2400mu->endpoint_cfg.notification);
+ usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+ usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe,
+ buf, I2400MU_MAX_NOTIFICATION_LEN,
+ i2400mu_notification_cb, i2400mu, epd->bInterval);
+ ret = usb_submit_urb(i2400mu->notif_urb, GFP_KERNEL);
+ if (ret != 0) {
+ dev_err(dev, "notification: cannot submit URB: %d\n", ret);
+ goto error_submit;
+ }
+ d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret);
+ return ret;
+
+error_submit:
+ usb_free_urb(i2400mu->notif_urb);
+error_alloc_urb:
+ kfree(buf);
+error_buf_alloc:
+ d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret);
+ return ret;
+}
+
+
+/*
+ * Tear down of the notification mechanism
+ *
+ * @i2400m: device descriptor
+ *
+ * Kill the interrupt endpoint urb, free any allocated resources.
+ *
+ * We need to check if we have already done this because, for example,
+ * _suspend() calls this; so if after a suspend() we get a _disconnect()
+ * (as is the case when hibernating), nothing bad happens.
+ */
+void i2400mu_notification_release(struct i2400mu *i2400mu)
+{
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+ if (i2400mu->notif_urb != NULL) {
+ usb_kill_urb(i2400mu->notif_urb);
+ kfree(i2400mu->notif_urb->transfer_buffer);
+ usb_free_urb(i2400mu->notif_urb);
+ i2400mu->notif_urb = NULL;
+ }
+ d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+}
diff --git a/drivers/staging/wimax/i2400m/usb-rx.c b/drivers/staging/wimax/i2400m/usb-rx.c
new file mode 100644
index 000000000000..5b64bda7d9e7
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/usb-rx.c
@@ -0,0 +1,462 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB RX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * - Initial implementation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Use skb_clone(), break up processing in chunks
+ * - Split transport/device specific
+ * - Make buffer size dynamic to exert less memory pressure
+ *
+ *
+ * This handles the RX path on USB.
+ *
+ * When a notification is received that says 'there is RX data ready',
+ * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
+ * reads a buffer from USB and passes it to i2400m_rx() in the generic
+ * handling code. The RX buffer has a specific format that is
+ * described in rx.c.
+ *
+ * We use a kernel thread in a loop because:
+ *
+ * - we want to be able to call the USB power management get/put
+ * functions (blocking) before each transaction.
+ *
+ * - We might get a lot of notifications and we don't want to submit
+ * a zillion reads; by serializing, we are throttling.
+ *
+ * - RX data processing can get heavy enough so that it is not
+ * appropriate for doing it in the USB callback; thus we run it in a
+ * process context.
+ *
+ * We provide a read buffer of an arbitrary size (short of a page); if
+ * the callback reports -EOVERFLOW, it means it was too small, so we
+ * just double the size and retry (being careful to append, as
+ * sometimes the device provided some data). Every now and then we
+ * check if the average packet size is smaller than the current packet
+ * size and if so, we halve it. At the end, the size of the
+ * preallocated buffer should be following the average received
+ * transaction size, adapting dynamically to it.
+ *
+ * ROADMAP
+ *
+ * i2400mu_rx_kick() Called from notif.c when we get a
+ * 'data ready' notification
+ * i2400mu_rxd() Kernel RX daemon
+ * i2400mu_rx() Receive USB data
+ * i2400m_rx() Send data to generic i2400m RX handling
+ *
+ * i2400mu_rx_setup() called from i2400mu_bus_dev_start()
+ *
+ * i2400mu_rx_release() called from i2400mu_bus_dev_stop()
+ */
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE rx
+#include "usb-debug-levels.h"
+
+/*
+ * Dynamic RX size
+ *
+ * We can't let the rx_size be a multiple of 512 bytes (the RX
+ * endpoint's max packet size). On some USB host controllers (we
+ * haven't been able to fully characterize which), if the device is
+ * about to send (for example) X bytes and we only post a buffer to
+ * receive n*512, it will fail to mark that as babble (so that
+ * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
+ * rest).
+ *
+ * So on growing or shrinking, if the size is a multiple of the
+ * max packet size, we remove a few bytes (instead of adding some, so
+ * that with a buddy allocator we waste less space).
+ *
+ * Note we also need a hook for this on i2400mu_rx() -- when we do the
+ * first read, we are sure we won't hit this spot because
+ * i2400mu->rx_size has been set properly. However, if we have to
+ * double because of -EOVERFLOW, when we launch the read to get the
+ * rest of the data, we *have* to make sure that also is not a
+ * multiple of the max_pkt_size.
+ */
+
+static
+size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
+{
+ struct device *dev = &i2400mu->usb_iface->dev;
+ size_t rx_size;
+ const size_t max_pkt_size = 512;
+
+ rx_size = 2 * i2400mu->rx_size;
+ if (rx_size % max_pkt_size == 0) {
+ rx_size -= 8;
+ d_printf(1, dev,
+ "RX: expected size grew to %zu [adjusted -8] "
+ "from %zu\n",
+ rx_size, i2400mu->rx_size);
+ } else
+ d_printf(1, dev,
+ "RX: expected size grew to %zu from %zu\n",
+ rx_size, i2400mu->rx_size);
+ return rx_size;
+}
+
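+/*
+ * Example of the adjustment above (sizes are illustrative): growing
+ * from rx_size == 1024 yields 2048, a multiple of the 512 byte max
+ * packet size, so it is trimmed to 2040; growing from 2040 yields
+ * 4080, which is not a multiple of 512 and is used as is.
+ */
+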
+
+static
+void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
+{
+ const size_t max_pkt_size = 512;
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ if (unlikely(i2400mu->rx_size_cnt >= 100
+ && i2400mu->rx_size_auto_shrink)) {
+ size_t avg_rx_size =
+ i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
+ size_t new_rx_size = i2400mu->rx_size / 2;
+ if (avg_rx_size < new_rx_size) {
+ if (new_rx_size % max_pkt_size == 0) {
+ new_rx_size -= 8;
+ d_printf(1, dev,
+ "RX: expected size shrank to %zu "
+ "[adjusted -8] from %zu\n",
+ new_rx_size, i2400mu->rx_size);
+ } else
+ d_printf(1, dev,
+ "RX: expected size shrank to %zu "
+ "from %zu\n",
+ new_rx_size, i2400mu->rx_size);
+ i2400mu->rx_size = new_rx_size;
+ i2400mu->rx_size_cnt = 0;
+ i2400mu->rx_size_acc = i2400mu->rx_size;
+ }
+ }
+}
+
+/*
+ * Receive a message with payloads from the USB bus into an skb
+ *
+ * @i2400mu: USB device descriptor
+ * @rx_skb: skb where to place the received message
+ *
+ * Deals with all the USB-specifics of receiving, dynamically
+ * increasing the buffer size if so needed. Returns the payload in the
+ * skb, ready to process. On a zero-length packet, we retry.
+ *
+ * On soft USB errors, we retry (until they become too frequent and
+ * then are promoted to hard); on hard USB errors, we reset the
+ * device. On other errors (e.g., skb reallocation failure), we just
+ * drop the buffer and hope the next invocation solves it.
+ *
+ * Returns: pointer to the skb if ok, ERR_PTR on error.
+ * NOTE: this function might realloc the skb (if it is too small),
+ * so always update with the one returned.
+ * ERR_PTR() is < 0 on error.
+ * Will return NULL if it cannot reallocate -- this can be
+ * considered a transient retryable error.
+ */
+static
+struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
+{
+ int result = 0;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ int usb_pipe, read_size, rx_size, do_autopm;
+ struct usb_endpoint_descriptor *epd;
+ const size_t max_pkt_size = 512;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+ do_autopm = atomic_read(&i2400mu->do_autopm);
+ result = do_autopm ?
+ usb_autopm_get_interface(i2400mu->usb_iface) : 0;
+ if (result < 0) {
+ dev_err(dev, "RX: can't get autopm: %d\n", result);
+ do_autopm = 0;
+ }
+ epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
+ usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+ rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
+ if (unlikely(rx_size % max_pkt_size == 0)) {
+ rx_size -= 8;
+ d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
+ }
+ result = usb_bulk_msg(
+ i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
+ rx_size, &read_size, 200);
+ usb_mark_last_busy(i2400mu->usb_dev);
+ switch (result) {
+ case 0:
+ if (read_size == 0)
+ goto retry; /* ZLP, just resubmit */
+ skb_put(rx_skb, read_size);
+ break;
+ case -EPIPE:
+ /*
+ * Stall -- maybe the device is choking with our
+ * requests. Clear it and give it some time. If they
+ * happen to often, it might be another symptom, so we
+ * reset.
+ *
+ * No error handling for usb_clear_halt(); if it
+ * works, the retry works; if it fails, this switch
+ * does the error handling for us.
+ */
+ if (edc_inc(&i2400mu->urb_edc,
+ 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "BM-CMD: too many stalls in "
+ "URB; resetting device\n");
+ goto do_reset;
+ }
+ usb_clear_halt(i2400mu->usb_dev, usb_pipe);
+ msleep(10); /* give the device some time */
+ goto retry;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ break;
+ case -EOVERFLOW: { /* too small, reallocate */
+ struct sk_buff *new_skb;
+ rx_size = i2400mu_rx_size_grow(i2400mu);
+ if (rx_size <= (1 << 16)) /* cap it */
+ i2400mu->rx_size = rx_size;
+ else if (printk_ratelimit()) {
+ dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
+ result = -EINVAL;
+ goto out;
+ }
+ skb_put(rx_skb, read_size);
+ new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
+ GFP_KERNEL);
+ if (new_skb == NULL) {
+ kfree_skb(rx_skb);
+ rx_skb = NULL;
+ goto out; /* drop it...*/
+ }
+ kfree_skb(rx_skb);
+ rx_skb = new_skb;
+ i2400mu->rx_size_cnt = 0;
+ i2400mu->rx_size_acc = i2400mu->rx_size;
+ d_printf(1, dev, "RX: size changed to %d, received %d, "
+ "copied %d, capacity %ld\n",
+ rx_size, read_size, rx_skb->len,
+ (long) skb_end_offset(new_skb));
+ goto retry;
+ }
+ /* In most cases, it happens due to the hardware scheduling a
+ * read when there was no data - unfortunately, we have no way
+ * to tell this timeout from a USB timeout. So we just ignore
+ * it. */
+ case -ETIMEDOUT:
+ dev_err(dev, "RX: timeout: %d\n", result);
+ result = 0;
+ break;
+ default: /* Any error */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ goto error_reset;
+ dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
+ goto retry;
+ }
+out:
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
+ return rx_skb;
+
+error_reset:
+ dev_err(dev, "RX: maximum errors in URB exceeded; "
+ "resetting device\n");
+do_reset:
+ usb_queue_reset_device(i2400mu->usb_iface);
+ rx_skb = ERR_PTR(result);
+ goto out;
+}
+
+
+/*
+ * Kernel thread for USB reception of data
+ *
+ * This thread waits for a kick; once kicked, it will allocate an skb
+ * and receive a single message to it from USB (using
+ * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
+ * code for processing.
+ *
+ * When done processing, it runs some dirty statistics to verify if
+ * the last 100 messages received were, on average, smaller than half
+ * of the current RX buffer size. In that case, the RX buffer size is
+ * halved. This helps lower the pressure on the memory
+ * allocator.
+ *
+ * Hard errors force the thread to exit.
+ */
+static
+int i2400mu_rxd(void *_i2400mu)
+{
+ int result = 0;
+ struct i2400mu *i2400mu = _i2400mu;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ size_t pending;
+ int rx_size;
+ struct sk_buff *rx_skb;
+ unsigned long flags;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ BUG_ON(i2400mu->rx_kthread != NULL);
+ i2400mu->rx_kthread = current;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ while (1) {
+ d_printf(2, dev, "RX: waiting for messages\n");
+ pending = 0;
+ wait_event_interruptible(
+ i2400mu->rx_wq,
+ (kthread_should_stop() /* check this first! */
+ || (pending = atomic_read(&i2400mu->rx_pending_count)))
+ );
+ if (kthread_should_stop())
+ break;
+ if (pending == 0)
+ continue;
+ rx_size = i2400mu->rx_size;
+ d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
+ rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
+ if (rx_skb == NULL) {
+ dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
+ rx_size);
+ msleep(50); /* give it some time? */
+ continue;
+ }
+
+ /* Receive the message with the payloads */
+ rx_skb = i2400mu_rx(i2400mu, rx_skb);
+ result = PTR_ERR(rx_skb);
+ if (IS_ERR(rx_skb))
+ goto out;
+ atomic_dec(&i2400mu->rx_pending_count);
+ if (rx_skb == NULL || rx_skb->len == 0) {
+ /* some "ignorable" condition */
+ kfree_skb(rx_skb);
+ continue;
+ }
+
+ /* Deliver the message to the generic i2400m code */
+ i2400mu->rx_size_cnt++;
+ i2400mu->rx_size_acc += rx_skb->len;
+ result = i2400m_rx(i2400m, rx_skb);
+ if (result == -EIO
+ && edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ goto error_reset;
+ }
+
+ /* Maybe adjust RX buffer size */
+ i2400mu_rx_size_maybe_shrink(i2400mu);
+ }
+ result = 0;
+out:
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ i2400mu->rx_kthread = NULL;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
+ return result;
+
+error_reset:
+ dev_err(dev, "RX: maximum errors in received buffer exceeded; "
+ "resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ goto out;
+}
+
+
+/*
+ * Start reading from the device
+ *
+ * @i2400m: device instance
+ *
+ * Notify the RX thread that there is data pending.
+ */
+void i2400mu_rx_kick(struct i2400mu *i2400mu)
+{
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
+ atomic_inc(&i2400mu->rx_pending_count);
+ wake_up_all(&i2400mu->rx_wq);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+int i2400mu_rx_setup(struct i2400mu *i2400mu)
+{
+ int result = 0;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct task_struct *kthread;
+
+ kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
+ wimax_dev->name);
+ /* the kthread function sets i2400mu->rx_kthread */
+ if (IS_ERR(kthread)) {
+ result = PTR_ERR(kthread);
+ dev_err(dev, "RX: cannot start thread: %d\n", result);
+ }
+ return result;
+}
+
+
+void i2400mu_rx_release(struct i2400mu *i2400mu)
+{
+ unsigned long flags;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = i2400m_dev(i2400m);
+ struct task_struct *kthread;
+
+ spin_lock_irqsave(&i2400m->rx_lock, flags);
+ kthread = i2400mu->rx_kthread;
+ i2400mu->rx_kthread = NULL;
+ spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+ if (kthread)
+ kthread_stop(kthread);
+ else
+ d_printf(1, dev, "RX: kthread had already exited\n");
+}
+
diff --git a/drivers/staging/wimax/i2400m/usb-tx.c b/drivers/staging/wimax/i2400m/usb-tx.c
new file mode 100644
index 000000000000..3ba9d70cca1b
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/usb-tx.c
@@ -0,0 +1,273 @@
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * USB specific TX handling
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ * - Initial implementation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Split transport/device specific
+ *
+ *
+ * Takes the TX messages in the i2400m's driver TX FIFO and sends them
+ * to the device until there are no more.
+ *
+ * If we fail sending the message, we just drop it. There isn't much
+ * we can do at this point. We could also retry, but the USB stack has
+ * already retried and still failed, so there is not much of a
+ * point. As well, most of the traffic is network, which has recovery
+ * methods for dropped packets.
+ *
+ * For sending we just obtain a FIFO buffer to send, send it to the
+ * USB bulk out, tell the TX FIFO code we have sent it; query for
+ * another one, etc... until done.
+ *
+ * We use a thread so we can call usb_autopm_get_interface() and
+ * usb_autopm_put_interface() for each transaction; this way when the
+ * device goes idle, it will suspend. It also has less overhead than a
+ * dedicated workqueue, as it is being used for a single task.
+ *
+ * ROADMAP
+ *
+ * i2400mu_tx_setup()
+ * i2400mu_tx_release()
+ *
+ * i2400mu_bus_tx_kick() - Called by the tx.c code when there
+ * is new data in the FIFO.
+ * i2400mu_txd()
+ * i2400m_tx_msg_get()
+ * i2400m_tx_msg_sent()
+ */
+#include "i2400m-usb.h"
+
+
+#define D_SUBMODULE tx
+#include "usb-debug-levels.h"
+
+
+/*
+ * Get the next TX message in the TX FIFO and send it to the device
+ *
+ * Note that any iteration consumes a message to be sent, no matter if
+ * it succeeds or fails (we have no real way to retry or complain).
+ *
+ * Return: 0 if ok, < 0 errno code on hard error.
+ */
+static
+int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
+ size_t tx_msg_size)
+{
+ int result = 0;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ int usb_pipe, sent_size, do_autopm;
+ struct usb_endpoint_descriptor *epd;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+ do_autopm = atomic_read(&i2400mu->do_autopm);
+ result = do_autopm ?
+ usb_autopm_get_interface(i2400mu->usb_iface) : 0;
+ if (result < 0) {
+ dev_err(dev, "TX: can't get autopm: %d\n", result);
+ do_autopm = 0;
+ }
+ epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
+ usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+retry:
+ result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe,
+ tx_msg, tx_msg_size, &sent_size, 200);
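+	/* note the device as just-busy so the runtime PM autosuspend
+	 * idle timer restarts after this transfer */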
+ usb_mark_last_busy(i2400mu->usb_dev);
+ switch (result) {
+ case 0:
+ if (sent_size != tx_msg_size) { /* Too short? drop it */
+ dev_err(dev, "TX: short write (%d B vs %zu "
+ "expected)\n", sent_size, tx_msg_size);
+ result = -EIO;
+ }
+ break;
+ case -EPIPE:
+ /*
+		 * Stall -- maybe the device is choking on our
+		 * requests. Clear it and give it some time. If they
+		 * happen too often, it might be another symptom, so
+		 * we reset.
+		 *
+		 * No error handling for usb_clear_halt(); if it
+		 * works, the retry works; if it fails, this switch
+		 * does the error handling for us.
+ */
+ if (edc_inc(&i2400mu->urb_edc,
+ 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "BM-CMD: too many stalls in "
+ "URB; resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ } else {
+ usb_clear_halt(i2400mu->usb_dev, usb_pipe);
+ msleep(10); /* give the device some time */
+ goto retry;
+ }
+ fallthrough;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN: /* and exit */
+ case -ECONNRESET:
+ result = -ESHUTDOWN;
+ break;
+ default: /* Some error? */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "TX: maximum errors in URB "
+ "exceeded; resetting device\n");
+ usb_queue_reset_device(i2400mu->usb_iface);
+ } else {
+ dev_err(dev, "TX: cannot send URB; retrying. "
+ "tx_msg @%zu %zu B [%d sent]: %d\n",
+ (void *) tx_msg - i2400m->tx_buf,
+ tx_msg_size, sent_size, result);
+ goto retry;
+ }
+ }
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
+ return result;
+}
+
+
+/*
+ * Get the next TX message in the TX FIFO and send it to the device
+ *
+ * Note we exit the loop if i2400mu_tx() fails; that function only
+ * fails on hard errors (failing to transmit a buffer is not
+ * considered one of them; see its documentation).
+ *
+ * Return: 0
+ */
+static
+int i2400mu_txd(void *_i2400mu)
+{
+ struct i2400mu *i2400mu = _i2400mu;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct i2400m_msg_hdr *tx_msg;
+ size_t tx_msg_size;
+ unsigned long flags;
+
+ d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ BUG_ON(i2400mu->tx_kthread != NULL);
+ i2400mu->tx_kthread = current;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ while (1) {
+ d_printf(2, dev, "TX: waiting for messages\n");
+ tx_msg = NULL;
+ wait_event_interruptible(
+ i2400mu->tx_wq,
+ (kthread_should_stop() /* check this first! */
+ || (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size)))
+ );
+ if (kthread_should_stop())
+ break;
+ WARN_ON(tx_msg == NULL); /* should not happen...*/
+ d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
+ d_dump(5, dev, tx_msg, tx_msg_size);
+ /* Yeah, we ignore errors ... not much we can do */
+ i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
+ i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */
+ }
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ i2400mu->tx_kthread = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+ d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+ return 0;
+}
+
+
+/*
+ * i2400m TX engine notifies us that there is data in the FIFO ready
+ * for TX
+ *
+ * If there is a URB in flight, don't do anything; when it finishes,
+ * it will see there is data in the FIFO and send it. Else, just
+ * submit a write.
+ */
+void i2400mu_bus_tx_kick(struct i2400m *i2400m)
+{
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ wake_up_all(&i2400mu->tx_wq);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+int i2400mu_tx_setup(struct i2400mu *i2400mu)
+{
+ int result = 0;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
+ struct task_struct *kthread;
+
+ kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
+ wimax_dev->name);
+	/* the kthread function sets i2400mu->tx_kthread */
+ if (IS_ERR(kthread)) {
+ result = PTR_ERR(kthread);
+ dev_err(dev, "TX: cannot start thread: %d\n", result);
+ }
+ return result;
+}
+
+void i2400mu_tx_release(struct i2400mu *i2400mu)
+{
+ unsigned long flags;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct device *dev = i2400m_dev(i2400m);
+ struct task_struct *kthread;
+
+ spin_lock_irqsave(&i2400m->tx_lock, flags);
+ kthread = i2400mu->tx_kthread;
+ i2400mu->tx_kthread = NULL;
+ spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+ if (kthread)
+ kthread_stop(kthread);
+ else
+ d_printf(1, dev, "TX: kthread had already exited\n");
+}
diff --git a/drivers/staging/wimax/i2400m/usb.c b/drivers/staging/wimax/i2400m/usb.c
new file mode 100644
index 000000000000..3b84dd7b5567
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/usb.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Wireless WiMAX Connection 2400m
+ * Linux driver model glue for USB device, reset & fw upload
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Yanir Lubetkin <yanirx.lubetkin@intel.com>
+ *
+ * See i2400m-usb.h for a general description of this driver.
+ *
+ * This file implements driver model glue, and hook ups for the
+ * generic driver to implement the bus-specific functions (device
+ * communication setup/tear down, firmware upload and resetting).
+ *
+ * ROADMAP
+ *
+ * i2400mu_probe()
+ * alloc_netdev()...
+ * i2400mu_netdev_setup()
+ * i2400mu_init()
+ * i2400m_netdev_setup()
+ * i2400m_setup()...
+ *
+ * i2400mu_disconnect
+ * i2400m_release()
+ * free_netdev()
+ *
+ * i2400mu_suspend()
+ * i2400m_cmd_enter_powersave()
+ * i2400mu_notification_release()
+ *
+ * i2400mu_resume()
+ * i2400mu_notification_setup()
+ *
+ * i2400mu_bus_dev_start() Called by i2400m_dev_start() [who is
+ * i2400mu_tx_setup() called by i2400m_setup()]
+ * i2400mu_rx_setup()
+ * i2400mu_notification_setup()
+ *
+ * i2400mu_bus_dev_stop() Called by i2400m_dev_stop() [who is
+ * i2400mu_notification_release() called by i2400m_release()]
+ * i2400mu_rx_release()
+ * i2400mu_tx_release()
+ *
+ * i2400mu_bus_reset() Called by i2400m_reset
+ * __i2400mu_reset()
+ * __i2400mu_send_barker()
+ * usb_reset_device()
+ */
+#include "i2400m-usb.h"
+#include "linux-wimax-i2400m.h"
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+
+#define D_SUBMODULE usb
+#include "usb-debug-levels.h"
+
+static char i2400mu_debug_params[128];
+module_param_string(debug, i2400mu_debug_params, sizeof(i2400mu_debug_params),
+ 0644);
+MODULE_PARM_DESC(debug,
+ "String of space-separated NAME:VALUE pairs, where NAMEs "
+ "are the different debug submodules and VALUE are the "
+ "initial debug value to set.");
+
+/* Our firmware file names (tried in order until one loads) */
+static const char *i2400mu_bus_fw_names_5x50[] = {
+#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf"
+ I2400MU_FW_FILE_NAME_v1_5,
+#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
+ I2400MU_FW_FILE_NAME_v1_4,
+ NULL,
+};
+
+
+static const char *i2400mu_bus_fw_names_6050[] = {
+#define I6050U_FW_FILE_NAME_v1_5 "i6050-fw-usb-1.5.sbcf"
+ I6050U_FW_FILE_NAME_v1_5,
+ NULL,
+};
+
+
+static
+int i2400mu_bus_dev_start(struct i2400m *i2400m)
+{
+ int result;
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ result = i2400mu_tx_setup(i2400mu);
+ if (result < 0)
+ goto error_usb_tx_setup;
+ result = i2400mu_rx_setup(i2400mu);
+ if (result < 0)
+ goto error_usb_rx_setup;
+ result = i2400mu_notification_setup(i2400mu);
+ if (result < 0)
+ goto error_notif_setup;
+ d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
+ return result;
+
+error_notif_setup:
+ i2400mu_rx_release(i2400mu);
+error_usb_rx_setup:
+ i2400mu_tx_release(i2400mu);
+error_usb_tx_setup:
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+ return result;
+}
+
+
+static
+void i2400mu_bus_dev_stop(struct i2400m *i2400m)
+{
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct device *dev = &i2400mu->usb_iface->dev;
+
+ d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
+ i2400mu_notification_release(i2400mu);
+ i2400mu_rx_release(i2400mu);
+ i2400mu_tx_release(i2400mu);
+ d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
+}
+
+
+/*
+ * Sends a barker buffer to the device
+ *
+ * This helper will allocate a kmalloced buffer and use it to transmit
+ * (then free it). The reason is that on some architectures,
+ * stack/vmalloc/text areas cannot be used for DMA transfers.
+ *
+ * Error recovery here is simple: any failure is considered a hard
+ * error, and the caller will fall back to a last-resort bus-based reset.
+ */
+static
+int __i2400mu_send_barker(struct i2400mu *i2400mu,
+ const __le32 *barker,
+ size_t barker_size,
+ unsigned endpoint)
+{
+ struct usb_endpoint_descriptor *epd = NULL;
+ int pipe, actual_len, ret;
+ struct device *dev = &i2400mu->usb_iface->dev;
+ void *buffer;
+ int do_autopm = 1;
+
+ ret = usb_autopm_get_interface(i2400mu->usb_iface);
+ if (ret < 0) {
+ dev_err(dev, "RESET: can't get autopm: %d\n", ret);
+ do_autopm = 0;
+ }
+ ret = -ENOMEM;
+ buffer = kmalloc(barker_size, GFP_KERNEL);
+ if (buffer == NULL)
+ goto error_kzalloc;
+ epd = usb_get_epd(i2400mu->usb_iface, endpoint);
+ pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
+ memcpy(buffer, barker, barker_size);
+retry:
+ ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size,
+ &actual_len, 200);
+ switch (ret) {
+ case 0:
+ if (actual_len != barker_size) { /* Too short? drop it */
+ dev_err(dev, "E: %s: short write (%d B vs %zu "
+ "expected)\n",
+ __func__, actual_len, barker_size);
+ ret = -EIO;
+ }
+ break;
+ case -EPIPE:
+ /*
+	 * Stall -- maybe the device is choking on our
+	 * requests. Clear it and give it some time. If they
+	 * happen too often, it might be another symptom, so
+	 * we reset.
+	 *
+	 * No error handling for usb_clear_halt(); if it
+	 * works, the retry works; if it fails, this switch
+	 * does the error handling for us.
+ */
+ if (edc_inc(&i2400mu->urb_edc,
+ 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "E: %s: too many stalls in "
+ "URB; resetting device\n", __func__);
+ usb_queue_reset_device(i2400mu->usb_iface);
+ /* fallthrough */
+ } else {
+ usb_clear_halt(i2400mu->usb_dev, pipe);
+ msleep(10); /* give the device some time */
+ goto retry;
+ }
+ fallthrough;
+ case -EINVAL: /* while removing driver */
+ case -ENODEV: /* dev disconnect ... */
+ case -ENOENT: /* just ignore it */
+ case -ESHUTDOWN: /* and exit */
+ case -ECONNRESET:
+ ret = -ESHUTDOWN;
+ break;
+ default: /* Some error? */
+ if (edc_inc(&i2400mu->urb_edc,
+ EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "E: %s: maximum errors in URB "
+ "exceeded; resetting device\n",
+ __func__);
+ usb_queue_reset_device(i2400mu->usb_iface);
+ } else {
+ dev_warn(dev, "W: %s: cannot send URB: %d\n",
+ __func__, ret);
+ goto retry;
+ }
+ }
+ kfree(buffer);
+error_kzalloc:
+ if (do_autopm)
+ usb_autopm_put_interface(i2400mu->usb_iface);
+ return ret;
+}
+
+
+/*
+ * Reset a device at different levels (warm, cold or bus)
+ *
+ * @i2400m: device descriptor
+ * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
+ *
+ * Warm and cold resets get a USB reset if they fail.
+ *
+ * Warm reset:
+ *
+ * The device will be fully reset internally, but won't be
+ * disconnected from the USB bus (so no reenumeration will
+ * happen). Firmware upload will be necessary.
+ *
+ * The device will send a reboot barker in the notification endpoint
+ * that will trigger the driver to reinitialize the state
+ * automatically from notif.c:i2400m_notification_grok() into
+ * i2400m_dev_bootstrap_delayed().
+ *
+ * Cold and bus (USB) reset:
+ *
+ * The device will be fully reset internally, disconnected from the
+ * USB bus and a reenumeration will happen. Firmware upload will be
+ * necessary. Thus, we don't do any locking or struct
+ * reinitialization, as we are going to be fully disconnected and
+ * reenumerated.
+ *
+ * Note we need to return -ENODEV if a warm reset was requested and we
+ * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
+ * and wimax_dev->op_reset.
+ *
+ * WARNING: no driver state saved/fixed
+ */
+static
+int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
+{
+ int result;
+ struct i2400mu *i2400mu =
+ container_of(i2400m, struct i2400mu, i2400m);
+ struct device *dev = i2400m_dev(i2400m);
+ static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
+ cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ cpu_to_le32(I2400M_WARM_RESET_BARKER),
+ };
+ static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
+ cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ cpu_to_le32(I2400M_COLD_RESET_BARKER),
+ };
+
+ d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
+ if (rt == I2400M_RT_WARM)
+ result = __i2400mu_send_barker(
+ i2400mu, i2400m_WARM_BOOT_BARKER,
+ sizeof(i2400m_WARM_BOOT_BARKER),
+ i2400mu->endpoint_cfg.bulk_out);
+ else if (rt == I2400M_RT_COLD)
+ result = __i2400mu_send_barker(
+ i2400mu, i2400m_COLD_BOOT_BARKER,
+ sizeof(i2400m_COLD_BOOT_BARKER),
+ i2400mu->endpoint_cfg.reset_cold);
+ else if (rt == I2400M_RT_BUS) {
+ result = usb_reset_device(i2400mu->usb_dev);
+ switch (result) {
+ case 0:
+ case -EINVAL: /* device is gone */
+ case -ENODEV:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ result = 0;
+ break; /* We assume the device is disconnected */
+ default:
+ dev_err(dev, "USB reset failed (%d), giving up!\n",
+ result);
+ }
+ } else {
+ result = -EINVAL; /* shut gcc up in certain arches */
+ BUG();
+ }
+ if (result < 0
+ && result != -EINVAL /* device is gone */
+ && rt != I2400M_RT_BUS) {
+ /*
+ * Things failed -- resort to lower level reset, that
+ * we queue in another context; the reason for this is
+ * that the pre and post reset functionality requires
+ * the i2400m->init_mutex; RT_WARM and RT_COLD can
+ * come from areas where i2400m->init_mutex is taken.
+ */
+ dev_err(dev, "%s reset failed (%d); trying USB reset\n",
+ rt == I2400M_RT_WARM ? "warm" : "cold", result);
+ usb_queue_reset_device(i2400mu->usb_iface);
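+		/* -ENODEV tells the caller we had to fall back to a bus
+		 * reset; see the note in the function header */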
+ result = -ENODEV;
+ }
+ d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result);
+ return result;
+}
+
+static void i2400mu_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ struct usb_device *udev = i2400mu->usb_dev;
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->fw_version, i2400m->fw_name ? : "",
+ sizeof(info->fw_version));
+ usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops i2400mu_ethtool_ops = {
+ .get_drvinfo = i2400mu_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static
+void i2400mu_netdev_setup(struct net_device *net_dev)
+{
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+ struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ i2400mu_init(i2400mu);
+ i2400m_netdev_setup(net_dev);
+ net_dev->ethtool_ops = &i2400mu_ethtool_ops;
+}
+
+
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(usb),
+ D_SUBMODULE_DEFINE(fw),
+ D_SUBMODULE_DEFINE(notif),
+ D_SUBMODULE_DEFINE(rx),
+ D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+
+static
+void i2400mu_debugfs_add(struct i2400mu *i2400mu)
+{
+ struct dentry *dentry = i2400mu->i2400m.wimax_dev.debugfs_dentry;
+
+ dentry = debugfs_create_dir("i2400m-usb", dentry);
+ i2400mu->debugfs_dentry = dentry;
+
+ d_level_register_debugfs("dl_", usb, dentry);
+ d_level_register_debugfs("dl_", fw, dentry);
+ d_level_register_debugfs("dl_", notif, dentry);
+ d_level_register_debugfs("dl_", rx, dentry);
+ d_level_register_debugfs("dl_", tx, dentry);
+
+ /* Don't touch these if you don't know what you are doing */
+ debugfs_create_u8("rx_size_auto_shrink", 0600, dentry,
+ &i2400mu->rx_size_auto_shrink);
+
+ debugfs_create_size_t("rx_size", 0600, dentry, &i2400mu->rx_size);
+}
+
+
+static struct device_type i2400mu_type = {
+ .name = "wimax",
+};
+
+/*
+ * Probe an i2400m interface and register it
+ *
+ * @iface: USB interface to link to
+ * @id: USB class/subclass/protocol id
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Allocate a net device, initialize the bus-specific details and then
+ * call the bus-generic initialization routine. That will register
+ * the wimax and netdev devices, upload the firmware [using
+ * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
+ * communication with the device and then start talking to it to
+ * finish setting it up.
+ */
+static
+int i2400mu_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ int result;
+ struct net_device *net_dev;
+ struct device *dev = &iface->dev;
+ struct i2400m *i2400m;
+ struct i2400mu *i2400mu;
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+
+ if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+ return -ENODEV;
+
+ if (usb_dev->speed != USB_SPEED_HIGH)
+ dev_err(dev, "device not connected as high speed\n");
+
+ /* Allocate instance [calls i2400m_netdev_setup() on it]. */
+ result = -ENOMEM;
+ net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d", NET_NAME_UNKNOWN,
+ i2400mu_netdev_setup);
+ if (net_dev == NULL) {
+ dev_err(dev, "no memory for network device instance\n");
+ goto error_alloc_netdev;
+ }
+ SET_NETDEV_DEV(net_dev, dev);
+ SET_NETDEV_DEVTYPE(net_dev, &i2400mu_type);
+ i2400m = net_dev_to_i2400m(net_dev);
+ i2400mu = container_of(i2400m, struct i2400mu, i2400m);
+ i2400m->wimax_dev.net_dev = net_dev;
+ i2400mu->usb_dev = usb_get_dev(usb_dev);
+ i2400mu->usb_iface = iface;
+ usb_set_intfdata(iface, i2400mu);
+
+ i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
+ /*
+	 * The room required in the TX queue for a USB message to
+	 * accommodate the smallest payload, while allocating header
+	 * space, is 16 bytes. Adding this room for each new TX message
+	 * increases the chances of including any payload with size
+	 * <= 16 bytes.
+ */
+ i2400m->bus_tx_room_min = I2400MU_BLK_SIZE;
+ i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
+ i2400m->bus_setup = NULL;
+ i2400m->bus_dev_start = i2400mu_bus_dev_start;
+ i2400m->bus_dev_stop = i2400mu_bus_dev_stop;
+ i2400m->bus_release = NULL;
+ i2400m->bus_tx_kick = i2400mu_bus_tx_kick;
+ i2400m->bus_reset = i2400mu_bus_reset;
+ i2400m->bus_bm_retries = I2400M_USB_BOOT_RETRIES;
+ i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send;
+ i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
+ i2400m->bus_bm_mac_addr_impaired = 0;
+
+ switch (id->idProduct) {
+ case USB_DEVICE_ID_I6050:
+ case USB_DEVICE_ID_I6050_2:
+ case USB_DEVICE_ID_I6150:
+ case USB_DEVICE_ID_I6150_2:
+ case USB_DEVICE_ID_I6150_3:
+ case USB_DEVICE_ID_I6250:
+ i2400mu->i6050 = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (i2400mu->i6050) {
+ i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
+ i2400mu->endpoint_cfg.bulk_out = 0;
+ i2400mu->endpoint_cfg.notification = 3;
+ i2400mu->endpoint_cfg.reset_cold = 2;
+ i2400mu->endpoint_cfg.bulk_in = 1;
+ } else {
+ i2400m->bus_fw_names = i2400mu_bus_fw_names_5x50;
+ i2400mu->endpoint_cfg.bulk_out = 0;
+ i2400mu->endpoint_cfg.notification = 1;
+ i2400mu->endpoint_cfg.reset_cold = 2;
+ i2400mu->endpoint_cfg.bulk_in = 3;
+ }
+#ifdef CONFIG_PM
+ iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
+ device_init_wakeup(dev, 1);
+ pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000);
+ usb_enable_autosuspend(usb_dev);
+#endif
+
+ result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
+ if (result < 0) {
+ dev_err(dev, "cannot setup device: %d\n", result);
+ goto error_setup;
+ }
+ i2400mu_debugfs_add(i2400mu);
+ return 0;
+
+error_setup:
+ usb_set_intfdata(iface, NULL);
+ usb_put_dev(i2400mu->usb_dev);
+ free_netdev(net_dev);
+error_alloc_netdev:
+ return result;
+}
+
+
+/*
+ * Disconnect an i2400m device from the system.
+ *
+ * i2400m_stop() has been called before, so all the RX and TX contexts
+ * have been taken down already. Make sure the queue is stopped,
+ * unregister netdev and i2400m, free and kill.
+ */
+static
+void i2400mu_disconnect(struct usb_interface *iface)
+{
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ struct i2400m *i2400m = &i2400mu->i2400m;
+ struct net_device *net_dev = i2400m->wimax_dev.net_dev;
+ struct device *dev = &iface->dev;
+
+ d_fnstart(3, dev, "(iface %p i2400m %p)\n", iface, i2400m);
+
+ debugfs_remove_recursive(i2400mu->debugfs_dentry);
+ i2400m_release(i2400m);
+ usb_set_intfdata(iface, NULL);
+ usb_put_dev(i2400mu->usb_dev);
+ free_netdev(net_dev);
+ d_fnend(3, dev, "(iface %p i2400m %p) = void\n", iface, i2400m);
+}
+
+
+/*
+ * Get the device ready for USB port or system standby and hibernation
+ *
+ * USB port and system standby are handled the same.
+ *
+ * When the system hibernates, the USB device is powered down and then
+ * up, so we don't really have to do much here, as it will be seen as
+ * a reconnect. Still, for simplicity, we consider this case the same
+ * as suspend, so that the device has a chance to notify the base
+ * station (if connected).
+ *
+ * So at the end, the three cases require common handling.
+ *
+ * If at the time of this call the device's firmware is not loaded,
+ * nothing has to be done. Note we can be "loose" about not reading
+ * i2400m->updown under i2400m->init_mutex. If it happens to change
+ * immediately, other parts of the call flow will fail and effectively
+ * catch it.
+ *
+ * If the firmware is loaded, we need to:
+ *
+ * - tell the device to go into host interface power save mode, wait
+ * for it to ack
+ *
+ * This is more involved than it sounds; we need to execute a
+ * command, but this time, we don't want the code in usb-{tx,rx}.c
+ * to call the usb_autopm_get/put_interface() barriers as it'd
+ * deadlock, so we need to decrement i2400mu->do_autopm, that acts
+ * as a poor man's semaphore. Ugly, but it works.
+ *
+ * As well, the device might refuse to go to sleep for whatever
+ * reason; in that case we just fail. For system suspend/hibernate
+ * we *can't* fail, so we check PMSG_IS_AUTO to see if the
+ * suspend call comes from the USB stack or from the system and act
+ * accordingly.
+ *
+ * - stop the notification endpoint polling
+ */
+static
+int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
+{
+ int result = 0;
+ struct device *dev = &iface->dev;
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ unsigned is_autosuspend = 0;
+ struct i2400m *i2400m = &i2400mu->i2400m;
+
+#ifdef CONFIG_PM
+ if (PMSG_IS_AUTO(pm_msg))
+ is_autosuspend = 1;
+#endif
+
+ d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
+ rmb(); /* see i2400m->updown's documentation */
+ if (i2400m->updown == 0)
+ goto no_firmware;
+ if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
+ /* ugh -- the device is connected and this suspend
+ * request is an autosuspend one (not a system standby
+ * / hibernate).
+ *
+ * The only way the device can go to standby is if the
+		 * link with the base station is in IDLE mode; if that
+		 * were the case, we'd be in status
+		 * I2400M_SS_CONNECTED_IDLE. But we are not.
+		 *
+		 * If we *tell* it to enter power save now, it'll reset
+		 * as a precautionary measure, so if this is an
+		 * autosuspend request, say no and it'll come back
+		 * later, when the link is IDLE.
+ */
+ result = -EBADF;
+ d_printf(1, dev, "fw up, link up, not-idle, autosuspend: "
+ "not entering powersave\n");
+ goto error_not_now;
+ }
+ d_printf(1, dev, "fw up: entering powersave\n");
+ atomic_dec(&i2400mu->do_autopm);
+ result = i2400m_cmd_enter_powersave(i2400m);
+ atomic_inc(&i2400mu->do_autopm);
+ if (result < 0 && !is_autosuspend) {
+ /* System suspend, can't fail */
+ dev_err(dev, "failed to suspend, will reset on resume\n");
+ result = 0;
+ }
+ if (result < 0)
+ goto error_enter_powersave;
+ i2400mu_notification_release(i2400mu);
+ d_printf(1, dev, "powersave requested\n");
+error_enter_powersave:
+error_not_now:
+no_firmware:
+ d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n",
+ iface, pm_msg.event, result);
+ return result;
+}
+
+
+static
+int i2400mu_resume(struct usb_interface *iface)
+{
+ int ret = 0;
+ struct device *dev = &iface->dev;
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ struct i2400m *i2400m = &i2400mu->i2400m;
+
+ d_fnstart(3, dev, "(iface %p)\n", iface);
+ rmb(); /* see i2400m->updown's documentation */
+ if (i2400m->updown == 0) {
+ d_printf(1, dev, "fw was down, no resume needed\n");
+ goto out;
+ }
+ d_printf(1, dev, "fw was up, resuming\n");
+ i2400mu_notification_setup(i2400mu);
+ /* USB has flow control, so we don't need to give it time to
+ * come back; otherwise, we'd use something like a get-state
+ * command... */
+out:
+ d_fnend(3, dev, "(iface %p) = %d\n", iface, ret);
+ return ret;
+}
+
+
+static
+int i2400mu_reset_resume(struct usb_interface *iface)
+{
+ int result;
+ struct device *dev = &iface->dev;
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ struct i2400m *i2400m = &i2400mu->i2400m;
+
+ d_fnstart(3, dev, "(iface %p)\n", iface);
+ result = i2400m_dev_reset_handle(i2400m, "device reset on resume");
+ d_fnend(3, dev, "(iface %p) = %d\n", iface, result);
+ return result < 0 ? result : 0;
+}
+
+
+/*
+ * Another driver or user space is triggering a reset on the device
+ * which contains the interface passed as an argument. Cease IO and
+ * save any device state you need to restore.
+ *
+ * If you need to allocate memory here, use GFP_NOIO or, if you are
+ * in atomic context, GFP_ATOMIC.
+ */
+static
+int i2400mu_pre_reset(struct usb_interface *iface)
+{
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ return i2400m_pre_reset(&i2400mu->i2400m);
+}
+
+
+/*
+ * The reset has completed. Restore any saved device state and begin
+ * using the device again.
+ *
+ * If you need to allocate memory here, use GFP_NOIO or, if you are
+ * in atomic context, GFP_ATOMIC.
+ */
+static
+int i2400mu_post_reset(struct usb_interface *iface)
+{
+ struct i2400mu *i2400mu = usb_get_intfdata(iface);
+ return i2400m_post_reset(&i2400mu->i2400m);
+}
+
+
+static
+struct usb_device_id i2400mu_id_table[] = {
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
+ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
+ { USB_DEVICE(0x8086, 0x0181) },
+ { USB_DEVICE(0x8086, 0x1403) },
+ { USB_DEVICE(0x8086, 0x1405) },
+ { USB_DEVICE(0x8086, 0x0180) },
+ { USB_DEVICE(0x8086, 0x0182) },
+ { USB_DEVICE(0x8086, 0x1406) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, i2400mu_id_table);
+
+
+static
+struct usb_driver i2400mu_driver = {
+ .name = KBUILD_MODNAME,
+ .suspend = i2400mu_suspend,
+ .resume = i2400mu_resume,
+ .reset_resume = i2400mu_reset_resume,
+ .probe = i2400mu_probe,
+ .disconnect = i2400mu_disconnect,
+ .pre_reset = i2400mu_pre_reset,
+ .post_reset = i2400mu_post_reset,
+ .id_table = i2400mu_id_table,
+ .supports_autosuspend = 1,
+};
+
+static
+int __init i2400mu_driver_init(void)
+{
+ d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400mu_debug_params,
+ "i2400m_usb.debug");
+ return usb_register(&i2400mu_driver);
+}
+module_init(i2400mu_driver_init);
+
+
+static
+void __exit i2400mu_driver_exit(void)
+{
+ usb_deregister(&i2400mu_driver);
+}
+module_exit(i2400mu_driver_exit);
+
+MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
+MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
+ "(5x50 & 6050)");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5);
+MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5);