Diffstat (limited to 'include/linux/spi/spi.h')
-rw-r--r--	include/linux/spi/spi.h	889
1 file changed, 539 insertions(+), 350 deletions(-)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 99380c0825db..4789f91dae94 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -6,30 +6,44 @@
#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/completion.h>
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/kthread.h>
-#include <linux/completion.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
#include <linux/scatterlist.h>
-#include <linux/gpio/consumer.h>
-#include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
+
+#include <uapi/linux/spi/spi.h>
+
+/* Max no. of CS supported per spi device */
+#define SPI_CS_CNT_MAX 16
struct dma_chan;
-struct property_entry;
+struct software_node;
+struct ptp_system_timestamp;
struct spi_controller;
struct spi_transfer;
struct spi_controller_mem_ops;
+struct spi_controller_mem_caps;
+struct spi_message;
+struct spi_offload;
+struct spi_offload_config;
/*
- * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
+ * INTERFACES between SPI controller-side drivers and SPI target protocol handlers,
* and SPI infrastructure.
*/
-extern struct bus_type spi_bus_type;
+extern const struct bus_type spi_bus_type;
/**
* struct spi_statistics - statistics for spi transfers
- * @lock: lock protecting this structure
+ * @syncp: seqcount to protect members in this struct for per-cpu update
+ * on 32-bit systems
*
* @messages: number of spi-messages handled
* @transfers: number of spi_transfers handled
@@ -47,48 +61,55 @@ extern struct bus_type spi_bus_type;
* @bytes_rx: number of bytes received from device
*
* @transfer_bytes_histo:
- * transfer bytes histogramm
+ * transfer bytes histogram
*
* @transfers_split_maxsize:
* number of transfers that have been split because of
* maxsize limit
*/
struct spi_statistics {
- spinlock_t lock; /* lock for the whole structure */
+ struct u64_stats_sync syncp;
- unsigned long messages;
- unsigned long transfers;
- unsigned long errors;
- unsigned long timedout;
+ u64_stats_t messages;
+ u64_stats_t transfers;
+ u64_stats_t errors;
+ u64_stats_t timedout;
- unsigned long spi_sync;
- unsigned long spi_sync_immediate;
- unsigned long spi_async;
+ u64_stats_t spi_sync;
+ u64_stats_t spi_sync_immediate;
+ u64_stats_t spi_async;
- unsigned long long bytes;
- unsigned long long bytes_rx;
- unsigned long long bytes_tx;
+ u64_stats_t bytes;
+ u64_stats_t bytes_rx;
+ u64_stats_t bytes_tx;
#define SPI_STATISTICS_HISTO_SIZE 17
- unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+ u64_stats_t transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
- unsigned long transfers_split_maxsize;
+ u64_stats_t transfers_split_maxsize;
};
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
- struct spi_transfer *xfer,
- struct spi_controller *ctlr);
-
-#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
- do { \
- unsigned long flags; \
- spin_lock_irqsave(&(stats)->lock, flags); \
- (stats)->field += count; \
- spin_unlock_irqrestore(&(stats)->lock, flags); \
+#define SPI_STATISTICS_ADD_TO_FIELD(pcpu_stats, field, count) \
+ do { \
+ struct spi_statistics *__lstats; \
+ get_cpu(); \
+ __lstats = this_cpu_ptr(pcpu_stats); \
+ u64_stats_update_begin(&__lstats->syncp); \
+ u64_stats_add(&__lstats->field, count); \
+ u64_stats_update_end(&__lstats->syncp); \
+ put_cpu(); \
} while (0)
-#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
- SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+#define SPI_STATISTICS_INCREMENT_FIELD(pcpu_stats, field) \
+ do { \
+ struct spi_statistics *__lstats; \
+ get_cpu(); \
+ __lstats = this_cpu_ptr(pcpu_stats); \
+ u64_stats_update_begin(&__lstats->syncp); \
+ u64_stats_inc(&__lstats->field); \
+ u64_stats_update_end(&__lstats->syncp); \
+ put_cpu(); \
+ } while (0)
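
For orientation, a minimal sketch of how the per-CPU statistics macros above are meant to be used; example_account_transfer() is a made-up helper (in practice the SPI core does this accounting itself against ctlr->pcpu_statistics):

static void example_account_transfer(struct spi_controller *ctlr, size_t len)
{
	/* Each per-CPU update is bracketed by u64_stats_update_begin()/_end() */
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, transfers);
	SPI_STATISTICS_ADD_TO_FIELD(ctlr->pcpu_statistics, bytes, len);
}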
/**
* struct spi_delay - SPI delay information
@@ -105,21 +126,16 @@ struct spi_delay {
extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
+extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
+ struct spi_transfer *xfer);
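
As a rough illustration of the spi_delay helpers above (example_pause_10us() is a hypothetical name; the second argument to spi_delay_exec() may be NULL when the delay does not scale with a transfer's clock):

static int example_pause_10us(void)
{
	struct spi_delay d = {
		.value = 10,
		.unit  = SPI_DELAY_UNIT_USECS,
	};

	/* Busy-waits or sleeps as appropriate for the requested duration */
	return spi_delay_exec(&d, NULL);
}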
/**
- * struct spi_device - Controller side proxy for an SPI slave device
+ * struct spi_device - Controller side proxy for an SPI target device
* @dev: Driver model representation of the device.
* @controller: SPI controller used with the device.
- * @master: Copy of controller, for backwards compatibility.
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Chipselect, distinguishing chips handled by @controller.
- * @mode: The spi mode defines how data is clocked out and in.
- * This may be changed by the device's driver.
- * The "active low" default for chipselect mode can be overridden
- * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
- * each word in a transfer (by specifying SPI_LSB_FIRST).
* @bits_per_word: Data transfers involve one or more words; word sizes
* like eight or 12 bits are common. In-memory wordsizes are
* powers of two bytes (e.g. 20 bit samples use 32 bits).
@@ -127,6 +143,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* default (0) indicating protocol words are eight bit bytes.
* The spi_transfer.bits_per_word can override this for each transfer.
* @rt: Make the pump thread real time priority.
+ * @mode: The spi mode defines how data is clocked out and in.
+ * This may be changed by the device's driver.
+ * The "active low" default for chipselect mode can be overridden
+ * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
+ * each word in a transfer (by specifying SPI_LSB_FIRST).
* @irq: Negative, or the number passed to request_irq() to receive
* interrupts from this device.
* @controller_state: Controller's runtime state
@@ -137,17 +158,23 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* for driver coldplugging, and in uevents used for hotplugging
* @driver_override: If the name of a driver is written to this attribute, then
* the device will bind to the named driver and only the named driver.
- * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
- * not using a GPIO line) use cs_gpiod in new drivers by opting in on
- * the spi_master.
- * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
- * not using a GPIO line)
+ * Do not set directly, because core frees it; use driver_set_override() to
+ * set or clear it.
+ * @pcpu_statistics: statistics for the spi_device
* @word_delay: delay to be inserted between consecutive
* words of a transfer
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ * deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ * two delays will be added up.
+ * @chip_select: Array of physical chipselect, spi->chipselect[i] gives
+ * the corresponding physical CS for logical CS i.
+ * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
*
- * @statistics: statistics for the spi_device
- *
- * A @spi_device is used to interchange data between an SPI slave
+ * A @spi_device is used to interchange data between an SPI target device
* (usually a discrete chip) and CPU memory.
*
* In @dev, the platform_data is used to hold information about this
@@ -159,46 +186,62 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
struct spi_device {
struct device dev;
struct spi_controller *controller;
- struct spi_controller *master; /* compatibility layer */
u32 max_speed_hz;
- u8 chip_select;
u8 bits_per_word;
bool rt;
+#define SPI_NO_TX BIT(31) /* No transmit wire */
+#define SPI_NO_RX BIT(30) /* No receive wire */
+ /*
+ * TPM specification defines flow control over SPI. Client device
+ * can insert a wait state on MISO when address is transmitted by
+ * controller on MOSI. Detecting the wait state in software is only
+ * possible for full duplex controllers. For controllers that support
+ * only half-duplex, the wait state detection needs to be implemented
+ * in hardware. TPM devices would set this flag when hardware flow
+ * control is expected from SPI controller.
+ */
+#define SPI_TPM_HW_FLOW BIT(29) /* TPM HW flow control */
+ /*
+ * All bits defined above should be covered by SPI_MODE_KERNEL_MASK.
+ * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart,
+ * which is defined in 'include/uapi/linux/spi/spi.h'.
+ * The bits defined here are from bit 31 downwards, while in
+ * SPI_MODE_USER_MASK are from 0 upwards.
+ * These bits must not overlap. A static assert check should make sure of that.
+ * If adding extra bits, make sure to decrease the bit index below as well.
+ */
+#define SPI_MODE_KERNEL_MASK (~(BIT(29) - 1))
u32 mode;
-#define SPI_CPHA 0x01 /* clock phase */
-#define SPI_CPOL 0x02 /* clock polarity */
-#define SPI_MODE_0 (0|0) /* (original MicroWire) */
-#define SPI_MODE_1 (0|SPI_CPHA)
-#define SPI_MODE_2 (SPI_CPOL|0)
-#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
-#define SPI_CS_HIGH 0x04 /* chipselect active high? */
-#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
-#define SPI_3WIRE 0x10 /* SI/SO signals shared */
-#define SPI_LOOP 0x20 /* loopback mode */
-#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
-#define SPI_READY 0x80 /* slave pulls low to pause */
-#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */
-#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
-#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
-#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
-#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
-#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */
-#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */
-#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */
int irq;
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- int cs_gpio; /* LEGACY: chip select gpio */
- struct gpio_desc *cs_gpiod; /* chip select gpio desc */
- struct spi_delay word_delay; /* inter-word delay */
- /* the statistics */
- struct spi_statistics statistics;
+ /* The statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
+
+ struct spi_delay word_delay; /* Inter-word delay */
+
+ /* CS delays */
+ struct spi_delay cs_setup;
+ struct spi_delay cs_hold;
+ struct spi_delay cs_inactive;
+
+ u8 chip_select[SPI_CS_CNT_MAX];
/*
- * likely need more hooks for more protocol options affecting how
+ * Bit mask of the chipselect(s) that the driver needs to use from
+ * the chipselect array. When the controller is capable of handling
+ * multiple chip selects and memories are connected in parallel,
+ * more than one bit needs to be set in cs_index_mask.
+ */
+ u32 cs_index_mask : SPI_CS_CNT_MAX;
+
+ struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
+
+ /*
+ * Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
* - memory packing (12 bit samples into low bits, others zeroed)
* - priority
@@ -207,12 +250,13 @@ struct spi_device {
*/
};
-static inline struct spi_device *to_spi_device(struct device *dev)
-{
- return dev ? container_of(dev, struct spi_device, dev) : NULL;
-}
+/* Make sure that SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK don't overlap */
+static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
+ "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
+
+#define to_spi_device(__dev) container_of_const(__dev, struct spi_device, dev)
-/* most drivers won't need to care about device refcounting */
+/* Most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
{
return (spi && get_device(&spi->dev)) ? spi : NULL;
@@ -225,7 +269,7 @@ static inline void spi_dev_put(struct spi_device *spi)
}
/* ctldata is for the bus_controller driver's runtime state */
-static inline void *spi_get_ctldata(struct spi_device *spi)
+static inline void *spi_get_ctldata(const struct spi_device *spi)
{
return spi->controller_state;
}
@@ -235,29 +279,57 @@ static inline void spi_set_ctldata(struct spi_device *spi, void *state)
spi->controller_state = state;
}
-/* device driver data */
+/* Device driver data */
static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
dev_set_drvdata(&spi->dev, data);
}
-static inline void *spi_get_drvdata(struct spi_device *spi)
+static inline void *spi_get_drvdata(const struct spi_device *spi)
{
return dev_get_drvdata(&spi->dev);
}
-struct spi_message;
-struct spi_transfer;
+static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
+{
+ return spi->chip_select[idx];
+}
+
+static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
+{
+ spi->chip_select[idx] = chipselect;
+}
+
+static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx)
+{
+ return spi->cs_gpiod[idx];
+}
+
+static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
+{
+ spi->cs_gpiod[idx] = csgpiod;
+}
+
+static inline bool spi_is_csgpiod(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (spi_get_csgpiod(spi, idx))
+ return true;
+ }
+ return false;
+}
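
A hedged sketch of how a controller driver might use the new accessors rather than touching spi->chip_select[] / spi->cs_gpiod[] directly; example_set_cs() and the index 0 are illustrative only:

static void example_set_cs(struct spi_device *spi, bool enable)
{
	struct gpio_desc *cs = spi_get_csgpiod(spi, 0);

	if (cs)
		gpiod_set_value_cansleep(cs, enable);
	else
		pr_debug("native CS %d -> %d\n", spi_get_chipselect(spi, 0), enable);
}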
/**
* struct spi_driver - Host side "protocol" driver
* @id_table: List of SPI devices supported by this driver
- * @probe: Binds this driver to the spi device. Drivers can verify
+ * @probe: Binds this driver to the SPI device. Drivers can verify
* that the device is actually present, and may need to configure
* characteristics (such as bits_per_word) which weren't needed for
* the initial configuration done during system setup.
- * @remove: Unbinds this driver from the spi device
+ * @remove: Unbinds this driver from the SPI device
* @shutdown: Standard shutdown callback used during system state
* transitions such as powerdown/halt and kexec
* @driver: SPI device drivers should initialize the name and owner
@@ -278,15 +350,13 @@ struct spi_transfer;
struct spi_driver {
const struct spi_device_id *id_table;
int (*probe)(struct spi_device *spi);
- int (*remove)(struct spi_device *spi);
+ void (*remove)(struct spi_device *spi);
void (*shutdown)(struct spi_device *spi);
struct device_driver driver;
};
-static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
-{
- return drv ? container_of(drv, struct spi_driver, driver) : NULL;
-}
+#define to_spi_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct spi_driver, driver) : NULL )
extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
@@ -301,7 +371,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
-/* use a define to avoid include chaining to get THIS_MODULE */
+extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
+
+/* Use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
__spi_register_driver(THIS_MODULE, driver)
@@ -318,15 +390,15 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
spi_unregister_driver)
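
To show the shape of the updated driver interface, here is a minimal protocol-driver skeleton for a hypothetical "foo" device (all names invented): note that remove() now returns void and registration goes through module_spi_driver().

static int foo_probe(struct spi_device *spi)
{
	spi->bits_per_word = 8;
	return spi_setup(spi);
}

static void foo_remove(struct spi_device *spi)
{
	/* Nothing to tear down in this sketch */
}

static struct spi_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe  = foo_probe,
	.remove = foo_remove,
};
module_spi_driver(foo_driver);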
/**
- * struct spi_controller - interface to SPI master or slave controller
+ * struct spi_controller - interface to SPI host or target controller
* @dev: device interface to this driver
* @list: link with the global spi_controller list
* @bus_num: board-specific (and often SOC-specific) identifier for a
* given SPI controller.
* @num_chipselect: chipselects are used to distinguish individual
- * SPI slaves, and are numbered from zero to num_chipselects.
- * each slave has a chipselect signal, but it's common that not
- * every chipselect is connected to a slave.
+ * SPI targets, and are numbered from zero to num_chipselects.
+ * each target has a chipselect signal, but it's common that not
+ * every chipselect is connected to a target.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @mode_bits: flags understood by this controller driver
* @buswidth_override_bits: flags to override for this controller driver
@@ -339,11 +411,14 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
* @slave: indicates that this is an SPI slave controller
+ * @target: indicates that this is an SPI target controller
+ * @devm_allocated: whether the allocation of this struct is devres-managed
* @max_transfer_size: function that returns the max transfer size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @max_message_size: function that returns the max message size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @io_mutex: mutex for physical bus access
+ * @add_lock: mutex to avoid adding devices to the same chipselect
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -352,24 +427,34 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* must fail if an unrecognized or unsupported mode is requested.
* It's always safe to call this unless transfers are pending on
* the device whose settings are being modified.
- * @set_cs_timing: optional hook for SPI devices to request SPI master
+ * @set_cs_timing: optional hook for SPI devices to request SPI
* controller for configuring specific CS setup time, hold time and inactive
- * delay interms of clock counts
+ * delay in terms of clock counts
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
+ * @dma_map_dev: device which can be used for DMA mapping
+ * @cur_rx_dma_dev: device which is currently used for RX DMA mapping
+ * @cur_tx_dma_dev: device which is currently used for TX DMA mapping
* @queued: whether this controller is providing an internal message queue
* @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
- * @queue_lock: spinlock to syncronise access to message queue
+ * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
- * @idling: the device is entering idle state
* @cur_msg: the currently in-flight message
- * @cur_msg_prepared: spi_prepare_message was called for the currently
- * in-flight message
- * @cur_msg_mapped: message has been mapped for DMA
- * @last_cs_enable: was enable true on the last call to set_cs.
+ * @cur_msg_completion: a completion for the current in-flight message
+ * @cur_msg_incomplete: Flag used internally to opportunistically skip
+ * the @cur_msg_completion. This flag is used to check if the driver has
+ * already called spi_finalize_current_message().
+ * @cur_msg_need_completion: Flag used internally to opportunistically skip
+ * the @cur_msg_completion. This flag is used to signal the context that
+ * is running spi_finalize_current_message() that it needs to complete()
+ * @fallback: fallback to PIO if DMA transfer return failure with
+ * SPI_TRANS_FAIL_NO_START.
* @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
+ * @last_cs: the last chip_select recorded by set_cs; -1 when no chip is
+ * selected
+ * @last_cs_index_mask: bit mask of the last chip select(s) that were used
* @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
@@ -392,6 +477,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
*
* @set_cs: set the logic level of the chip select line. May be called
* from interrupt context.
+ * @optimize_message: optimize the message for reuse
+ * @unoptimize_message: release resources allocated by optimize_message
* @prepare_message: set up the controller to transfer a single message,
* for example doing DMA mapping. Called from threaded
* context.
@@ -401,40 +488,40 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer. Note: transfer_one and
- * transfer_one_message are mutually exclusive; when both
- * are set, the generic subsystem does not call your
- * transfer_one callback.
+ * can issue the next transfer. If the transfer fails, the
+ * driver must set the flag SPI_TRANS_FAIL_IO to
+ * spi_transfer->error first, before calling
+ * spi_finalize_current_transfer().
+ * Note: transfer_one and transfer_one_message are mutually
+ * exclusive; when both are set, the generic subsystem does
+ * not call your transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
* This field is optional and should only be implemented if the
* controller has native support for memory like operations.
+ * @get_offload: callback for controllers with offload support to get matching
+ * offload instance. Implementations should return -ENODEV if no match is
+ * found.
+ * @put_offload: release the offload instance acquired by @get_offload.
+ * @mem_caps: controller capabilities for the handling of memory operations.
+ * @dtr_caps: true if the controller has DTR (single/dual transfer rate) capability.
+ * QSPI-based controllers should fill this in based on the controller's capability.
* @unprepare_message: undo any work done by prepare_message().
- * @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @cs_setup: delay to be introduced by the controller after CS is asserted
- * @cs_hold: delay to be introduced by the controller before CS is deasserted
- * @cs_inactive: delay to be introduced by the controller after CS is
- * deasserted. If @cs_change_delay is used from @spi_transfer, then the
- * two delays will be added up.
- * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
- * CS number. Any individual value may be -ENOENT for CS lines that
- * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
- * in new drivers.
- * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
+ * @target_abort: abort the ongoing transfer request on an SPI target controller
+ * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
* are not GPIOs (driven by the SPI controller itself).
* @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
- * GPIO descriptors rather than using global GPIO numbers grabbed by the
- * driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
- * and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * GPIO descriptors. This will fill in @cs_gpiods and SPI devices will have
+ * the cs_gpiod assigned if a GPIO line is found for the chipselect.
* @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
* fill in this field with the first unused native CS, to be used by SPI
* controller drivers that need to drive a native CS when using GPIO CS.
* @max_native_cs: When cs_gpiods is used, and this field is filled in,
* spi_register_controller() will validate all native CS (including the
* unused native CS) against this value.
- * @statistics: statistics for the spi_controller
+ * @pcpu_statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
@@ -449,8 +536,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* If the driver does not set this, the SPI core takes the snapshot as
* close to the driver hand-over as possible.
* @irq_flags: Interrupt enable state during PTP system timestamping
- * @fallback: fallback to pio if dma transfer return failure with
- * SPI_TRANS_FAIL_NO_START.
+ * @queue_empty: signal green light for opportunistically skipping the queue
+ * for spi_sync transfers.
+ * @must_async: disable all fast paths in the core
+ * @defer_optimize_message: set to true if controller cannot pre-optimize messages
+ * and needs to defer the optimization step until the message is actually
+ * being transferred
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -460,7 +551,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
*
* The driver for an SPI controller manages access to those devices through
* a queue of spi_message transactions, copying data between CPU memory and
- * an SPI slave device. For each such message it queues, it calls the
+ * an SPI target device. For each such message it queues, it calls the
* message's completion function when the transaction completes.
*/
struct spi_controller {
@@ -468,20 +559,22 @@ struct spi_controller {
struct list_head list;
- /* other than negative (== assign one dynamically), bus_num is fully
- * board-specific. usually that simplifies to being SOC-specific.
- * example: one SOC has three SPI controllers, numbered 0..2,
- * and one board's schematics might show it using SPI-2. software
+ /*
+ * Other than negative (== assign one dynamically), bus_num is fully
+ * board-specific. Usually that simplifies to being SoC-specific.
+ * example: one SoC has three SPI controllers, numbered 0..2,
+ * and one board's schematics might show it using SPI-2. Software
* would normally use bus_num=2 for that controller.
*/
s16 bus_num;
- /* chipselects will be integral to many controllers; some others
+ /*
+ * Chipselects will be integral to many controllers; some others
* might use board-specific GPIOs.
*/
u16 num_chipselect;
- /* some SPI controllers pose alignment requirements on DMAable
+ /* Some SPI controllers pose alignment requirements on DMAable
* buffers; let protocol drivers know about these requirements.
*/
u16 dma_alignment;
@@ -492,31 +585,43 @@ struct spi_controller {
/* spi_device.mode flags override flags for this controller */
u32 buswidth_override_bits;
- /* bitmask of supported bits_per_word for transfers */
+ /* Bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)
- /* limits on transfer speed */
+ /* Limits on transfer speed */
u32 min_speed_hz;
u32 max_speed_hz;
- /* other constraints relevant to this driver */
+ /* Other constraints relevant to this driver */
u16 flags;
-#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */
-#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */
-#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */
-#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */
-#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */
+#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* Can't do full duplex */
+#define SPI_CONTROLLER_NO_RX BIT(1) /* Can't do buffer read */
+#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */
+#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
+#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
+#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select target device */
+#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+ /*
+ * The spi-controller has multi chip select capability and can
+ * assert/de-assert more than one chip select at once.
+ */
+#define SPI_CONTROLLER_MULTI_CS BIT(7)
-#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+ /* Flag indicating if the allocation of this struct is devres-managed */
+ bool devm_allocated;
- /* flag indicating this is an SPI slave controller */
- bool slave;
+ union {
+ /* Flag indicating this is an SPI slave controller */
+ bool slave;
+ /* Flag indicating this is an SPI target controller */
+ bool target;
+ };
/*
- * on some hardware transfer / message size may be constrained
- * the limit may depend on device transfer settings
+ * On some hardware, transfer / message size may be constrained;
+ * the limit may depend on device transfer settings.
*/
size_t (*max_transfer_size)(struct spi_device *spi);
size_t (*max_message_size)(struct spi_device *spi);
@@ -524,14 +629,18 @@ struct spi_controller {
/* I/O mutex */
struct mutex io_mutex;
- /* lock and mutex for SPI bus locking */
+ /* Used to avoid adding the same CS twice */
+ struct mutex add_lock;
+
+ /* Lock and mutex for SPI bus locking */
spinlock_t bus_lock_spinlock;
struct mutex bus_lock_mutex;
- /* flag indicating that the SPI bus is locked for exclusive use */
+ /* Flag indicating that the SPI bus is locked for exclusive use */
bool bus_lock_flag;
- /* Setup mode and clock, etc (spi driver may call many times).
+ /*
+ * Setup mode and clock, etc (SPI driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
* device are active. DO NOT UPDATE SHARED REGISTERS in ways
@@ -547,21 +656,21 @@ struct spi_controller {
* to configure specific CS timing through spi_set_cs_timing() after
* spi_setup().
*/
- int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
- struct spi_delay *hold, struct spi_delay *inactive);
+ int (*set_cs_timing)(struct spi_device *spi);
- /* bidirectional bulk transfers
+ /*
+ * Bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
* just to add the message to the queue.
* + For now there's no remove-from-queue operation, or
* any other request management
- * + To a given spi_device, message queueing is pure fifo
+ * + To a given spi_device, message queueing is pure FIFO
*
* + The controller's main job is to process its message queue,
- * selecting a chip (for masters), then transferring data
+ * selecting a chip (for controllers), then transferring data
* + If there are multiple spi_device children, the i/o queue
- * arbitration algorithm is unspecified (round robin, fifo,
+ * arbitration algorithm is unspecified (round robin, FIFO,
* priority, reservations, preemption, etc)
*
* + Chipselect stays active during the entire message
@@ -572,7 +681,7 @@ struct spi_controller {
int (*transfer)(struct spi_device *spi,
struct spi_message *mesg);
- /* called on release() to free memory provided by spi_controller */
+ /* Called on release() to free memory provided by spi_controller */
void (*cleanup)(struct spi_device *spi);
/*
@@ -585,6 +694,9 @@ struct spi_controller {
bool (*can_dma)(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer);
+ struct device *dma_map_dev;
+ struct device *cur_rx_dma_dev;
+ struct device *cur_tx_dma_dev;
/*
* These hooks are for drivers that want to use the generic
@@ -598,19 +710,22 @@ struct spi_controller {
spinlock_t queue_lock;
struct list_head queue;
struct spi_message *cur_msg;
- bool idling;
+ struct completion cur_msg_completion;
+ bool cur_msg_incomplete;
+ bool cur_msg_need_completion;
bool busy;
bool running;
bool rt;
bool auto_runtime_pm;
- bool cur_msg_prepared;
- bool cur_msg_mapped;
- bool last_cs_enable;
- bool last_cs_mode_high;
bool fallback;
+ bool last_cs_mode_high;
+ s8 last_cs[SPI_CS_CNT_MAX];
+ u32 last_cs_index_mask : SPI_CS_CNT_MAX;
struct completion xfer_completion;
size_t max_dma_len;
+ int (*optimize_message)(struct spi_message *msg);
+ int (*unoptimize_message)(struct spi_message *msg);
int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
int (*transfer_one_message)(struct spi_controller *ctlr,
struct spi_message *mesg);
@@ -619,11 +734,11 @@ struct spi_controller {
struct spi_message *message);
int (*unprepare_message)(struct spi_controller *ctlr,
struct spi_message *message);
- int (*slave_abort)(struct spi_controller *ctlr);
+ int (*target_abort)(struct spi_controller *ctlr);
/*
* These hooks are for drivers that use a generic implementation
- * of transfer_one_message() provied by the core.
+ * of transfer_one_message() provided by the core.
*/
void (*set_cs)(struct spi_device *spi, bool enable);
int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
@@ -633,27 +748,29 @@ struct spi_controller {
/* Optimized handlers for SPI memory-like operations. */
const struct spi_controller_mem_ops *mem_ops;
+ const struct spi_controller_mem_caps *mem_caps;
- /* CS delays */
- struct spi_delay cs_setup;
- struct spi_delay cs_hold;
- struct spi_delay cs_inactive;
+ /* SPI or QSPI controller can set to true if supports SDR/DDR transfer rate */
+ bool dtr_caps;
- /* gpio chip select */
- int *cs_gpios;
+ struct spi_offload *(*get_offload)(struct spi_device *spi,
+ const struct spi_offload_config *config);
+ void (*put_offload)(struct spi_offload *offload);
+
+ /* GPIO chip select */
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
- u8 unused_native_cs;
- u8 max_native_cs;
+ s8 unused_native_cs;
+ s8 max_native_cs;
- /* statistics */
- struct spi_statistics statistics;
+ /* Statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
/* DMA channels for use with core dmaengine helpers */
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
- /* dummy data for full duplex devices */
+ /* Dummy data for full duplex devices */
void *dummy_rx;
void *dummy_tx;
@@ -667,6 +784,11 @@ struct spi_controller {
/* Interrupt enable state during PTP system timestamping */
unsigned long irq_flags;
+
+ /* Flag for enabling opportunistic skipping of the queue in spi_sync */
+ bool queue_empty;
+ bool must_async;
+ bool defer_optimize_message;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -693,9 +815,9 @@ static inline void spi_controller_put(struct spi_controller *ctlr)
put_device(&ctlr->dev);
}
-static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
+static inline bool spi_controller_is_target(struct spi_controller *ctlr)
{
- return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
+ return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target;
}
/* PM calls that need to be issued by the driver */
@@ -715,23 +837,42 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
size_t progress, bool irqs_off);
-/* the spi driver core manages memory for the spi_controller classdev */
+/* The SPI driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
- unsigned int size, bool slave);
+ unsigned int size, bool target);
+
+static inline struct spi_controller *spi_alloc_host(struct device *dev,
+ unsigned int size)
+{
+ return __spi_alloc_controller(dev, size, false);
+}
-static inline struct spi_controller *spi_alloc_master(struct device *host,
+static inline struct spi_controller *spi_alloc_target(struct device *dev,
unsigned int size)
{
- return __spi_alloc_controller(host, size, false);
+ if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+ return NULL;
+
+ return __spi_alloc_controller(dev, size, true);
+}
+
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+ unsigned int size,
+ bool target);
+
+static inline struct spi_controller *devm_spi_alloc_host(struct device *dev,
+ unsigned int size)
+{
+ return __devm_spi_alloc_controller(dev, size, false);
}
-static inline struct spi_controller *spi_alloc_slave(struct device *host,
- unsigned int size)
+static inline struct spi_controller *devm_spi_alloc_target(struct device *dev,
+ unsigned int size)
{
if (!IS_ENABLED(CONFIG_SPI_SLAVE))
return NULL;
- return __spi_alloc_controller(host, size, true);
+ return __devm_spi_alloc_controller(dev, size, true);
}
extern int spi_register_controller(struct spi_controller *ctlr);
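
With spi_alloc_master()/spi_alloc_slave() superseded by the host/target helpers above, a controller probe path would look roughly like this; foo_probe_controller() and the chosen mode bits are assumptions, not part of the patch:

static int foo_probe_controller(struct device *dev)
{
	struct spi_controller *ctlr;

	/* Devres-managed allocation; freed automatically if probing fails */
	ctlr = devm_spi_alloc_host(dev, 0);
	if (!ctlr)
		return -ENOMEM;

	ctlr->num_chipselect = 1;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	/* ->set_cs, ->transfer_one, etc. would be filled in here */

	return devm_spi_register_controller(dev, ctlr);
}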
@@ -739,7 +880,30 @@ extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
-extern struct spi_controller *spi_busnum_to_master(u16 busnum);
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SPI_MASTER)
+extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
+extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index);
+int acpi_spi_count_resources(struct acpi_device *adev);
+#else
+static inline struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
+{
+ return NULL;
+}
+
+static inline struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int acpi_spi_count_resources(struct acpi_device *adev)
+{
+ return 0;
+}
+#endif
/*
* SPI resource management while processing a SPI message
@@ -750,29 +914,20 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
void *res);
/**
- * struct spi_res - spi resource management structure
+ * struct spi_res - SPI resource management structure
* @entry: list entry
* @release: release code called prior to freeing this resource
* @data: extra data allocated for the specific use-case
*
- * this is based on ideas from devres, but focused on life-cycle
- * management during spi_message processing
+ * This is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing.
*/
struct spi_res {
struct list_head entry;
spi_res_release_t release;
- unsigned long long data[]; /* guarantee ull alignment */
+ unsigned long long data[]; /* Guarantee ull alignment */
};
-extern void *spi_res_alloc(struct spi_device *spi,
- spi_res_release_t release,
- size_t size, gfp_t gfp);
-extern void spi_res_add(struct spi_message *message, void *res);
-extern void spi_res_free(void *res);
-
-extern void spi_res_release(struct spi_controller *ctlr,
- struct spi_message *message);
-
/*---------------------------------------------------------------------------*/
/*
@@ -783,7 +938,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
*
* The spi_messages themselves consist of a series of read+write transfer
* segments. Those segments always read the same number of bits as they
- * write; but one or the other is easily ignored by passing a null buffer
+ * write; but one or the other is easily ignored by passing a NULL buffer
* pointer. (This is unlike most types of I/O API, because SPI hardware
* is full duplex.)
*
@@ -794,10 +949,10 @@ extern void spi_res_release(struct spi_controller *ctlr,
/**
* struct spi_transfer - a read/write buffer pair
- * @tx_buf: data to be written (dma-safe memory), or NULL
- * @rx_buf: data to be read (dma-safe memory), or NULL
- * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
- * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+ * @tx_buf: data to be written (DMA-safe memory), or NULL
+ * @rx_buf: data to be read (DMA-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, currently not for client use
+ * @rx_dma: DMA address of rx_buf, currently not for client use
* @tx_nbits: number of bits used for writing. If 0 the default
* (SPI_NBITS_SINGLE) is used.
* @rx_nbits: number of bits used for reading. If 0 the default
@@ -807,23 +962,26 @@ extern void spi_res_release(struct spi_controller *ctlr,
* transfer. If 0 the default (from @spi_device) is used.
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
+ * @dummy_data: indicates that the transfer is a dummy-bytes transfer.
+ * @cs_off: performs the transfer with chipselect off.
* @cs_change: affects chipselect after this transfer completes
* @cs_change_delay: delay between cs deassert and assert when
* @cs_change is set and @spi_transfer is not the last in @spi_message
* @delay: delay to be introduced after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
- * @delay_usecs: microseconds to delay after this transfer before
- * (optionally) changing the chipselect status, then starting
- * the next transfer or completing this @spi_message.
* @word_delay: inter word delay to be introduced after each word size
* (set by bits_per_word) transmission.
* @effective_speed_hz: the effective SCK-speed that was used to
- * transfer this transfer. Set to 0 if the spi bus driver does
+ * transfer this transfer. Set to 0 if the SPI bus driver does
* not support it.
* @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA
+ * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
+ * @offload_flags: Flags that are only applicable to specialized SPI offload
+ * transfers. See %SPI_OFFLOAD_XFER_* in spi-offload.h.
* @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
* within @tx_buf for which the SPI device is requesting that the time
* snapshot for this transfer begins. Upon completing the SPI transfer,
@@ -838,26 +996,27 @@ extern void spi_res_release(struct spi_controller *ctlr,
* purposefully (instead of setting to spi_transfer->len - 1) to denote
* that a transfer-level snapshot taken from within the driver may still
* be of higher quality.
- * @ptp_sts: Pointer to a memory location held by the SPI slave device where a
+ * @ptp_sts: Pointer to a memory location held by the SPI target device where a
* PTP system timestamp structure may lie. If drivers use PIO or their
* hardware has some sort of assist for retrieving exact transfer timing,
* they can (and should) assert @ptp_sts_supported and populate this
* structure using the ptp_read_system_*ts helper functions.
- * The timestamp must represent the time at which the SPI slave device has
+ * The timestamp must represent the time at which the SPI target device has
* processed the word, i.e. the "pre" timestamp should be taken before
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
+ * @dtr_mode: true if supports double transfer rate.
* @timestamped: true if the transfer has been timestamped
- * @error: Error status logged by spi controller driver.
+ * @error: Error status logged by SPI controller driver.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
* In some cases, they may also want to provide DMA addresses for
* the data being transferred; that may reduce overhead, when the
- * underlying driver uses dma.
+ * underlying driver uses DMA.
*
- * If the transmit buffer is null, zeroes will be shifted out
- * while filling @rx_buf. If the receive buffer is null, the data
+ * If the transmit buffer is NULL, zeroes will be shifted out
+ * while filling @rx_buf. If the receive buffer is NULL, the data
* shifted in will be discarded. Only "len" bytes shift out (or in).
* It's an error to try to shift out a partial word. (For example, by
* shifting out three bytes with word size of sixteen or twenty bits;
@@ -891,13 +1050,16 @@ extern void spi_res_release(struct spi_controller *ctlr,
* Some devices need protocol transactions to be built from a series of
* spi_message submissions, where the content of one message is determined
* by the results of previous messages and where the whole transaction
- * ends when the chipselect goes intactive.
+ * ends when the chipselect goes inactive.
*
* When SPI can transfer in 1x,2x or 4x. It can get this transfer information
* from device through @tx_nbits and @rx_nbits. In Bi-direction, these
* two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x)
* SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer.
*
+ * Users may also set dtr_mode to true to use dual transfer rate mode if
+ * desired. If not set, single transfer rate mode is used by default.
+ *
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
@@ -905,28 +1067,39 @@ extern void spi_res_release(struct spi_controller *ctlr,
* and its transfers, ignore them until its completion callback.
*/
struct spi_transfer {
- /* it's ok if tx_buf == rx_buf (right?)
- * for MicroWire, one buffer must be null
- * buffers must work with dma_*map_single() calls, unless
- * spi_message.is_dma_mapped reports a pre-existing mapping
+ /*
+ * It's okay if tx_buf == rx_buf (right?).
+ * For MicroWire, one buffer must be NULL.
+ * Buffers must work with dma_*map_single() calls.
*/
const void *tx_buf;
void *rx_buf;
unsigned len;
- dma_addr_t tx_dma;
- dma_addr_t rx_dma;
+#define SPI_TRANS_FAIL_NO_START BIT(0)
+#define SPI_TRANS_FAIL_IO BIT(1)
+ u16 error;
+
+ bool tx_sg_mapped;
+ bool rx_sg_mapped;
+
struct sg_table tx_sg;
struct sg_table rx_sg;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
+ unsigned dummy_data:1;
+ unsigned cs_off:1;
unsigned cs_change:1;
- unsigned tx_nbits:3;
- unsigned rx_nbits:3;
-#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
-#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
-#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
+ unsigned tx_nbits:4;
+ unsigned rx_nbits:4;
+ unsigned timestamped:1;
+ bool dtr_mode;
+#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
+#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
+#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
+#define SPI_NBITS_OCTAL 0x08 /* 8-bit transfer */
u8 bits_per_word;
- u16 delay_usecs;
struct spi_delay delay;
struct spi_delay cs_change_delay;
struct spi_delay word_delay;
@@ -934,34 +1107,35 @@ struct spi_transfer {
u32 effective_speed_hz;
+ /* Use %SPI_OFFLOAD_XFER_* from spi-offload.h */
+ unsigned int offload_flags;
+
unsigned int ptp_sts_word_pre;
unsigned int ptp_sts_word_post;
struct ptp_system_timestamp *ptp_sts;
- bool timestamped;
-
struct list_head transfer_list;
-
-#define SPI_TRANS_FAIL_NO_START BIT(0)
- u16 error;
};
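
Since delay_usecs is gone, per-transfer delays are now expressed through the struct spi_delay members; a small illustrative helper (example_fill_transfer() and its values are made up):

static void example_fill_transfer(struct spi_transfer *t, const void *buf)
{
	t->tx_buf = buf;
	t->len = 4;
	t->bits_per_word = 8;

	/* Replaces the old t->delay_usecs = 5 */
	t->delay.value = 5;
	t->delay.unit = SPI_DELAY_UNIT_USECS;

	t->cs_change = 1;
	t->cs_change_delay.value = 10;
	t->cs_change_delay.unit = SPI_DELAY_UNIT_USECS;
}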
/**
* struct spi_message - one multi-segment SPI transaction
* @transfers: list of transfer segments in this transaction
* @spi: SPI device to which the transaction is queued
- * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
- * addresses for each transfer buffer
+ * @pre_optimized: peripheral driver pre-optimized the message
+ * @optimized: the message is in the optimized state
+ * @prepared: spi_prepare_message was called for the this message
+ * @status: zero for success, else negative errno
* @complete: called to report transaction completions
* @context: the argument to complete() when it's called
* @frame_length: the total number of bytes in the message
* @actual_length: the total number of bytes that were transferred in all
* successful segments
- * @status: zero for success, else negative errno
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
- * @resources: for resource management when the spi message is processed
+ * @opt_state: for use by whichever driver currently owns the message
+ * @resources: for resource management when the SPI message is processed
+ * @offload: (optional) offload instance used by this message
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
@@ -982,9 +1156,16 @@ struct spi_message {
struct spi_device *spi;
- unsigned is_dma_mapped:1;
+ /* spi_optimize_message() was called for this message */
+ bool pre_optimized;
+ /* __spi_optimize_message() was called for this message */
+ bool optimized;
- /* REVISIT: we might want a flag affecting the behavior of the
+ /* spi_prepare_message() was called for this message */
+ bool prepared;
+
+ /*
+ * REVISIT: we might want a flag affecting the behavior of the
* last transfer ... allowing things like "read 16 bit length L"
* immediately followed by "read L bytes". Basically imposing
* a specific message scheduling algorithm.
@@ -995,21 +1176,33 @@ struct spi_message {
* tell them about such special cases.
*/
- /* completion is reported through a callback */
+ /* Completion is reported through a callback */
+ int status;
void (*complete)(void *context);
void *context;
unsigned frame_length;
unsigned actual_length;
- int status;
- /* for optional use by whatever driver currently owns the
+ /*
+ * For optional use by whatever driver currently owns the
* spi_message ... between calls to spi_async and then later
* complete(), that's the spi_controller controller driver.
*/
struct list_head queue;
void *state;
+ /*
+ * Optional state for use by controller driver between calls to
+ * __spi_optimize_message() and __spi_unoptimize_message().
+ */
+ void *opt_state;
+
+ /*
+ * Optional offload instance used by this message. This must be set
+ * by the peripheral driver before calling spi_optimize_message().
+ */
+ struct spi_offload *offload;
- /* list of spi_res reources when the spi message is processed */
+ /* List of spi_res resources when the SPI message is processed */
struct list_head resources;
};
@@ -1040,21 +1233,13 @@ spi_transfer_del(struct spi_transfer *t)
static inline int
spi_transfer_delay_exec(struct spi_transfer *t)
{
- struct spi_delay d;
-
- if (t->delay_usecs) {
- d.value = t->delay_usecs;
- d.unit = SPI_DELAY_UNIT_USECS;
- return spi_delay_exec(&d, NULL);
- }
-
return spi_delay_exec(&t->delay, t);
}
/**
* spi_message_init_with_transfers - Initialize spi_message and append transfers
* @m: spi_message to be initialized
- * @xfers: An array of spi transfers
+ * @xfers: An array of SPI transfers
* @num_xfers: Number of items in the xfer array
*
* This function initializes the given spi_message and adds each spi_transfer in
@@ -1071,26 +1256,27 @@ struct spi_transfer *xfers, unsigned int num_xfers)
spi_message_add_tail(&xfers[i], m);
}
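
A short usage sketch for the helper documented above; example_two_xfers() and its buffer lengths are illustrative:

static int example_two_xfers(struct spi_device *spi, const u8 *cmd, u8 *resp)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd,  .len = 1 },
		{ .rx_buf = resp, .len = 2 },
	};
	struct spi_message m;

	spi_message_init_with_transfers(&m, xfers, ARRAY_SIZE(xfers));

	return spi_sync(spi, &m);
}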
-/* It's fine to embed message and transaction structures in other data
+/*
+ * It's fine to embed message and transaction structures in other data
* structures so long as you don't free them while they're in use.
*/
-
static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
- struct spi_message *m;
-
- m = kzalloc(sizeof(struct spi_message)
- + ntrans * sizeof(struct spi_transfer),
- flags);
- if (m) {
- unsigned i;
- struct spi_transfer *t = (struct spi_transfer *)(m + 1);
-
- spi_message_init_no_memset(m);
- for (i = 0; i < ntrans; i++, t++)
- spi_message_add_tail(t, m);
- }
- return m;
+ struct spi_message_with_transfers {
+ struct spi_message m;
+ struct spi_transfer t[];
+ } *mwt;
+ unsigned i;
+
+ mwt = kzalloc(struct_size(mwt, t, ntrans), flags);
+ if (!mwt)
+ return NULL;
+
+ spi_message_init_no_memset(&mwt->m);
+ for (i = 0; i < ntrans; i++)
+ spi_message_add_tail(&mwt->t[i], &mwt->m);
+
+ return &mwt->m;
}
static inline void spi_message_free(struct spi_message *m)
@@ -1098,16 +1284,14 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
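
For context, a sketch of how the (unchanged) alloc/free pair around the reworked spi_message_alloc() body is typically used; example_alloc_and_send() is a made-up name, and the single pre-chained transfer is fetched off m->transfers:

static int example_alloc_and_send(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_message *m = spi_message_alloc(1, GFP_KERNEL);
	struct spi_transfer *t;
	int ret;

	if (!m)
		return -ENOMEM;

	/* spi_message_alloc() already chained one transfer onto m->transfers */
	t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
	t->tx_buf = buf;
	t->len = len;

	ret = spi_sync(spi, m);
	spi_message_free(m);
	return ret;
}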
-extern int spi_set_cs_timing(struct spi_device *spi,
- struct spi_delay *setup,
- struct spi_delay *hold,
- struct spi_delay *inactive);
+extern int spi_optimize_message(struct spi_device *spi, struct spi_message *msg);
+extern void spi_unoptimize_message(struct spi_message *msg);
+extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+ struct spi_message *msg);
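
A hedged sketch of the reuse pattern the new optimize API is aimed at; example_stream() and the iteration count are invented, and the message is assumed to have been fully built by the caller:

static int example_stream(struct spi_device *spi, struct spi_message *msg)
{
	int ret, i;

	/* Pre-compute validation/splitting once instead of on every submission */
	ret = spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	for (i = 0; i < 100; i++) {
		ret = spi_sync(spi, msg);
		if (ret)
			break;
	}

	spi_unoptimize_message(msg);
	return ret;
}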
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_async_locked(struct spi_device *spi,
- struct spi_message *message);
-extern int spi_slave_abort(struct spi_device *spi);
+extern int spi_target_abort(struct spi_device *spi);
static inline size_t
spi_max_message_size(struct spi_device *spi)
@@ -1129,7 +1313,7 @@ spi_max_transfer_size(struct spi_device *spi)
if (ctlr->max_transfer_size)
tr_max = ctlr->max_transfer_size(spi);
- /* transfer size limit must not be greater than messsage size limit */
+ /* Transfer size limit must not be greater than message size limit */
return min(tr_max, msg_max);
}
@@ -1145,7 +1329,7 @@ spi_max_transfer_size(struct spi_device *spi)
*/
static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
{
- u32 bpw_mask = spi->master->bits_per_word_mask;
+ u32 bpw_mask = spi->controller->bits_per_word_mask;
if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw)))
return true;
@@ -1153,6 +1337,49 @@ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
return false;
}
+/**
+ * spi_bpw_to_bytes - Convert bits per word to bytes
+ * @bpw: Bits per word
+ *
+ * This function converts the given @bpw to bytes. The result is always
+ * power-of-two, e.g.,
+ *
+ * =============== =================
+ * Input (in bits) Output (in bytes)
+ * =============== =================
+ * 5 1
+ * 9 2
+ * 21 4
+ * 37 8
+ * =============== =================
+ *
+ * It will return 0 for the 0 input.
+ *
+ * Returns:
+ * Bytes for the given @bpw.
+ */
+static inline u32 spi_bpw_to_bytes(u32 bpw)
+{
+ return roundup_pow_of_two(BITS_TO_BYTES(bpw));
+}
+
+/**
+ * spi_controller_xfer_timeout - Compute a suitable timeout value
+ * @ctlr: SPI device
+ * @xfer: Transfer descriptor
+ *
+ * Compute a relevant timeout value for the given transfer. We derive the time
+ * that it would take on a single data line and take twice this amount of time
+ * with a minimum of 500ms to avoid false positives on loaded systems.
+ *
+ * Returns: Transfer timeout value in milliseconds.
+ */
+static inline unsigned int spi_controller_xfer_timeout(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ return max(xfer->len * 8 * 2 / (xfer->speed_hz / 1000), 500U);
+}
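
Worked example of the formula above (editor's illustration): a 1 MiB transfer at 10 MHz gives 1048576 * 8 * 2 = 16777216 bit-times and speed_hz / 1000 = 10000, i.e. roughly 1677 ms; a 256-byte transfer at 1 MHz computes to only 4 ms and is therefore clamped up to the 500 ms floor.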
+
/*---------------------------------------------------------------------------*/
/* SPI transfer replacement methods which make use of spi_res */
@@ -1166,7 +1393,7 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* replacements that have occurred
* so that they can get reverted
* @release: some extra release code to get executed prior to
- * relasing this structure
+ * releasing this structure
* @extradata: pointer to some extra data if requested or NULL
* @replaced_transfers: transfers that have been replaced and which need
* to get restored
@@ -1176,9 +1403,9 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* @inserted_transfers: array of spi_transfers of array-size @inserted,
* that have been replacing replaced_transfers
*
- * note: that @extradata will point to @inserted_transfers[@inserted]
+ * Note: that @extradata will point to @inserted_transfers[@inserted]
* if some extra allocation is requested, so alignment will be the same
- * as for spi_transfers
+ * as for spi_transfers.
*/
struct spi_replaced_transfers {
spi_replaced_release_t release;
@@ -1189,27 +1416,21 @@ struct spi_replaced_transfers {
struct spi_transfer inserted_transfers[];
};
-extern struct spi_replaced_transfers *spi_replace_transfers(
- struct spi_message *msg,
- struct spi_transfer *xfer_first,
- size_t remove,
- size_t insert,
- spi_replaced_release_t release,
- size_t extradatasize,
- gfp_t gfp);
-
/*---------------------------------------------------------------------------*/
/* SPI transfer transformation methods */
extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
- size_t maxsize,
- gfp_t gfp);
+ size_t maxsize);
+extern int spi_split_transfers_maxwords(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ size_t maxwords);
/*---------------------------------------------------------------------------*/
-/* All these synchronous SPI transfer routines are utilities layered
+/*
+ * All these synchronous SPI transfer routines are utilities layered
* over the core async transfer primitive. Here, "synchronous" means
* they will sleep uninterruptibly until the async transfer completes.
*/
@@ -1289,7 +1510,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len)
return spi_sync_transfer(spi, &t, 1);
}
-/* this copies txbuf and rxbuf data; for small transfers only! */
+/* This copies txbuf and rxbuf data; for small transfers only! */
extern int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
void *rxbuf, unsigned n_rx);
@@ -1312,7 +1533,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
status = spi_write_then_read(spi, &cmd, 1, &result, 1);
- /* return negative errno or unsigned value */
+ /* Return negative errno or unsigned value */
return (status < 0) ? status : result;
}
@@ -1337,7 +1558,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
status = spi_write_then_read(spi, &cmd, 1, &result, 2);
- /* return negative errno or unsigned value */
+ /* Return negative errno or unsigned value */
return (status < 0) ? status : result;
}
@@ -1352,7 +1573,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
*
* Callable only from contexts that can sleep.
*
- * Return: the (unsigned) sixteen bit number returned by the device in cpu
+ * Return: the (unsigned) sixteen bit number returned by the device in CPU
* endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
@@ -1380,7 +1601,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* As a rule, SPI devices can't be probed. Instead, board init code
* provides a table listing the devices which are present, with enough
* information to bind and set up the device's driver. There's basic
- * support for nonstatic configurations too; enough to handle adding
+ * support for non-static configurations too; enough to handle adding
* parport adapters, or microcontrollers acting as USB-to-SPI bridges.
*/
@@ -1389,7 +1610,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* @modalias: Initializes spi_device.modalias; identifies the driver.
* @platform_data: Initializes spi_device.platform_data; the particular
* data stored there is driver-specific.
- * @properties: Additional device properties for the device.
+ * @swnode: Software node for the device.
* @controller_data: Initializes spi_device.controller_data; some
* controllers need hints about hardware setup, e.g. for DMA.
* @irq: Initializes spi_device.irq; depends on how the board is wired.
@@ -1417,39 +1638,42 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* are active in some dynamic board configuration models.
*/
struct spi_board_info {
- /* the device name and module name are coupled, like platform_bus;
+ /*
+ * The device name and module name are coupled, like platform_bus;
* "modalias" is normally the driver name.
*
* platform_data goes to spi_device.dev.platform_data,
* controller_data goes to spi_device.controller_data,
- * device properties are copied and attached to spi_device,
- * irq is copied too
+ * IRQ is copied too.
*/
char modalias[SPI_NAME_SIZE];
const void *platform_data;
- const struct property_entry *properties;
+ const struct software_node *swnode;
void *controller_data;
int irq;
- /* slower signaling on noisy or low voltage boards */
+ /* Slower signaling on noisy or low voltage boards */
u32 max_speed_hz;
- /* bus_num is board specific and matches the bus_num of some
+ /*
+ * bus_num is board specific and matches the bus_num of some
* spi_controller that will probably be registered later.
*
- * chip_select reflects how this chip is wired to that master;
+ * chip_select reflects how this chip is wired to that controller;
* it's less than num_chipselect.
*/
u16 bus_num;
u16 chip_select;
- /* mode becomes spi_device.mode, and is essential for chips
+ /*
+ * mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
u32 mode;
- /* ... may need additional spi_device chip config data here.
+ /*
+ * ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
* needed to behave without being bound to a driver:
* - quirks like clock rate mattering when not selected
@@ -1460,19 +1684,20 @@ struct spi_board_info {
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
-/* board init code may ignore whether SPI is configured or not */
+/* Board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ return 0; }
#endif
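
For illustration, a minimal board-info table in the @swnode-era layout; the "foo-sensor" entry and the bus/CS numbers are hypothetical, and registration would normally happen from board init code:

static struct spi_board_info example_board_info[] = {
	{
		.modalias	= "foo-sensor",
		.bus_num	= 1,
		.chip_select	= 0,
		.max_speed_hz	= 1000000,
		.mode		= SPI_MODE_3,
	},
};

/* Called early, e.g. from the board file:
 *	spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info));
 */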
-/* If you're hotplugging an adapter with devices (parport, usb, etc)
+/*
+ * If you're hotplugging an adapter with devices (parport, USB, etc)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
* normally that would be handled by spi_unregister_controller().
*
* You can also use spi_alloc_device() and spi_add_device() to use a two
- * stage registration sequence for each spi_device. This gives the caller
+ * stage registration sequence for each spi_device. This gives the caller
* some more control over the spi_device structure before it is registered,
* but requires that caller to initialize fields that would otherwise
* be defined using the board info.
@@ -1491,49 +1716,13 @@ extern void spi_unregister_device(struct spi_device *spi);
extern const struct spi_device_id *
spi_get_device_id(const struct spi_device *sdev);
+extern const void *
+spi_get_device_match_data(const struct spi_device *sdev);
+
static inline bool
spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
{
return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
}
-/* OF support code */
-#if IS_ENABLED(CONFIG_OF)
-
-/* must call put_device() when done with returned spi_device device */
-extern struct spi_device *
-of_find_spi_device_by_node(struct device_node *node);
-
-#else
-
-static inline struct spi_device *
-of_find_spi_device_by_node(struct device_node *node)
-{
- return NULL;
-}
-
-#endif /* IS_ENABLED(CONFIG_OF) */
-
-/* Compatibility layer */
-#define spi_master spi_controller
-
-#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
-#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
-#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
-#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
-#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
-
-#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
-#define spi_master_set_devdata(_ctlr, _data) \
- spi_controller_set_devdata(_ctlr, _data)
-#define spi_master_get(_ctlr) spi_controller_get(_ctlr)
-#define spi_master_put(_ctlr) spi_controller_put(_ctlr)
-#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr)
-#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr)
-
-#define spi_register_master(_ctlr) spi_register_controller(_ctlr)
-#define devm_spi_register_master(_dev, _ctlr) \
- devm_spi_register_controller(_dev, _ctlr)
-#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr)
-
#endif /* __LINUX_SPI_H */