author    Mark Brown <broonie@linaro.org>  2014-03-30 00:50:56 +0000
committer Mark Brown <broonie@linaro.org>  2014-03-30 00:50:56 +0000
commit    a78389844ea95f18aadda79246587c707586ce89 (patch)
tree      37d57897ff10f1a940736fc6f3fa94968edbab22 /drivers/spi
parent    Merge remote-tracking branch 'spi/topic/core' into spi-next (diff)
parent    spi: Make max_tx and max_rx the same type (diff)
Merge remote-tracking branch 'spi/topic/dma' into spi-next
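
This merge brings in the spi/topic/dma topic branch: the SPI core gains
generic DMA mapping support (spi_map_buf()/spi_map_msg(), a per-transfer
can_dma() callback, dummy TX/RX buffers for SPI_MASTER_MUST_TX and
SPI_MASTER_MUST_RX, and a max_dma_len segment limit), and spi-s3c64xx is
converted to use the core-provided scatterlists. As a rough illustration
of how a controller driver might opt in, here is a minimal sketch; the
foo_* names, the FIFO threshold, and the "tx"/"rx" channel names are
invented for the example — only the can_dma, max_dma_len and
dma_tx/dma_rx fields come from this series.

	/*
	 * Minimal sketch (not part of this commit): opting in to the
	 * core DMA mapping.  Everything foo_* is hypothetical.
	 */
	#include <linux/dmaengine.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/sizes.h>
	#include <linux/spi/spi.h>

	#define FOO_SPI_FIFO_LEN	64	/* assumed FIFO depth */

	static bool foo_spi_can_dma(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
	{
		/* Let the core map only transfers too big for the FIFO */
		return xfer->len > FOO_SPI_FIFO_LEN;
	}

	static int foo_spi_probe(struct platform_device *pdev)
	{
		struct spi_master *master;

		master = spi_alloc_master(&pdev->dev, 0);
		if (!master)
			return -ENOMEM;

		/*
		 * With can_dma() and both channels in place, the core
		 * builds xfer->tx_sg/xfer->rx_sg before the transfer
		 * hooks run.
		 */
		master->can_dma = foo_spi_can_dma;
		master->max_dma_len = SZ_64K;	/* per-segment cap in spi_map_buf() */
		master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
		master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
		if (!master->dma_tx || !master->dma_rx) {
			spi_master_put(master);
			return -EPROBE_DEFER;	/* sketch; a real driver may fall back to PIO */
		}

		/* ...transfer_one(), bits_per_word_mask, etc. omitted... */

		return devm_spi_register_master(&pdev->dev, master);
	}

Because can_dma() is consulted per transfer, a driver can keep its PIO
fast path for short transfers while the message pump maps scatterlists
for anything it accepts.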
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/spi-s3c64xx.c | 207
-rw-r--r--  drivers/spi/spi.c         | 180
2 files changed, 279 insertions(+), 108 deletions(-)
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ae907dde1371..25c9bd409a87 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
#else
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
- unsigned len, dma_addr_t buf)
+ struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
@@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
dmaengine_slave_config(dma->ch, &config);
}
- desc = dmaengine_prep_slave_single(dma->ch, buf, len,
- dma->direction, DMA_PREP_INTERRUPT);
+ desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ dma->direction, DMA_PREP_INTERRUPT);
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
@@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+#ifndef CONFIG_S3C_DMA
+ prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+#else
prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+#endif
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
+#ifndef CONFIG_S3C_DMA
+ prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+#else
prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+#endif
}
}
@@ -555,23 +563,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
- if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
- /* Deselect the last toggled device */
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
- }
- sdd->tgl_spi = NULL;
- }
-
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
int timeout_ms)
{
@@ -593,112 +584,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
return RX_FIFO_LVL(status, sdd);
}
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
- struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
{
void __iomem *regs = sdd->regs;
unsigned long val;
+ u32 status;
int ms;
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 10; /* some tolerance */
- if (dma_mode) {
- val = msecs_to_jiffies(ms) + 10;
- val = wait_for_completion_timeout(&sdd->xfer_completion, val);
- } else {
- u32 status;
- val = msecs_to_loops(ms);
- do {
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+ /*
+ * If the previous xfer was completed within timeout, then
+ * proceed further else return -EIO.
+ * DmaTx returns after simply writing data in the FIFO,
+ * w/o waiting for real transmission on the bus to finish.
+ * DmaRx returns only after Dma read data from FIFO which
+ * needs bus transmission to finish, so we don't worry if
+ * Xfer involved Rx(with or without Tx).
+ */
+ if (val && !xfer->rx_buf) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+ && --val) {
+ cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+ }
+
}
- if (dma_mode) {
- u32 status;
-
- /*
- * If the previous xfer was completed within timeout, then
- * proceed further else return -EIO.
- * DmaTx returns after simply writing data in the FIFO,
- * w/o waiting for real transmission on the bus to finish.
- * DmaRx returns only after Dma read data from FIFO which
- * needs bus transmission to finish, so we don't worry if
- * Xfer involved Rx(with or without Tx).
- */
- if (val && !xfer->rx_buf) {
- val = msecs_to_loops(10);
- status = readl(regs + S3C64XX_SPI_STATUS);
- while ((TX_FIFO_LVL(status, sdd)
- || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
- && --val) {
- cpu_relax();
- status = readl(regs + S3C64XX_SPI_STATUS);
- }
+ /* If timed out while checking rx/tx status return error */
+ if (!val)
+ return -EIO;
- }
+ return 0;
+}
- /* If timed out while checking rx/tx status return error */
- if (!val)
- return -EIO;
- } else {
- int loops;
- u32 cpy_len;
- u8 *buf;
-
- /* If it was only Tx */
- if (!xfer->rx_buf) {
- sdd->state &= ~TXBUSY;
- return 0;
- }
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int loops;
+ u32 cpy_len;
+ u8 *buf;
+ int ms;
- /*
- * If the receive length is bigger than the controller fifo
- * size, calculate the loops and read the fifo as many times.
- * loops = length / max fifo size (calculated by using the
- * fifo mask).
- * For any size less than the fifo size the below code is
- * executed atleast once.
- */
- loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
- buf = xfer->rx_buf;
- do {
- /* wait for data to be received in the fifo */
- cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
- (loops ? ms : 0));
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 10; /* some tolerance */
- switch (sdd->cur_bpw) {
- case 32:
- ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len / 4);
- break;
- case 16:
- ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len / 2);
- break;
- default:
- ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len);
- break;
- }
+ val = msecs_to_loops(ms);
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
- buf = buf + cpy_len;
- } while (loops--);
- sdd->state &= ~RXBUSY;
+
+ /* If it was only Tx */
+ if (!xfer->rx_buf) {
+ sdd->state &= ~TXBUSY;
+ return 0;
}
- return 0;
-}
+ /*
+ * If the receive length is bigger than the controller fifo
+ * size, calculate the loops and read the fifo as many times.
+ * loops = length / max fifo size (calculated by using the
+ * fifo mask).
+ * For any size less than the fifo size the below code is
+ * executed atleast once.
+ */
+ loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ buf = xfer->rx_buf;
+ do {
+ /* wait for data to be received in the fifo */
+ cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+ (loops ? ms : 0));
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 4);
+ break;
+ case 16:
+ ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 2);
+ break;
+ default:
+ ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len);
+ break;
+ }
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- if (sdd->tgl_spi == spi)
- sdd->tgl_spi = NULL;
+ buf = buf + cpy_len;
+ } while (loops--);
+ sdd->state &= ~RXBUSY;
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ return 0;
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -929,7 +919,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
spin_unlock_irqrestore(&sdd->lock, flags);
- status = wait_for_xfer(sdd, xfer, use_dma);
+ if (use_dma)
+ status = wait_for_dma(sdd, xfer);
+ else
+ status = wait_for_pio(sdd, xfer);
if (status) {
dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -1092,14 +1085,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
pm_runtime_put(&sdd->pdev->dev);
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- disable_cs(sdd, spi);
return 0;
setup_exit:
pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- disable_cs(sdd, spi);
gpio_free(cs->line);
spi_set_ctldata(spi, NULL);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0a8f399c907f..32e4603d5fc8 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -578,6 +580,169 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
spi->master->set_cs(spi, !enable);
}
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+ const int sgs = DIV_ROUND_UP(len, desc_len);
+ struct page *vm_page;
+ void *sg_buf;
+ size_t min;
+ int i, ret;
+
+ ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < sgs; i++) {
+ min = min_t(size_t, len, desc_len);
+
+ if (vmalloced_buf) {
+ vm_page = vmalloc_to_page(buf);
+ if (!vm_page) {
+ sg_free_table(sgt);
+ return -ENOMEM;
+ }
+ sg_buf = page_address(vm_page) +
+ ((size_t)buf & ~PAGE_MASK);
+ } else {
+ sg_buf = buf;
+ }
+
+ sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+ buf += min;
+ len -= min;
+ }
+
+ ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (ret < 0) {
+ sg_free_table(sgt);
+ return ret;
+ }
+
+ sgt->nents = ret;
+
+ return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ if (sgt->orig_nents) {
+ dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ sg_free_table(sgt);
+ }
+}
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct device *tx_dev, *rx_dev;
+ struct spi_transfer *xfer;
+ void *tmp;
+ unsigned int max_tx, max_rx;
+ int ret;
+
+ if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+ max_tx = 0;
+ max_rx = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if ((master->flags & SPI_MASTER_MUST_TX) &&
+ !xfer->tx_buf)
+ max_tx = max(xfer->len, max_tx);
+ if ((master->flags & SPI_MASTER_MUST_RX) &&
+ !xfer->rx_buf)
+ max_rx = max(xfer->len, max_rx);
+ }
+
+ if (max_tx) {
+ tmp = krealloc(master->dummy_tx, max_tx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_tx = tmp;
+ memset(tmp, 0, max_tx);
+ }
+
+ if (max_rx) {
+ tmp = krealloc(master->dummy_rx, max_rx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_rx = tmp;
+ }
+
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
+ if (!xfer->tx_buf)
+ xfer->tx_buf = master->dummy_tx;
+ if (!xfer->rx_buf)
+ xfer->rx_buf = master->dummy_rx;
+ }
+ }
+ }
+
+ if (!master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ if (xfer->tx_buf != NULL) {
+ ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (xfer->rx_buf != NULL) {
+ ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (ret != 0) {
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE);
+ return ret;
+ }
+ }
+ }
+
+ master->cur_msg_mapped = true;
+
+ return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *tx_dev, *rx_dev;
+
+ if (!master->cur_msg_mapped || !master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -684,6 +849,10 @@ static void spi_pump_messages(struct kthread_work *work)
}
master->busy = false;
spin_unlock_irqrestore(&master->queue_lock, flags);
+ kfree(master->dummy_rx);
+ master->dummy_rx = NULL;
+ kfree(master->dummy_tx);
+ master->dummy_tx = NULL;
if (master->unprepare_transfer_hardware &&
master->unprepare_transfer_hardware(master))
dev_err(&master->dev,
@@ -750,6 +919,13 @@ static void spi_pump_messages(struct kthread_work *work)
master->cur_msg_prepared = true;
}
+ ret = spi_map_msg(master, master->cur_msg);
+ if (ret) {
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
@@ -837,6 +1013,8 @@ void spi_finalize_current_message(struct spi_master *master)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
+ spi_unmap_msg(master, mesg);
+
if (master->cur_msg_prepared && master->unprepare_message) {
ret = master->unprepare_message(master, mesg);
if (ret) {
@@ -1370,6 +1548,8 @@ int spi_register_master(struct spi_master *master)
mutex_init(&master->bus_lock_mutex);
master->bus_lock_flag = 0;
init_completion(&master->xfer_completion);
+ if (!master->max_dma_len)
+ master->max_dma_len = INT_MAX;
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.