Diffstat (limited to 'drivers/tty/serial/samsung.c')
 drivers/tty/serial/samsung.c | 680 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 662 insertions(+), 18 deletions(-)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 107e80722575..af821a908720 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -28,6 +28,9 @@
#define SUPPORT_SYSRQ
#endif
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -78,6 +81,10 @@ static void dbg(const char *fmt, ...)
#define S3C24XX_SERIAL_MAJOR 204
#define S3C24XX_SERIAL_MINOR 64
+#define S3C24XX_TX_PIO 1
+#define S3C24XX_TX_DMA 2
+#define S3C24XX_RX_PIO 1
+#define S3C24XX_RX_DMA 2
/* macros to change one thing to another */
#define tx_enabled(port) ((port)->unused[0])
@@ -154,39 +161,272 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
static void s3c24xx_serial_stop_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct dma_tx_state state;
+ int count;
- if (tx_enabled(port)) {
- if (s3c24xx_serial_has_interrupt_mask(port))
- __set_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
- else
- disable_irq_nosync(ourport->tx_irq);
- tx_enabled(port) = 0;
- if (port->flags & UPF_CONS_FLOW)
- s3c24xx_serial_rx_enable(port);
+ if (!tx_enabled(port))
+ return;
+
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __set_bit(S3C64XX_UINTM_TXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ disable_irq_nosync(ourport->tx_irq);
+
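+ /* A TX DMA transfer is in flight: stop it and credit the bytes already sent */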
+ if (dma && dma->tx_chan && ourport->tx_in_progress == S3C24XX_TX_DMA) {
+ dmaengine_pause(dma->tx_chan);
+ dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
+ dmaengine_terminate_all(dma->tx_chan);
+ dma_sync_single_for_cpu(ourport->port.dev,
+ dma->tx_transfer_addr, dma->tx_size, DMA_TO_DEVICE);
+ async_tx_ack(dma->tx_desc);
+ count = dma->tx_bytes_requested - state.residue;
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += count;
}
+
+ tx_enabled(port) = 0;
+ ourport->tx_in_progress = 0;
+
+ if (port->flags & UPF_CONS_FLOW)
+ s3c24xx_serial_rx_enable(port);
+
+ ourport->tx_mode = 0;
}
-static void s3c24xx_serial_start_tx(struct uart_port *port)
+static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport);
+
+static void s3c24xx_serial_tx_dma_complete(void *args)
+{
+ struct s3c24xx_uart_port *ourport = args;
+ struct uart_port *port = &ourport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct dma_tx_state state;
+ unsigned long flags;
+ int count;
+
+ dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
+ count = dma->tx_bytes_requested - state.residue;
+ async_tx_ack(dma->tx_desc);
+
+ dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,
+ dma->tx_size, DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += count;
+ ourport->tx_in_progress = 0;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ s3c24xx_serial_start_next_tx(ourport);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ u32 ucon;
+
+ /* Mask Tx interrupt */
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __set_bit(S3C64XX_UINTM_TXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ disable_irq_nosync(ourport->tx_irq);
+
+ /* Enable tx dma mode */
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK);
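+ /* Use a 16-byte TX burst only when cache alignment allows, matching dst_maxburst */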
+ ucon |= (dma_get_cache_alignment() >= 16) ?
+ S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1;
+ ucon |= S3C64XX_UCON_TXMODE_DMA;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ ourport->tx_mode = S3C24XX_TX_DMA;
+}
+
+static void enable_tx_pio(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ u32 ucon, ufcon;
+
+ /* Set ufcon txtrig */
+ ourport->tx_in_progress = S3C24XX_TX_PIO;
+ ufcon = rd_regl(port, S3C2410_UFCON);
+ wr_regl(port, S3C2410_UFCON, ufcon);
+
+ /* Enable tx pio mode */
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~(S3C64XX_UCON_TXMODE_MASK);
+ ucon |= S3C64XX_UCON_TXMODE_CPU;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ /* Unmask Tx interrupt */
+ if (s3c24xx_serial_has_interrupt_mask(port))
+ __clear_bit(S3C64XX_UINTM_TXD,
+ portaddrl(port, S3C64XX_UINTM));
+ else
+ enable_irq(ourport->tx_irq);
+
+ ourport->tx_mode = S3C24XX_TX_PIO;
+}
+
+static void s3c24xx_serial_start_tx_pio(struct s3c24xx_uart_port *ourport)
+{
+ if (ourport->tx_mode != S3C24XX_TX_PIO)
+ enable_tx_pio(ourport);
+}
+
+static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
+ unsigned int count)
+{
+ struct uart_port *port = &ourport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+
+ if (ourport->tx_mode != S3C24XX_TX_DMA)
+ enable_tx_dma(ourport);
+
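+ /* Send bytes by PIO until the circular-buffer tail is aligned for DMA */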
+ while (xmit->tail & (dma_get_cache_alignment() - 1)) {
+ if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
+ return 0;
+ wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ count--;
+ }
+
+ dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
+ dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
+
+ dma_sync_single_for_device(ourport->port.dev, dma->tx_transfer_addr,
+ dma->tx_size, DMA_TO_DEVICE);
+
+ dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
+ dma->tx_transfer_addr, dma->tx_size,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!dma->tx_desc) {
+ dev_err(ourport->port.dev, "Unable to get desc for Tx\n");
+ return -EIO;
+ }
+
+ dma->tx_desc->callback = s3c24xx_serial_tx_dma_complete;
+ dma->tx_desc->callback_param = ourport;
+ dma->tx_bytes_requested = dma->tx_size;
+
+ ourport->tx_in_progress = S3C24XX_TX_DMA;
+ dma->tx_cookie = dmaengine_submit(dma->tx_desc);
+ dma_async_issue_pending(dma->tx_chan);
+ return 0;
+}
+
+static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long count;
+
+ /* Get data size up to the end of buffer */
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ if (!count) {
+ s3c24xx_serial_stop_tx(port);
+ return;
+ }
+
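+ /* Use PIO when DMA is unavailable or the data is shorter than one FIFO */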
+ if (!ourport->dma || !ourport->dma->tx_chan || count < port->fifosize)
+ s3c24xx_serial_start_tx_pio(ourport);
+ else
+ s3c24xx_serial_start_tx_dma(ourport, count);
+}
+
+void s3c24xx_serial_start_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ struct circ_buf *xmit = &port->state->xmit;
if (!tx_enabled(port)) {
if (port->flags & UPF_CONS_FLOW)
s3c24xx_serial_rx_disable(port);
- if (s3c24xx_serial_has_interrupt_mask(port))
- __clear_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
- else
- enable_irq(ourport->tx_irq);
tx_enabled(port) = 1;
+ if (!ourport->dma || !ourport->dma->tx_chan)
+ s3c24xx_serial_start_tx_pio(ourport);
+ }
+
+ if (ourport->dma && ourport->dma->tx_chan) {
+ if (!uart_circ_empty(xmit) && !ourport->tx_in_progress)
+ s3c24xx_serial_start_next_tx(ourport);
+ }
+}
+
+static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport,
+ struct tty_port *tty, int count)
+{
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ int copied;
+
+ if (!count)
+ return;
+
+ dma_sync_single_for_cpu(ourport->port.dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ ourport->port.icount.rx += count;
+ if (!tty) {
+ dev_err(ourport->port.dev, "No tty port\n");
+ return;
+ }
+ copied = tty_insert_flip_string(tty,
+ ((unsigned char *)(ourport->dma->rx_buf)), count);
+ if (copied != count) {
+ WARN_ON(1);
+ dev_err(ourport->port.dev, "RxData copy to tty layer failed\n");
+ }
+}
+
+static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
+ unsigned long ufstat);
+
+static void uart_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ struct tty_port *tty = &port->state->port;
+ unsigned int ch, ufstat;
+ unsigned int count;
+
+ ufstat = rd_regl(port, S3C2410_UFSTAT);
+ count = s3c24xx_serial_rx_fifocnt(ourport, ufstat);
+
+ if (!count)
+ return;
+
+ while (count-- > 0) {
+ ch = rd_regb(port, S3C2410_URXH);
+
+ ourport->port.icount.rx++;
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
}
+
+ tty_flip_buffer_push(tty);
}
static void s3c24xx_serial_stop_rx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct tty_port *t = &port->state->port;
+ struct dma_tx_state state;
+ enum dma_status dma_status;
+ unsigned int received;
if (rx_enabled(port)) {
dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
@@ -197,6 +437,17 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
disable_irq_nosync(ourport->rx_irq);
rx_enabled(port) = 0;
}
+ if (dma && dma->rx_chan) {
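+ /* Stop any in-flight RX DMA and push the data already received to the tty layer */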
+ dmaengine_pause(dma->rx_chan);
+ dma_status = dmaengine_tx_status(dma->rx_chan,
+ dma->rx_cookie, &state);
+ if (dma_status == DMA_IN_PROGRESS ||
+ dma_status == DMA_PAUSED) {
+ received = dma->rx_bytes_requested - state.residue;
+ dmaengine_terminate_all(dma->rx_chan);
+ s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
+ }
+ }
}
static inline struct s3c24xx_uart_info
@@ -228,12 +479,157 @@ static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
}
+static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport);
+static void s3c24xx_serial_rx_dma_complete(void *args)
+{
+ struct s3c24xx_uart_port *ourport = args;
+ struct uart_port *port = &ourport->port;
+
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct tty_port *t = &port->state->port;
+ struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
+
+ struct dma_tx_state state;
+ unsigned long flags;
+ int received;
+
+ dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
+ received = dma->rx_bytes_requested - state.residue;
+ async_tx_ack(dma->rx_desc);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (received)
+ s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
+
+ if (tty) {
+ tty_flip_buffer_push(t);
+ tty_kref_put(tty);
+ }
+
+ s3c64xx_start_rx_dma(ourport);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
+{
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+
+ dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
+ dma->rx_addr, dma->rx_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!dma->rx_desc) {
+ dev_err(ourport->port.dev, "Unable to get desc for Rx\n");
+ return;
+ }
+
+ dma->rx_desc->callback = s3c24xx_serial_rx_dma_complete;
+ dma->rx_desc->callback_param = ourport;
+ dma->rx_bytes_requested = dma->rx_size;
+
+ dma->rx_cookie = dmaengine_submit(dma->rx_desc);
+ dma_async_issue_pending(dma->rx_chan);
+}
/* ? - where has parity gone?? */
#define S3C2410_UERSTAT_PARITY (0x1000)
-static irqreturn_t
-s3c24xx_serial_rx_chars(int irq, void *dev_id)
+static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ unsigned int ucon;
+
+ /* set Rx mode to DMA mode */
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~(S3C64XX_UCON_RXBURST_MASK |
+ S3C64XX_UCON_TIMEOUT_MASK |
+ S3C64XX_UCON_EMPTYINT_EN |
+ S3C64XX_UCON_DMASUS_EN |
+ S3C64XX_UCON_TIMEOUT_EN |
+ S3C64XX_UCON_RXMODE_MASK);
+ ucon |= S3C64XX_UCON_RXBURST_16 |
+ 0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
+ S3C64XX_UCON_EMPTYINT_EN |
+ S3C64XX_UCON_TIMEOUT_EN |
+ S3C64XX_UCON_RXMODE_DMA;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ ourport->rx_mode = S3C24XX_RX_DMA;
+}
+
+static void enable_rx_pio(struct s3c24xx_uart_port *ourport)
+{
+ struct uart_port *port = &ourport->port;
+ unsigned int ucon;
+
+ /* set Rx mode to PIO (CPU) mode */
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= ~(S3C64XX_UCON_TIMEOUT_MASK |
+ S3C64XX_UCON_EMPTYINT_EN |
+ S3C64XX_UCON_DMASUS_EN |
+ S3C64XX_UCON_TIMEOUT_EN |
+ S3C64XX_UCON_RXMODE_MASK);
+ ucon |= 0xf << S3C64XX_UCON_TIMEOUT_SHIFT |
+ S3C64XX_UCON_TIMEOUT_EN |
+ S3C64XX_UCON_RXMODE_CPU;
+ wr_regl(port, S3C2410_UCON, ucon);
+
+ ourport->rx_mode = S3C24XX_RX_PIO;
+}
+
+static irqreturn_t s3c24xx_serial_rx_chars_dma(int irq, void *dev_id)
+{
+ unsigned int utrstat, ufstat, received;
+ struct s3c24xx_uart_port *ourport = dev_id;
+ struct uart_port *port = &ourport->port;
+ struct s3c24xx_uart_dma *dma = ourport->dma;
+ struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
+ struct tty_port *t = &port->state->port;
+ unsigned long flags;
+ struct dma_tx_state state;
+
+ utrstat = rd_regl(port, S3C2410_UTRSTAT);
+ ufstat = rd_regl(port, S3C2410_UFSTAT);
+
+ spin_lock_irqsave(&port->lock, flags);
+
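+ /* No receive-timeout yet: data keeps arriving, so hand reception over to DMA */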
+ if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
+ s3c64xx_start_rx_dma(ourport);
+ if (ourport->rx_mode == S3C24XX_RX_PIO)
+ enable_rx_dma(ourport);
+ goto finish;
+ }
+
+ if (ourport->rx_mode == S3C24XX_RX_DMA) {
+ dmaengine_pause(dma->rx_chan);
+ dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
+ dmaengine_terminate_all(dma->rx_chan);
+ received = dma->rx_bytes_requested - state.residue;
+ s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
+
+ enable_rx_pio(ourport);
+ }
+
+ uart_rx_drain_fifo(ourport);
+
+ if (tty) {
+ tty_flip_buffer_push(t);
+ tty_kref_put(tty);
+ }
+
+ wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);
+
+finish:
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t s3c24xx_serial_rx_chars_pio(int irq, void *dev_id)
{
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
@@ -324,16 +720,33 @@ out:
return IRQ_HANDLED;
}
+
+static irqreturn_t s3c24xx_serial_rx_chars(int irq, void *dev_id)
+{
+ struct s3c24xx_uart_port *ourport = dev_id;
+
+ if (ourport->dma && ourport->dma->rx_chan)
+ return s3c24xx_serial_rx_chars_dma(irq, dev_id);
+ return s3c24xx_serial_rx_chars_pio(irq, dev_id);
+}
+
static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
{
struct s3c24xx_uart_port *ourport = id;
struct uart_port *port = &ourport->port;
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
- int count = port->fifosize;
+ int count;
spin_lock_irqsave(&port->lock, flags);
+ count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
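+ /* Enough pending data for a burst: switch from interrupt-driven TX to DMA */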
+ if (ourport->dma && ourport->dma->tx_chan && count >= port->fifosize) {
+ s3c24xx_serial_start_tx_dma(ourport, count);
+ goto out;
+ }
+
if (port->x_char) {
wr_regb(port, S3C2410_UTXH, port->x_char);
port->icount.tx++;
@@ -352,6 +765,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
/* try and drain the buffer... */
+ count = port->fifosize;
while (!uart_circ_empty(xmit) && count-- > 0) {
if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
break;
@@ -453,6 +867,93 @@ static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&port->lock, flags);
}
+static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+{
+ struct s3c24xx_uart_dma *dma = p->dma;
+ dma_cap_mask_t mask;
+ unsigned long flags;
+
+ /* Default slave configuration parameters */
+ dma->rx_conf.direction = DMA_DEV_TO_MEM;
+ dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
+ dma->rx_conf.src_maxburst = 16;
+
+ dma->tx_conf.direction = DMA_MEM_TO_DEV;
+ dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
+ if (dma_get_cache_alignment() >= 16)
+ dma->tx_conf.dst_maxburst = 16;
+ else
+ dma->tx_conf.dst_maxburst = 1;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dma->rx_chan = dma_request_slave_channel_compat(mask, dma->fn,
+ dma->rx_param, p->port.dev, "rx");
+ if (!dma->rx_chan)
+ return -ENODEV;
+
+ dmaengine_slave_config(dma->rx_chan, &dma->rx_conf);
+
+ dma->tx_chan = dma_request_slave_channel_compat(mask, dma->fn,
+ dma->tx_param, p->port.dev, "tx");
+ if (!dma->tx_chan) {
+ dma_release_channel(dma->rx_chan);
+ return -ENODEV;
+ }
+
+ dmaengine_slave_config(dma->tx_chan, &dma->tx_conf);
+
+ /* RX buffer */
+ dma->rx_size = PAGE_SIZE;
+
+ dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL);
+
+ if (!dma->rx_buf) {
+ dma_release_channel(dma->rx_chan);
+ dma_release_channel(dma->tx_chan);
+ return -ENOMEM;
+ }
+
+ dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
+ dma->rx_size, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&p->port.lock, flags);
+
+ /* TX buffer */
+ dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
+ p->port.state->xmit.buf,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+ spin_unlock_irqrestore(&p->port.lock, flags);
+
+ return 0;
+}
+
+static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
+{
+ struct s3c24xx_uart_dma *dma = p->dma;
+
+ if (dma->rx_chan) {
+ dmaengine_terminate_all(dma->rx_chan);
+ dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
+ dma->rx_size, DMA_FROM_DEVICE);
+ kfree(dma->rx_buf);
+ dma_release_channel(dma->rx_chan);
+ dma->rx_chan = NULL;
+ }
+
+ if (dma->tx_chan) {
+ dmaengine_terminate_all(dma->tx_chan);
+ dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
+ UART_XMIT_SIZE, DMA_TO_DEVICE);
+ dma_release_channel(dma->tx_chan);
+ dma->tx_chan = NULL;
+ }
+}
+
static void s3c24xx_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
@@ -478,6 +979,11 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
wr_regl(port, S3C64XX_UINTP, 0xf);
wr_regl(port, S3C64XX_UINTM, 0xf);
}
+
+ if (ourport->dma)
+ s3c24xx_serial_release_dma(ourport);
+
+ ourport->tx_in_progress = 0;
}
static int s3c24xx_serial_startup(struct uart_port *port)
@@ -529,12 +1035,21 @@ err:
static int s3c64xx_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
+ unsigned long flags;
+ unsigned int ufcon;
int ret;
dbg("s3c64xx_serial_startup: port=%p (%08llx,%p)\n",
port, (unsigned long long)port->mapbase, port->membase);
wr_regl(port, S3C64XX_UINTM, 0xf);
+ if (ourport->dma) {
+ ret = s3c24xx_serial_request_dma(ourport);
+ if (ret < 0) {
+ dev_warn(port->dev, "DMA request failed\n");
+ return ret;
+ }
+ }
ret = request_irq(port->irq, s3c64xx_serial_handle_irq, IRQF_SHARED,
s3c24xx_serial_portname(port), ourport);
@@ -549,8 +1064,20 @@ static int s3c64xx_serial_startup(struct uart_port *port)
tx_enabled(port) = 0;
ourport->tx_claimed = 1;
+ spin_lock_irqsave(&port->lock, flags);
+
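+ /* Reset both FIFOs and set the RX trigger level before starting in PIO mode */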
+ ufcon = rd_regl(port, S3C2410_UFCON);
+ ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX |
+ S5PV210_UFCON_RXTRIG8;
+ wr_regl(port, S3C2410_UFCON, ufcon);
+
+ enable_rx_pio(ourport);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
/* Enable Rx Interrupt */
__clear_bit(S3C64XX_UINTM_RXD, portaddrl(port, S3C64XX_UINTM));
+
dbg("s3c64xx_serial_startup ok\n");
return ret;
}
@@ -1209,6 +1736,18 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ret = platform_get_irq(platdev, 1);
if (ret > 0)
ourport->tx_irq = ret;
+ /*
+ * DMA is currently supported only on DT platforms, if DMA properties
+ * are specified.
+ */
+ if (platdev->dev.of_node && of_find_property(platdev->dev.of_node,
+ "dmas", NULL)) {
+ ourport->dma = devm_kzalloc(port->dev,
+ sizeof(*ourport->dma),
+ GFP_KERNEL);
+ if (!ourport->dma)
+ return -ENOMEM;
+ }
ourport->clk = clk_get(&platdev->dev, "uart");
if (IS_ERR(ourport->clk)) {
@@ -1857,6 +2396,111 @@ static struct platform_driver samsung_serial_driver = {
module_platform_driver(samsung_serial_driver);
+#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
+/*
+ * Early console.
+ */
+
+struct samsung_early_console_data {
+ u32 txfull_mask;
+};
+
+static void samsung_early_busyuart(struct uart_port *port)
+{
+ while (!(readl(port->membase + S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXFE))
+ ;
+}
+
+static void samsung_early_busyuart_fifo(struct uart_port *port)
+{
+ struct samsung_early_console_data *data = port->private_data;
+
+ while (readl(port->membase + S3C2410_UFSTAT) & data->txfull_mask)
+ ;
+}
+
+static void samsung_early_putc(struct uart_port *port, int c)
+{
+ if (readl(port->membase + S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE)
+ samsung_early_busyuart_fifo(port);
+ else
+ samsung_early_busyuart(port);
+
+ writeb(c, port->membase + S3C2410_UTXH);
+}
+
+static void samsung_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, samsung_early_putc);
+}
+
+static int __init samsung_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = samsung_early_write;
+ return 0;
+}
+
+/* S3C2410 */
+static struct samsung_early_console_data s3c2410_early_console_data = {
+ .txfull_mask = S3C2410_UFSTAT_TXFULL,
+};
+
+static int __init s3c2410_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->port.private_data = &s3c2410_early_console_data;
+ return samsung_early_console_setup(device, opt);
+}
+OF_EARLYCON_DECLARE(s3c2410, "samsung,s3c2410-uart",
+ s3c2410_early_console_setup);
+EARLYCON_DECLARE(s3c2410, s3c2410_early_console_setup);
+
+/* S3C2412, S3C2440, S3C64xx */
+static struct samsung_early_console_data s3c2440_early_console_data = {
+ .txfull_mask = S3C2440_UFSTAT_TXFULL,
+};
+
+static int __init s3c2440_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->port.private_data = &s3c2440_early_console_data;
+ return samsung_early_console_setup(device, opt);
+}
+OF_EARLYCON_DECLARE(s3c2412, "samsung,s3c2412-uart",
+ s3c2440_early_console_setup);
+OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
+ s3c2440_early_console_setup);
+OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
+ s3c2440_early_console_setup);
+EARLYCON_DECLARE(s3c2412, s3c2440_early_console_setup);
+EARLYCON_DECLARE(s3c2440, s3c2440_early_console_setup);
+EARLYCON_DECLARE(s3c6400, s3c2440_early_console_setup);
+
+/* S5PV210, EXYNOS */
+static struct samsung_early_console_data s5pv210_early_console_data = {
+ .txfull_mask = S5PV210_UFSTAT_TXFULL,
+};
+
+static int __init s5pv210_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->port.private_data = &s5pv210_early_console_data;
+ return samsung_early_console_setup(device, opt);
+}
+OF_EARLYCON_DECLARE(s5pv210, "samsung,s5pv210-uart",
+ s5pv210_early_console_setup);
+OF_EARLYCON_DECLARE(exynos4210, "samsung,exynos4210-uart",
+ s5pv210_early_console_setup);
+EARLYCON_DECLARE(s5pv210, s5pv210_early_console_setup);
+EARLYCON_DECLARE(exynos4210, s5pv210_early_console_setup);
+#endif
+
MODULE_ALIAS("platform:samsung-uart");
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");