Diffstat (limited to 'drivers/tty/serial/stm32-usart.c')
-rw-r--r--  drivers/tty/serial/stm32-usart.c | 388
1 file changed, 306 insertions(+), 82 deletions(-)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 8f032e77b954..3244e7f6818c 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -165,63 +165,61 @@ static int stm32_usart_init_rs485(struct uart_port *port,
return uart_get_rs485_mode(port);
}
-static int stm32_usart_pending_rx(struct uart_port *port, u32 *sr,
- int *last_res, bool threaded)
+static bool stm32_usart_rx_dma_enabled(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- enum dma_status status;
- struct dma_tx_state state;
- *sr = readl_relaxed(port->membase + ofs->isr);
+ if (!stm32_port->rx_ch)
+ return false;
- if (threaded && stm32_port->rx_ch) {
- status = dmaengine_tx_status(stm32_port->rx_ch,
- stm32_port->rx_ch->cookie,
- &state);
- if (status == DMA_IN_PROGRESS && (*last_res != state.residue))
- return 1;
- else
- return 0;
- } else if (*sr & USART_SR_RXNE) {
- return 1;
+ return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR);
+}
+
+/* Return true when data is pending in PIO mode, false otherwise. */
+static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ *sr = readl_relaxed(port->membase + ofs->isr);
+ /* Get pending characters in RDR or FIFO */
+ if (*sr & USART_SR_RXNE) {
+ /* Get all pending characters from the RDR or the FIFO when using interrupts */
+ if (!stm32_usart_rx_dma_enabled(port))
+ return true;
+
+ /* Handle only RX data errors when using DMA */
+ if (*sr & USART_SR_ERR_MASK)
+ return true;
}
- return 0;
+
+ return false;
}
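
For reference, the PIO pending test above reduces to a small predicate on the status register: RXNE alone means data must be drained in plain interrupt mode, while with DMA enabled only the error bits force a PIO drain. A minimal standalone sketch of that predicate, using illustrative stand-ins for the USART_SR_* bit definitions (not the real register layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SR_RXNE		(1u << 5)	/* illustrative: read data register not empty */
#define SR_ERR_MASK	0x0fu		/* illustrative: PE/FE/NE/ORE error bits */

/* Mirrors the logic of stm32_usart_pending_rx_pio() */
static bool pending_rx_pio(uint32_t sr, bool dma_enabled)
{
	if (sr & SR_RXNE) {
		if (!dma_enabled)
			return true;	/* interrupt mode: drain RDR/FIFO */
		if (sr & SR_ERR_MASK)
			return true;	/* DMA mode: only errors go through PIO */
	}
	return false;
}

int main(void)
{
	printf("%d\n", pending_rx_pio(SR_RXNE, false));			/* 1 */
	printf("%d\n", pending_rx_pio(SR_RXNE, true));			/* 0 */
	printf("%d\n", pending_rx_pio(SR_RXNE | SR_ERR_MASK, true));	/* 1 */
	return 0;
}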
-static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr,
- int *last_res)
+static unsigned long stm32_usart_get_char_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long c;
- if (stm32_port->rx_ch) {
- c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--];
- if ((*last_res) == 0)
- *last_res = RX_BUF_L;
- } else {
- c = readl_relaxed(port->membase + ofs->rdr);
- /* apply RDR data mask */
- c &= stm32_port->rdr_mask;
- }
+ c = readl_relaxed(port->membase + ofs->rdr);
+ /* Apply RDR data mask */
+ c &= stm32_port->rdr_mask;
return c;
}
-static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
- struct tty_port *tport = &port->state->port;
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
unsigned long c;
+ unsigned int size = 0;
u32 sr;
char flag;
- spin_lock(&port->lock);
-
- while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
- threaded)) {
+ while (stm32_usart_pending_rx_pio(port, &sr)) {
sr |= USART_SR_DUMMY_RX;
flag = TTY_NORMAL;
@@ -240,8 +238,9 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
writel_relaxed(sr & USART_SR_ERR_MASK,
port->membase + ofs->icr);
- c = stm32_usart_get_char(port, &sr, &stm32_port->last_res);
+ c = stm32_usart_get_char_pio(port);
port->icount.rx++;
+ size++;
if (sr & USART_SR_ERR_MASK) {
if (sr & USART_SR_ORE) {
port->icount.overrun++;
@@ -275,9 +274,95 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
uart_insert_char(port, sr, USART_SR_ORE, c, flag);
}
- uart_unlock_and_check_sysrq(port);
+ return size;
+}
+
+static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct tty_port *ttyport = &stm32_port->port.state->port;
+ unsigned char *dma_start;
+ int dma_count, i;
- tty_flip_buffer_push(tport);
+ dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
+
+ /*
+ * Apply rdr_mask on the buffer in order to mask out the parity bit.
+ * This loop is unnecessary in cs8 mode because DMA copies only
+ * 8 bits and already ignores the parity bit.
+ */
+ if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
+ for (i = 0; i < dma_size; i++)
+ *(dma_start + i) &= stm32_port->rdr_mask;
+
+ dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
+ port->icount.rx += dma_count;
+ if (dma_count != dma_size)
+ port->icount.buf_overrun++;
+ stm32_port->last_res -= dma_count;
+ if (stm32_port->last_res == 0)
+ stm32_port->last_res = RX_BUF_L;
+}
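
The rdr_mask loop above only matters for word lengths below 8 data bits; with cs8 the DMA already transfers exactly the 8 data bits. A standalone sketch of the masking, assuming a 7-bit-data-plus-parity configuration (mask and sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 7 data bits + parity: rdr_mask would be BIT(7) - 1 = 0x7f */
	const uint8_t rdr_mask = 0x7f;
	uint8_t buf[4] = { 0xc1, 0x42, 0xb3, 0x34 };	/* parity bit set on some bytes */
	unsigned int i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] &= rdr_mask;	/* drop the parity bit, as the driver loop does */

	for (i = 0; i < sizeof(buf); i++)
		printf("0x%02x ", buf[i]);	/* 0x41 0x42 0x33 0x34 */
	printf("\n");
	return 0;
}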
+
+static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ unsigned int dma_size, size = 0;
+
+ /* The DMA buffer is configured in cyclic mode and handles the wrap-around of the buffer. */
+ if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
+ /* Conditional first part: from last_res to end of DMA buffer */
+ dma_size = stm32_port->last_res;
+ stm32_usart_push_buffer_dma(port, dma_size);
+ size = dma_size;
+ }
+
+ dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
+ stm32_usart_push_buffer_dma(port, dma_size);
+ size += dma_size;
+
+ return size;
+}
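
The two-step drain above is easiest to follow with numbers: last_res counts the bytes left before the DMA write pointer wraps, so when the residue reported by dmaengine_tx_status() exceeds last_res, the writer has wrapped and the tail segment (last_res bytes) must be pushed before the segment starting at the beginning of the buffer. A standalone model of that index arithmetic, with the buffer size shrunk for illustration:

#include <stdio.h>

#define RX_BUF_L 16	/* illustrative; the driver's buffer is larger */

/* Models one drain pass: returns bytes consumed, updates *last_res */
static unsigned int drain(unsigned int *last_res, unsigned int residue)
{
	unsigned int size = 0, dma_size;

	if (residue > *last_res) {
		/* Writer wrapped: push the tail first, from last_res to end */
		dma_size = *last_res;
		size += dma_size;
		*last_res = RX_BUF_L;
	}
	/* Then push from the current read position down to the residue */
	dma_size = *last_res - residue;
	size += dma_size;
	*last_res -= dma_size;
	if (*last_res == 0)
		*last_res = RX_BUF_L;
	return size;
}

int main(void)
{
	unsigned int last_res = RX_BUF_L;

	printf("%u\n", drain(&last_res, 10));	/* 6 bytes, no wrap, last_res = 10 */
	printf("%u\n", drain(&last_res, 14));	/* 12 bytes, wraps, last_res = 14 */
	return 0;
}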
+
+static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ enum dma_status rx_dma_status;
+ u32 sr;
+ unsigned int size = 0;
+
+ if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) {
+ rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
+ stm32_port->rx_ch->cookie,
+ &stm32_port->rx_dma_state);
+ if (rx_dma_status == DMA_IN_PROGRESS) {
+ /* Empty DMA buffer */
+ size = stm32_usart_receive_chars_dma(port);
+ sr = readl_relaxed(port->membase + ofs->isr);
+ if (sr & USART_SR_ERR_MASK) {
+ /* Disable DMA request line */
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+
+ /* Switch to PIO mode to handle the errors */
+ size += stm32_usart_receive_chars_pio(port);
+
+ /* Switch back to DMA mode */
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
+ }
+ } else {
+ /* Disable RX DMA */
+ dmaengine_terminate_async(stm32_port->rx_ch);
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ /* Fall back to interrupt mode */
+ dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
+ size = stm32_usart_receive_chars_pio(port);
+ }
+ } else {
+ size = stm32_usart_receive_chars_pio(port);
+ }
+
+ return size;
}
static void stm32_usart_tx_dma_complete(void *arg)
@@ -312,6 +397,20 @@ static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
+static void stm32_usart_rx_dma_complete(void *arg)
+{
+ struct uart_port *port = arg;
+ struct tty_port *tport = &port->state->port;
+ unsigned int size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ size = stm32_usart_receive_chars(port, false);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
+ if (size)
+ tty_flip_buffer_push(tport);
+}
+
static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -462,6 +561,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
+ unsigned int size;
sr = readl_relaxed(port->membase + ofs->isr);
@@ -478,8 +578,20 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
pm_wakeup_event(tport->tty->dev, 0);
}
- if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
- stm32_usart_receive_chars(port, false);
+ /*
+ * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA
+ * request line has been masked by HW and RX data are stacking up in the FIFO.
+ */
+ if (!stm32_port->throttled) {
+ if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) ||
+ ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) {
+ spin_lock(&port->lock);
+ size = stm32_usart_receive_chars(port, false);
+ uart_unlock_and_check_sysrq(port);
+ if (size)
+ tty_flip_buffer_push(tport);
+ }
+ }
if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
spin_lock(&port->lock);
@@ -487,7 +599,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
spin_unlock(&port->lock);
}
- if (stm32_port->rx_ch)
+ if (stm32_usart_rx_dma_enabled(port))
return IRQ_WAKE_THREAD;
else
return IRQ_HANDLED;
@@ -496,10 +608,19 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
{
struct uart_port *port = ptr;
+ struct tty_port *tport = &port->state->port;
struct stm32_port *stm32_port = to_stm32_port(port);
+ unsigned int size;
+ unsigned long flags;
- if (stm32_port->rx_ch)
- stm32_usart_receive_chars(port, true);
+ /* Receiver timeout irq for DMA RX */
+ if (!stm32_port->throttled) {
+ spin_lock_irqsave(&port->lock, flags);
+ size = stm32_usart_receive_chars(port, false);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
+ if (size)
+ tty_flip_buffer_push(tport);
+ }
return IRQ_HANDLED;
}
@@ -612,10 +733,19 @@ static void stm32_usart_throttle(struct uart_port *port)
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * Disable DMA request line if enabled, so the RX data gets queued into the FIFO.
+ * Hardware flow control is triggered when RX FIFO is full.
+ */
+ if (stm32_usart_rx_dma_enabled(port))
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
+ stm32_port->throttled = true;
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -631,6 +761,14 @@ static void stm32_usart_unthrottle(struct uart_port *port)
if (stm32_port->cr3_irq)
stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
+ /*
+ * Switch back to DMA mode (re-enable DMA request line).
+ * Hardware flow control is stopped when the FIFO is no longer full.
+ */
+ if (stm32_port->rx_ch)
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
+
+ stm32_port->throttled = false;
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -640,6 +778,10 @@ static void stm32_usart_stop_rx(struct uart_port *port)
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ /* Disable DMA request line. */
+ if (stm32_port->rx_ch)
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
@@ -650,6 +792,48 @@ static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}
+static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ struct dma_async_tx_descriptor *desc;
+ int ret;
+
+ stm32_port->last_res = RX_BUF_L;
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
+ stm32_port->rx_dma_buf,
+ RX_BUF_L, RX_BUF_P,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(port->dev, "rx dma prep cyclic failed\n");
+ return -ENODEV;
+ }
+
+ desc->callback = stm32_usart_rx_dma_complete;
+ desc->callback_param = port;
+
+ /* Push current DMA transaction in the pending queue */
+ ret = dma_submit_error(dmaengine_submit(desc));
+ if (ret) {
+ dmaengine_terminate_sync(stm32_port->rx_ch);
+ return ret;
+ }
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(stm32_port->rx_ch);
+
+ /*
+ * The DMA request line is not re-enabled at resume when the port is throttled.
+ * It will be re-enabled by the unthrottle ops.
+ */
+ if (!stm32_port->throttled)
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
+
+ return 0;
+}
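
For readers less familiar with the dmaengine slave API, the helper above follows the standard cyclic-transfer sequence: prepare a cyclic descriptor, attach a per-period completion callback, submit it, then issue pending requests. A condensed sketch of that pattern under hypothetical names (start_cyclic_rx and its parameters are placeholders, not driver symbols):

#include <linux/dmaengine.h>

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf_dma,
			   size_t buf_len, size_t period,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;

	/* One descriptor loops over the whole buffer, period by period */
	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENODEV;

	desc->callback = cb;		/* invoked once per completed period */
	desc->callback_param = cb_param;

	if (dma_submit_error(dmaengine_submit(desc))) {
		dmaengine_terminate_sync(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);	/* actually starts the transfer */
	return 0;
}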
+
static int stm32_usart_startup(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -676,6 +860,14 @@ static int stm32_usart_startup(struct uart_port *port)
if (ofs->rqr != UNDEF_REG)
writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
+ if (stm32_port->rx_ch) {
+ ret = stm32_usart_start_rx_dma_cyclic(port);
+ if (ret) {
+ free_irq(port->irq, port);
+ return ret;
+ }
+ }
+
/* RX enabling */
val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
stm32_usart_set_bits(port, ofs->cr1, val);
@@ -708,6 +900,10 @@ static void stm32_usart_shutdown(struct uart_port *port)
if (ret)
dev_err(port->dev, "Transmission is not complete\n");
+ /* Disable RX DMA. */
+ if (stm32_port->rx_ch)
+ dmaengine_terminate_async(stm32_port->rx_ch);
+
/* flush RX & TX FIFO */
if (ofs->rqr != UNDEF_REG)
writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
@@ -810,9 +1006,11 @@ static void stm32_usart_set_termios(struct uart_port *port,
stm32_port->cr1_irq = USART_CR1_RTOIE;
writel_relaxed(bits, port->membase + ofs->rtor);
cr2 |= USART_CR2_RTOEN;
- /* Not using dma, enable fifo threshold irq */
- if (!stm32_port->rx_ch)
- stm32_port->cr3_irq = USART_CR3_RXFTIE;
+ /*
+ * Enable the FIFO threshold irq in two cases: either when there is no DMA, or on
+ * wake-up over USART from low power, until the DMA gets re-enabled by resume.
+ */
+ stm32_port->cr3_irq = USART_CR3_RXFTIE;
}
cr1 |= stm32_port->cr1_irq;
@@ -875,8 +1073,16 @@ static void stm32_usart_set_termios(struct uart_port *port,
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= USART_SR_DUMMY_RX;
- if (stm32_port->rx_ch)
+ if (stm32_port->rx_ch) {
+ /*
+ * Set up DMA to collect only valid data and enable error irqs.
+ * This also enables break reception when using DMA.
+ */
+ cr1 |= USART_CR1_PEIE;
+ cr3 |= USART_CR3_EIE;
cr3 |= USART_CR3_DMAR;
+ cr3 |= USART_CR3_DDRE;
+ }
if (rs485conf->flags & SER_RS485_ENABLED) {
stm32_usart_config_reg_rs485(&cr1, &cr3,
@@ -1166,7 +1372,6 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
struct uart_port *port = &stm32port->port;
struct device *dev = &pdev->dev;
struct dma_slave_config config;
- struct dma_async_tx_descriptor *desc = NULL;
int ret;
/*
@@ -1194,32 +1399,6 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
return ret;
}
- /* Prepare a DMA cyclic transaction */
- desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch,
- stm32port->rx_dma_buf,
- RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(dev, "rx dma prep cyclic failed\n");
- stm32_usart_of_dma_rx_remove(stm32port, pdev);
- return -ENODEV;
- }
-
- /* No callback as dma buffer is drained on usart interrupt */
- desc->callback = NULL;
- desc->callback_param = NULL;
-
- /* Push current DMA transaction in the pending queue */
- ret = dma_submit_error(dmaengine_submit(desc));
- if (ret) {
- dmaengine_terminate_sync(stm32port->rx_ch);
- stm32_usart_of_dma_rx_remove(stm32port, pdev);
- return ret;
- }
-
- /* Issue pending DMA requests */
- dma_async_issue_pending(stm32port->rx_ch);
-
return 0;
}
@@ -1372,6 +1551,7 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
int err;
+ u32 cr3;
pm_runtime_get_sync(&pdev->dev);
err = uart_remove_one_port(&stm32_usart_driver, port);
@@ -1382,7 +1562,12 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
+ cr3 = readl_relaxed(port->membase + ofs->cr3);
+ cr3 &= ~USART_CR3_EIE;
+ cr3 &= ~USART_CR3_DMAR;
+ cr3 &= ~USART_CR3_DDRE;
+ writel_relaxed(cr3, port->membase + ofs->cr3);
if (stm32_port->tx_ch) {
dmaengine_terminate_async(stm32_port->tx_ch);
@@ -1391,7 +1576,6 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
}
if (stm32_port->rx_ch) {
- dmaengine_terminate_async(stm32_port->rx_ch);
stm32_usart_of_dma_rx_remove(stm32_port, pdev);
dma_release_channel(stm32_port->rx_ch);
}
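
Note that the remove path above clears USART_CR3_EIE, USART_CR3_DMAR and USART_CR3_DDRE with a single read-modify-write of CR3 instead of three separate stm32_usart_clr_bits() calls, reducing the number of register accesses. The pattern in a standalone model (bit positions illustrative):

#include <stdint.h>
#include <stdio.h>

#define CR3_EIE		(1u << 0)	/* illustrative bit positions */
#define CR3_DMAR	(1u << 6)
#define CR3_DDRE	(1u << 13)

int main(void)
{
	uint32_t cr3 = CR3_EIE | CR3_DMAR | CR3_DDRE | (1u << 3);

	/* One read, one write, three bits cleared */
	cr3 &= ~(CR3_EIE | CR3_DMAR | CR3_DDRE);
	printf("0x%08x\n", cr3);	/* 0x00000008: only bit 3 survives */
	return 0;
}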
@@ -1504,14 +1688,18 @@ static struct uart_driver stm32_usart_driver = {
.cons = STM32_SERIAL_CONSOLE,
};
-static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
- bool enable)
+static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
+ bool enable)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ struct tty_port *tport = &port->state->port;
+ int ret;
+ unsigned int size;
+ unsigned long flags;
- if (!stm32_port->wakeup_src)
- return;
+ if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
+ return 0;
/*
* Enable low-power wake-up and wake-up irq if argument is set to
@@ -1520,20 +1708,52 @@ static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
if (enable) {
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
+
+ /*
+ * When DMA is used for reception, it must be disabled before
+ * entering low-power mode and re-enabled when exiting from
+ * low-power mode.
+ */
+ if (stm32_port->rx_ch) {
+ spin_lock_irqsave(&port->lock, flags);
+ /* Avoid race with RX IRQ when DMAR is cleared */
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ /* Poll data from DMA RX buffer if any */
+ size = stm32_usart_receive_chars(port, true);
+ dmaengine_terminate_async(stm32_port->rx_ch);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
+ if (size)
+ tty_flip_buffer_push(tport);
+ }
+
+ /* Poll data from RX FIFO if any */
+ stm32_usart_receive_chars(port, false);
} else {
+ if (stm32_port->rx_ch) {
+ ret = stm32_usart_start_rx_dma_cyclic(port);
+ if (ret)
+ return ret;
+ }
+
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
}
+
+ return 0;
}
static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
+ int ret;
uart_suspend_port(&stm32_usart_driver, port);
- if (device_may_wakeup(dev) || device_wakeup_path(dev))
- stm32_usart_serial_en_wakeup(port, true);
+ if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
+ ret = stm32_usart_serial_en_wakeup(port, true);
+ if (ret)
+ return ret;
+ }
/*
* When "no_console_suspend" is enabled, keep the pinctrl default state
@@ -1554,11 +1774,15 @@ static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
+ int ret;
pinctrl_pm_select_default_state(dev);
- if (device_may_wakeup(dev) || device_wakeup_path(dev))
- stm32_usart_serial_en_wakeup(port, false);
+ if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
+ ret = stm32_usart_serial_en_wakeup(port, false);
+ if (ret)
+ return ret;
+ }
return uart_resume_port(&stm32_usart_driver, port);
}