b.liu | e958203 | 2025-04-17 19:18:16 +0800 | [diff] [blame] | 1 | From 0d6e214f5a257f9b53619ef8aa3b6e767189bdcf Mon Sep 17 00:00:00 2001 |
| 2 | From: Fugang Duan <fugang.duan@nxp.com> |
| 3 | Date: Wed, 11 Sep 2019 16:21:06 +0800 |
| 4 | Subject: [PATCH] tty: serial: fsl_lpuart: enable dma mode for imx8qxp |
| 5 | |
| 6 | imx8qxp lpuart supports eDMA for dma mode, including the EOP (end-of-packet) |
| 7 | feature. But eDMA cannot detect the correct DADDR for current major |
| 8 | loop in cyclic mode, so it doesn't support cyclic mode. |
| 9 | |
| 10 | This patch enables lpuart prep slave sg dma mode for imx8qxp. |
| 11 | |
| 12 | Signed-off-by: Fugang Duan <fugang.duan@nxp.com> |
| 13 | --- |
| 14 | drivers/tty/serial/fsl_lpuart.c | 280 +++++++++++++++++++++++++++++++--------- |
| 15 | 1 file changed, 219 insertions(+), 61 deletions(-) |
| 16 | |
| 17 | --- a/drivers/tty/serial/fsl_lpuart.c |
| 18 | +++ b/drivers/tty/serial/fsl_lpuart.c |
| 19 | @@ -131,6 +131,7 @@ |
| 20 | #define UARTBAUD_M10 0x20000000 |
| 21 | #define UARTBAUD_TDMAE 0x00800000 |
| 22 | #define UARTBAUD_RDMAE 0x00200000 |
| 23 | +#define UARTBAUD_RIDMAE 0x00100000 |
| 24 | #define UARTBAUD_MATCFG 0x00400000 |
| 25 | #define UARTBAUD_BOTHEDGE 0x00020000 |
| 26 | #define UARTBAUD_RESYNCDIS 0x00010000 |
| 27 | @@ -179,7 +180,7 @@ |
| 28 | #define UARTCTRL_SBK 0x00010000 |
| 29 | #define UARTCTRL_MA1IE 0x00008000 |
| 30 | #define UARTCTRL_MA2IE 0x00004000 |
| 31 | -#define UARTCTRL_IDLECFG 0x00000100 |
| 32 | +#define UARTCTRL_IDLECFG_OFF 0x8 |
| 33 | #define UARTCTRL_LOOPS 0x00000080 |
| 34 | #define UARTCTRL_DOZEEN 0x00000040 |
| 35 | #define UARTCTRL_RSRC 0x00000020 |
| 36 | @@ -197,6 +198,7 @@ |
| 37 | #define UARTDATA_MASK 0x3ff |
| 38 | |
| 39 | #define UARTMODIR_IREN 0x00020000 |
| 40 | +#define UARTMODIR_RTSWATER_S 0x8 |
| 41 | #define UARTMODIR_TXCTSSRC 0x00000020 |
| 42 | #define UARTMODIR_TXCTSC 0x00000010 |
| 43 | #define UARTMODIR_RXRTSE 0x00000008 |
| 44 | @@ -210,6 +212,8 @@ |
| 45 | #define UARTFIFO_RXUF 0x00010000 |
| 46 | #define UARTFIFO_TXFLUSH 0x00008000 |
| 47 | #define UARTFIFO_RXFLUSH 0x00004000 |
| 48 | +#define UARTFIFO_RXIDEN_MASK 0x7 |
| 49 | +#define UARTFIFO_RXIDEN_OFF 10 |
| 50 | #define UARTFIFO_TXOFE 0x00000200 |
| 51 | #define UARTFIFO_RXUFE 0x00000100 |
| 52 | #define UARTFIFO_TXFE 0x00000080 |
| 53 | @@ -226,6 +230,9 @@ |
| 54 | #define UARTWATER_TXWATER_OFF 0 |
| 55 | #define UARTWATER_RXWATER_OFF 16 |
| 56 | |
| 57 | +#define UARTFIFO_RXIDEN_RDRF 0x3 |
| 58 | +#define UARTCTRL_IDLECFG 0x7 |
| 59 | + |
| 60 | /* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */ |
| 61 | #define DMA_RX_TIMEOUT (10) |
| 62 | |
| 63 | @@ -252,6 +259,9 @@ struct lpuart_port { |
| 64 | unsigned int txfifo_size; |
| 65 | unsigned int rxfifo_size; |
| 66 | |
| 67 | + u8 rx_watermark; |
| 68 | + bool dma_eeop; |
| 69 | + bool rx_dma_cyclic; |
| 70 | bool lpuart_dma_tx_use; |
| 71 | bool lpuart_dma_rx_use; |
| 72 | struct dma_chan *dma_tx_chan; |
| 73 | @@ -276,33 +286,45 @@ struct lpuart_soc_data { |
| 74 | enum lpuart_type devtype; |
| 75 | char iotype; |
| 76 | u8 reg_off; |
| 77 | + u8 rx_watermark; |
| 78 | + bool rx_dma_cyclic; |
| 79 | }; |
| 80 | |
| 81 | static const struct lpuart_soc_data vf_data = { |
| 82 | .devtype = VF610_LPUART, |
| 83 | .iotype = UPIO_MEM, |
| 84 | + .rx_watermark = 1, |
| 85 | + .rx_dma_cyclic = true, |
| 86 | }; |
| 87 | |
| 88 | static const struct lpuart_soc_data ls1021a_data = { |
| 89 | .devtype = LS1021A_LPUART, |
| 90 | .iotype = UPIO_MEM32BE, |
| 91 | + .rx_watermark = 0, |
| 92 | + .rx_dma_cyclic = true, |
| 93 | }; |
| 94 | |
| 95 | static const struct lpuart_soc_data ls1028a_data = { |
| 96 | .devtype = LS1028A_LPUART, |
| 97 | .iotype = UPIO_MEM32, |
| 98 | + .rx_watermark = 0, |
| 99 | + .rx_dma_cyclic = true, |
| 100 | }; |
| 101 | |
| 102 | static struct lpuart_soc_data imx7ulp_data = { |
| 103 | .devtype = IMX7ULP_LPUART, |
| 104 | .iotype = UPIO_MEM32, |
| 105 | .reg_off = IMX_REG_OFF, |
| 106 | + .rx_watermark = 0, |
| 107 | + .rx_dma_cyclic = true, |
| 108 | }; |
| 109 | |
| 110 | static struct lpuart_soc_data imx8qxp_data = { |
| 111 | .devtype = IMX8QXP_LPUART, |
| 112 | .iotype = UPIO_MEM32, |
| 113 | .reg_off = IMX_REG_OFF, |
| 114 | + .rx_watermark = 31, |
| 115 | + .rx_dma_cyclic = false, |
| 116 | }; |
| 117 | |
| 118 | static const struct of_device_id lpuart_dt_ids[] = { |
| 119 | @@ -317,6 +339,7 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids); |
| 120 | |
| 121 | /* Forward declare this for the dma callbacks*/ |
| 122 | static void lpuart_dma_tx_complete(void *arg); |
| 123 | +static int lpuart_sched_rx_dma(struct lpuart_port *sport); |
| 124 | |
| 125 | static inline bool is_layerscape_lpuart(struct lpuart_port *sport) |
| 126 | { |
| 127 | @@ -1008,19 +1031,15 @@ static irqreturn_t lpuart32_int(int irq, |
| 128 | if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use) |
| 129 | lpuart32_txint(sport); |
| 130 | |
| 131 | + if (sport->lpuart_dma_rx_use && sport->dma_eeop) |
| 132 | + sts &= ~UARTSTAT_IDLE; |
| 133 | + |
| 134 | lpuart32_write(&sport->port, sts, UARTSTAT); |
| 135 | return IRQ_HANDLED; |
| 136 | } |
| 137 | |
| 138 | -static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) |
| 139 | +static void lpuart_rx_error_stat(struct lpuart_port *sport) |
| 140 | { |
| 141 | - struct tty_port *port = &sport->port.state->port; |
| 142 | - struct dma_tx_state state; |
| 143 | - enum dma_status dmastat; |
| 144 | - struct circ_buf *ring = &sport->rx_ring; |
| 145 | - unsigned long flags; |
| 146 | - int count = 0; |
| 147 | - |
| 148 | if (lpuart_is_32(sport)) { |
| 149 | unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); |
| 150 | |
| 151 | @@ -1072,8 +1091,21 @@ static void lpuart_copy_rx_to_tty(struct |
| 152 | writeb(cr2, sport->port.membase + UARTCR2); |
| 153 | } |
| 154 | } |
| 155 | +} |
| 156 | + |
| 157 | +static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) |
| 158 | +{ |
| 159 | + struct tty_port *port = &sport->port.state->port; |
| 160 | + struct dma_tx_state state; |
| 161 | + enum dma_status dmastat; |
| 162 | + struct circ_buf *ring = &sport->rx_ring; |
| 163 | + unsigned long flags; |
| 164 | + int count = 0; |
| 165 | |
| 166 | - async_tx_ack(sport->dma_rx_desc); |
| 167 | + if (!is_imx8qxp_lpuart(sport)) { |
| 168 | + lpuart_rx_error_stat(sport); |
| 169 | + async_tx_ack(sport->dma_rx_desc); |
| 170 | + } |
| 171 | |
| 172 | spin_lock_irqsave(&sport->port.lock, flags); |
| 173 | |
| 174 | @@ -1136,7 +1168,33 @@ static void lpuart_copy_rx_to_tty(struct |
| 175 | spin_unlock_irqrestore(&sport->port.lock, flags); |
| 176 | |
| 177 | tty_flip_buffer_push(port); |
| 178 | - mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout); |
| 179 | + |
| 180 | + if (!sport->dma_eeop) |
| 181 | + mod_timer(&sport->lpuart_timer, |
| 182 | + jiffies + sport->dma_rx_timeout); |
| 183 | +} |
| 184 | + |
| 185 | +static void lpuart_dma_rx_post_handler(struct lpuart_port *sport) |
| 186 | +{ |
| 187 | + unsigned long flags; |
| 188 | + unsigned long rxcount; |
| 189 | + |
| 190 | + spin_lock_irqsave(&sport->port.lock, flags); |
| 191 | + |
| 192 | + /* For end of packet, clear the idle flag to avoid triggering |
| 193 | + * the next transfer. Only i.MX8x lpuart supports EEOP. |
| 194 | + */ |
| 195 | + if (sport->dma_eeop && lpuart_is_32(sport)) { |
| 196 | + rxcount = lpuart32_read(&sport->port, UARTWATER); |
| 197 | + rxcount = rxcount >> UARTWATER_RXCNT_OFF; |
| 198 | + if (!rxcount) |
| 199 | + lpuart32_write(&sport->port, UARTSTAT_IDLE, UARTSTAT); |
| 200 | + } |
| 201 | + |
| 202 | + lpuart_sched_rx_dma(sport); |
| 203 | + |
| 204 | + spin_unlock_irqrestore(&sport->port.lock, flags); |
| 205 | + |
| 206 | } |
| 207 | |
| 208 | static void lpuart_dma_rx_complete(void *arg) |
| 209 | @@ -1144,6 +1202,8 @@ static void lpuart_dma_rx_complete(void |
| 210 | struct lpuart_port *sport = arg; |
| 211 | |
| 212 | lpuart_copy_rx_to_tty(sport); |
| 213 | + if (!sport->rx_dma_cyclic) |
| 214 | + lpuart_dma_rx_post_handler(sport); |
| 215 | } |
| 216 | |
| 217 | static void lpuart_timer_func(struct timer_list *t) |
| 218 | @@ -1151,13 +1211,78 @@ static void lpuart_timer_func(struct tim |
| 219 | struct lpuart_port *sport = from_timer(sport, t, lpuart_timer); |
| 220 | |
| 221 | lpuart_copy_rx_to_tty(sport); |
| 222 | + if (!sport->rx_dma_cyclic) { |
| 223 | + dmaengine_terminate_async(sport->dma_rx_chan); |
| 224 | + lpuart_dma_rx_post_handler(sport); |
| 225 | + } |
| 226 | } |
| 227 | |
| 228 | -static inline int lpuart_start_rx_dma(struct lpuart_port *sport) |
| 229 | +static int lpuart_sched_rxdma_cyclic(struct lpuart_port *sport) |
| 230 | +{ |
| 231 | + sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan, |
| 232 | + sg_dma_address(&sport->rx_sgl), |
| 233 | + sport->rx_sgl.length, |
| 234 | + sport->rx_sgl.length / 2, |
| 235 | + DMA_DEV_TO_MEM, |
| 236 | + DMA_PREP_INTERRUPT); |
| 237 | + if (!sport->dma_rx_desc) { |
| 238 | + dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n"); |
| 239 | + return -EFAULT; |
| 240 | + } |
| 241 | + |
| 242 | + return 0; |
| 243 | +} |
| 244 | + |
| 245 | +static int lpuart_sched_rxdma_slave_sg(struct lpuart_port *sport) |
| 246 | +{ |
| 247 | + dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1, |
| 248 | + DMA_FROM_DEVICE); |
| 249 | + sport->dma_rx_desc = dmaengine_prep_slave_sg(sport->dma_rx_chan, |
| 250 | + &sport->rx_sgl, |
| 251 | + 1, |
| 252 | + DMA_DEV_TO_MEM, |
| 253 | + DMA_PREP_INTERRUPT); |
| 254 | + if (!sport->dma_rx_desc) { |
| 255 | + dev_err(sport->port.dev, "Cannot prepare slave_sg DMA\n"); |
| 256 | + return -EFAULT; |
| 257 | + } |
| 258 | + sport->rx_ring.tail = 0; |
| 259 | + sport->rx_ring.head = 0; |
| 260 | + |
| 261 | + return 0; |
| 262 | +} |
| 263 | + |
| 264 | +static int lpuart_sched_rx_dma(struct lpuart_port *sport) |
| 265 | +{ |
| 266 | + unsigned long temp; |
| 267 | + int ret; |
| 268 | + |
| 269 | + if (sport->rx_dma_cyclic) |
| 270 | + ret = lpuart_sched_rxdma_cyclic(sport); |
| 271 | + else |
| 272 | + ret = lpuart_sched_rxdma_slave_sg(sport); |
| 273 | + |
| 274 | + sport->dma_rx_desc->callback = lpuart_dma_rx_complete; |
| 275 | + sport->dma_rx_desc->callback_param = sport; |
| 276 | + sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc); |
| 277 | + dma_async_issue_pending(sport->dma_rx_chan); |
| 278 | + |
| 279 | + if (lpuart_is_32(sport)) { |
| 280 | + temp = lpuart32_read(&sport->port, UARTBAUD); |
| 281 | + if (sport->dma_eeop) |
| 282 | + temp |= UARTBAUD_RIDMAE; |
| 283 | + temp |= UARTBAUD_RDMAE; |
| 284 | + lpuart32_write(&sport->port, temp, UARTBAUD); |
| 285 | + } else { |
| 286 | + writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS, |
| 287 | + sport->port.membase + UARTCR5); |
| 288 | + } |
| 289 | + |
| 290 | + return ret; |
| 291 | +} |
| 292 | + |
| 293 | +static void lpuart_get_rx_dma_rng_len(struct lpuart_port *sport) |
| 294 | { |
| 295 | - struct dma_slave_config dma_rx_sconfig = {}; |
| 296 | - struct circ_buf *ring = &sport->rx_ring; |
| 297 | - int ret, nent; |
| 298 | int bits, baud; |
| 299 | struct tty_port *port = &sport->port.state->port; |
| 300 | struct tty_struct *tty = port->tty; |
| 301 | @@ -1177,6 +1302,18 @@ static inline int lpuart_start_rx_dma(st |
| 302 | sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1)); |
| 303 | if (sport->rx_dma_rng_buf_len < 16) |
| 304 | sport->rx_dma_rng_buf_len = 16; |
| 305 | +} |
| 306 | + |
| 307 | +static inline int lpuart_start_rx_dma(struct lpuart_port *sport) |
| 308 | +{ |
| 309 | + struct dma_slave_config dma_rx_sconfig = {}; |
| 310 | + struct circ_buf *ring = &sport->rx_ring; |
| 311 | + int ret, nent; |
| 312 | + |
| 313 | + if (!sport->dma_eeop) |
| 314 | + lpuart_get_rx_dma_rng_len(sport); |
| 315 | + else |
| 316 | + sport->rx_dma_rng_buf_len = PAGE_SIZE; |
| 317 | |
| 318 | ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC); |
| 319 | if (!ring->buf) |
| 320 | @@ -1202,32 +1339,7 @@ static inline int lpuart_start_rx_dma(st |
| 321 | return ret; |
| 322 | } |
| 323 | |
| 324 | - sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan, |
| 325 | - sg_dma_address(&sport->rx_sgl), |
| 326 | - sport->rx_sgl.length, |
| 327 | - sport->rx_sgl.length / 2, |
| 328 | - DMA_DEV_TO_MEM, |
| 329 | - DMA_PREP_INTERRUPT); |
| 330 | - if (!sport->dma_rx_desc) { |
| 331 | - dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n"); |
| 332 | - return -EFAULT; |
| 333 | - } |
| 334 | - |
| 335 | - sport->dma_rx_desc->callback = lpuart_dma_rx_complete; |
| 336 | - sport->dma_rx_desc->callback_param = sport; |
| 337 | - sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc); |
| 338 | - dma_async_issue_pending(sport->dma_rx_chan); |
| 339 | - |
| 340 | - if (lpuart_is_32(sport)) { |
| 341 | - unsigned long temp = lpuart32_read(&sport->port, UARTBAUD); |
| 342 | - |
| 343 | - lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD); |
| 344 | - } else { |
| 345 | - writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS, |
| 346 | - sport->port.membase + UARTCR5); |
| 347 | - } |
| 348 | - |
| 349 | - return 0; |
| 350 | + return lpuart_sched_rx_dma(sport); |
| 351 | } |
| 352 | |
| 353 | static void lpuart_dma_rx_free(struct uart_port *port) |
| 354 | @@ -1433,8 +1545,10 @@ static void lpuart_setup_watermark(struc |
| 355 | writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO); |
| 356 | } |
| 357 | |
| 358 | + if (uart_console(&sport->port)) |
| 359 | + sport->rx_watermark = 1; |
| 360 | writeb(0, sport->port.membase + UARTTWFIFO); |
| 361 | - writeb(1, sport->port.membase + UARTRWFIFO); |
| 362 | + writeb(sport->rx_watermark, sport->port.membase + UARTRWFIFO); |
| 363 | |
| 364 | /* Restore cr2 */ |
| 365 | writeb(cr2_saved, sport->port.membase + UARTCR2); |
| 366 | @@ -1455,6 +1569,7 @@ static void lpuart32_setup_watermark(str |
| 367 | { |
| 368 | unsigned long val, ctrl; |
| 369 | unsigned long ctrl_saved; |
| 370 | + unsigned long rxiden_cnt; |
| 371 | |
| 372 | ctrl = lpuart32_read(&sport->port, UARTCTRL); |
| 373 | ctrl_saved = ctrl; |
| 374 | @@ -1466,12 +1581,26 @@ static void lpuart32_setup_watermark(str |
| 375 | val = lpuart32_read(&sport->port, UARTFIFO); |
| 376 | val |= UARTFIFO_TXFE | UARTFIFO_RXFE; |
| 377 | val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH; |
| 378 | + val &= ~(UARTFIFO_RXIDEN_MASK << UARTFIFO_RXIDEN_OFF); |
| 379 | + rxiden_cnt = sport->dma_eeop ? 0 : UARTFIFO_RXIDEN_RDRF; |
| 380 | + val |= ((rxiden_cnt & UARTFIFO_RXIDEN_MASK) << |
| 381 | + UARTFIFO_RXIDEN_OFF); |
| 382 | lpuart32_write(&sport->port, val, UARTFIFO); |
| 383 | |
| 384 | /* set the watermark */ |
| 385 | - val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF); |
| 386 | + if (uart_console(&sport->port)) |
| 387 | + sport->rx_watermark = 1; |
| 388 | + val = (sport->rx_watermark << UARTWATER_RXWATER_OFF) | |
| 389 | + (0x0 << UARTWATER_TXWATER_OFF); |
| 390 | lpuart32_write(&sport->port, val, UARTWATER); |
| 391 | |
| 392 | + /* set RTS watermark */ |
| 393 | + if (!uart_console(&sport->port)) { |
| 394 | + val = lpuart32_read(&sport->port, UARTMODIR); |
| 395 | + val = (sport->rxfifo_size >> 1) << UARTMODIR_RTSWATER_S; |
| 396 | + lpuart32_write(&sport->port, val, UARTMODIR); |
| 397 | + } |
| 398 | + |
| 399 | /* Restore cr2 */ |
| 400 | lpuart32_write(&sport->port, ctrl_saved, UARTCTRL); |
| 401 | } |
| 402 | @@ -1483,17 +1612,29 @@ static void lpuart32_setup_watermark_ena |
| 403 | lpuart32_setup_watermark(sport); |
| 404 | |
| 405 | temp = lpuart32_read(&sport->port, UARTCTRL); |
| 406 | - temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE; |
| 407 | + temp |= UARTCTRL_RE | UARTCTRL_TE; |
| 408 | + temp |= UARTCTRL_IDLECFG << UARTCTRL_IDLECFG_OFF; |
| 409 | lpuart32_write(&sport->port, temp, UARTCTRL); |
| 410 | } |
| 411 | |
| 412 | static void rx_dma_timer_init(struct lpuart_port *sport) |
| 413 | { |
| 414 | + if (sport->dma_eeop) |
| 415 | + return; |
| 416 | + |
| 417 | timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0); |
| 418 | sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout; |
| 419 | add_timer(&sport->lpuart_timer); |
| 420 | } |
| 421 | |
| 422 | +static void lpuart_del_timer_sync(struct lpuart_port *sport) |
| 423 | +{ |
| 424 | + if (sport->dma_eeop) |
| 425 | + return; |
| 426 | + |
| 427 | + del_timer_sync(&sport->lpuart_timer); |
| 428 | +} |
| 429 | + |
| 430 | static void lpuart_tx_dma_startup(struct lpuart_port *sport) |
| 431 | { |
| 432 | u32 uartbaud; |
| 433 | @@ -1557,19 +1698,23 @@ static int lpuart_startup(struct uart_po |
| 434 | return 0; |
| 435 | } |
| 436 | |
| 437 | +static void lpuart32_hw_disable(struct lpuart_port *sport) |
| 438 | +{ |
| 439 | + unsigned long temp; |
| 440 | + |
| 441 | + temp = lpuart32_read(&sport->port, UARTCTRL); |
| 442 | + temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE | |
| 443 | + UARTCTRL_TIE | UARTCTRL_TE); |
| 444 | + lpuart32_write(&sport->port, temp, UARTCTRL); |
| 445 | +} |
| 446 | + |
| 447 | static void lpuart32_configure(struct lpuart_port *sport) |
| 448 | { |
| 449 | unsigned long temp; |
| 450 | |
| 451 | - if (sport->lpuart_dma_rx_use) { |
| 452 | - /* RXWATER must be 0 */ |
| 453 | - temp = lpuart32_read(&sport->port, UARTWATER); |
| 454 | - temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF); |
| 455 | - lpuart32_write(&sport->port, temp, UARTWATER); |
| 456 | - } |
| 457 | temp = lpuart32_read(&sport->port, UARTCTRL); |
| 458 | if (!sport->lpuart_dma_rx_use) |
| 459 | - temp |= UARTCTRL_RIE; |
| 460 | + temp |= UARTCTRL_RIE | UARTCTRL_ILIE; |
| 461 | if (!sport->lpuart_dma_tx_use) |
| 462 | temp |= UARTCTRL_TIE; |
| 463 | lpuart32_write(&sport->port, temp, UARTCTRL); |
| 464 | @@ -1613,12 +1758,12 @@ static int lpuart32_startup(struct uart_ |
| 465 | |
| 466 | spin_lock_irqsave(&sport->port.lock, flags); |
| 467 | |
| 468 | - lpuart32_setup_watermark_enable(sport); |
| 469 | - |
| 470 | + lpuart32_hw_disable(sport); |
| 471 | |
| 472 | lpuart_rx_dma_startup(sport); |
| 473 | lpuart_tx_dma_startup(sport); |
| 474 | |
| 475 | + lpuart32_setup_watermark_enable(sport); |
| 476 | lpuart32_configure(sport); |
| 477 | |
| 478 | spin_unlock_irqrestore(&sport->port.lock, flags); |
| 479 | @@ -1628,7 +1773,7 @@ static int lpuart32_startup(struct uart_ |
| 480 | static void lpuart_dma_shutdown(struct lpuart_port *sport) |
| 481 | { |
| 482 | if (sport->lpuart_dma_rx_use) { |
| 483 | - del_timer_sync(&sport->lpuart_timer); |
| 484 | + lpuart_del_timer_sync(sport); |
| 485 | lpuart_dma_rx_free(&sport->port); |
| 486 | sport->lpuart_dma_rx_use = false; |
| 487 | } |
| 488 | @@ -1671,11 +1816,22 @@ static void lpuart32_shutdown(struct uar |
| 489 | |
| 490 | spin_lock_irqsave(&port->lock, flags); |
| 491 | |
| 492 | + /* clear status */ |
| 493 | + temp = lpuart32_read(&sport->port, UARTSTAT); |
| 494 | + lpuart32_write(&sport->port, temp, UARTSTAT); |
| 495 | + |
| 496 | + /* disable Rx/Tx DMA */ |
| 497 | + temp = lpuart32_read(port, UARTBAUD); |
| 498 | + temp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE | UARTBAUD_RIDMAE); |
| 499 | + lpuart32_write(port, temp, UARTBAUD); |
| 500 | + |
| 501 | /* disable Rx/Tx and interrupts */ |
| 502 | temp = lpuart32_read(port, UARTCTRL); |
| 503 | - temp &= ~(UARTCTRL_TE | UARTCTRL_RE | |
| 504 | - UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE); |
| 505 | + temp &= ~(UARTCTRL_TE | UARTCTRL_RE | UARTCTRL_TIE | |
| 506 | + UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_ILIE | |
| 507 | + UARTCTRL_LOOPS); |
| 508 | lpuart32_write(port, temp, UARTCTRL); |
| 509 | + lpuart32_write(port, 0, UARTMODIR); |
| 510 | |
| 511 | spin_unlock_irqrestore(&port->lock, flags); |
| 512 | |
| 513 | @@ -1772,10 +1928,10 @@ lpuart_set_termios(struct uart_port *por |
| 514 | * baud rate and restart Rx DMA path. |
| 515 | * |
| 516 | * Since timer function acqures sport->port.lock, need to stop before |
| 517 | - * acquring same lock because otherwise del_timer_sync() can deadlock. |
| 518 | + * acquiring same lock because otherwise lpuart_del_timer_sync() can deadlock. |
| 519 | */ |
| 520 | if (old && sport->lpuart_dma_rx_use) { |
| 521 | - del_timer_sync(&sport->lpuart_timer); |
| 522 | + lpuart_del_timer_sync(sport); |
| 523 | lpuart_dma_rx_free(&sport->port); |
| 524 | } |
| 525 | |
| 526 | @@ -1987,10 +2143,10 @@ lpuart32_set_termios(struct uart_port *p |
| 527 | * baud rate and restart Rx DMA path. |
| 528 | * |
| 529 | * Since timer function acqures sport->port.lock, need to stop before |
| 530 | - * acquring same lock because otherwise del_timer_sync() can deadlock. |
| 531 | + * acquiring same lock because otherwise lpuart_del_timer_sync() can deadlock. |
| 532 | */ |
| 533 | if (old && sport->lpuart_dma_rx_use) { |
| 534 | - del_timer_sync(&sport->lpuart_timer); |
| 535 | + lpuart_del_timer_sync(sport); |
| 536 | lpuart_dma_rx_free(&sport->port); |
| 537 | } |
| 538 | |
| 539 | @@ -2509,6 +2665,10 @@ static int lpuart_probe(struct platform_ |
| 540 | sport->port.dev = &pdev->dev; |
| 541 | sport->port.type = PORT_LPUART; |
| 542 | sport->devtype = sdata->devtype; |
| 543 | + sport->rx_dma_cyclic = sdata->rx_dma_cyclic; |
| 544 | + sport->rx_watermark = sdata->rx_watermark; |
| 545 | + sport->dma_eeop = is_imx8qxp_lpuart(sport); |
| 546 | + |
| 547 | ret = platform_get_irq(pdev, 0); |
| 548 | if (ret < 0) |
| 549 | return ret; |
| 550 | @@ -2659,7 +2819,7 @@ static int lpuart_suspend(struct device |
| 551 | * Rx DMA path before suspend and start Rx DMA path on resume. |
| 552 | */ |
| 553 | if (irq_wake) { |
| 554 | - del_timer_sync(&sport->lpuart_timer); |
| 555 | + lpuart_del_timer_sync(sport); |
| 556 | lpuart_dma_rx_free(&sport->port); |
| 557 | } |
| 558 | |