blob: 06cf474072d6dfe73c9c3c0e0386a09cd184836e [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
3 *
4 * Copyright (C) 2002 - 2011 Paul Mundt
5 * Copyright (C) 2015 Glider bvba
6 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
7 *
8 * based off of the old drivers/char/sh-sci.c by:
9 *
10 * Copyright (C) 1999, 2000 Niibe Yutaka
11 * Copyright (C) 2000 Sugioka Toshinobu
12 * Modified to support multiple serial ports. Stuart Menefy (May 2000).
13 * Modified to support SecureEdge. David McCullough (2002)
14 * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
15 * Removed SH7300 support (Jul 2007).
16 *
17 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file "COPYING" in the main directory of this archive
19 * for more details.
20 */
21#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
22#define SUPPORT_SYSRQ
23#endif
24
25#undef DEBUG
26
27#include <linux/clk.h>
28#include <linux/console.h>
29#include <linux/ctype.h>
30#include <linux/cpufreq.h>
31#include <linux/delay.h>
32#include <linux/dmaengine.h>
33#include <linux/dma-mapping.h>
34#include <linux/err.h>
35#include <linux/errno.h>
36#include <linux/init.h>
37#include <linux/interrupt.h>
38#include <linux/ioport.h>
39#include <linux/major.h>
40#include <linux/module.h>
41#include <linux/mm.h>
42#include <linux/of.h>
43#include <linux/platform_device.h>
44#include <linux/pm_runtime.h>
45#include <linux/scatterlist.h>
46#include <linux/serial.h>
47#include <linux/serial_sci.h>
48#include <linux/sh_dma.h>
49#include <linux/slab.h>
50#include <linux/string.h>
51#include <linux/sysrq.h>
52#include <linux/timer.h>
53#include <linux/tty.h>
54#include <linux/tty_flip.h>
55
56#ifdef CONFIG_SUPERH
57#include <asm/sh_bios.h>
58#endif
59
60#include "serial_mctrl_gpio.h"
61#include "sh-sci.h"
62
/* Offsets into the sci_port->irqs array */
enum {
	SCIx_ERI_IRQ,			/* receive error */
	SCIx_RXI_IRQ,			/* receive data full */
	SCIx_TXI_IRQ,			/* transmit data empty */
	SCIx_BRI_IRQ,			/* break */
	SCIx_NR_IRQS,

	SCIx_MUX_IRQ = SCIx_NR_IRQS,	/* special case */
};
73
74#define SCIx_IRQ_IS_MUXED(port) \
75 ((port)->irqs[SCIx_ERI_IRQ] == \
76 (port)->irqs[SCIx_RXI_IRQ]) || \
77 ((port)->irqs[SCIx_ERI_IRQ] && \
78 ((port)->irqs[SCIx_RXI_IRQ] < 0))
79
/* Indices into sci_port->clks[] / clk_rates[] */
enum SCI_CLKS {
	SCI_FCK,	/* Functional Clock */
	SCI_SCK,	/* Optional External Clock */
	SCI_BRG_INT,	/* Optional BRG Internal Clock Source */
	SCI_SCIF_CLK,	/* Optional BRG External Clock Source */
	SCI_NUM_CLKS
};
87
88/* Bit x set means sampling rate x + 1 is supported */
89#define SCI_SR(x) BIT((x) - 1)
90#define SCI_SR_RANGE(x, y) GENMASK((y) - 1, (x) - 1)
91
92#define SCI_SR_SCIFAB SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \
93 SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \
94 SCI_SR(19) | SCI_SR(27)
95
96#define min_sr(_port) ffs((_port)->sampling_rate_mask)
97#define max_sr(_port) fls((_port)->sampling_rate_mask)
98
99/* Iterate over all supported sampling rates, from high to low */
100#define for_each_sr(_sr, _port) \
101 for ((_sr) = max_sr(_port); (_sr) >= min_sr(_port); (_sr)--) \
102 if ((_port)->sampling_rate_mask & SCI_SR((_sr)))
103
/*
 * Per-register location/size descriptor: byte offset (shifted left by
 * port->regshift on access) and access width in bits (8 or 16).
 * A size of 0 means the register does not exist on this port type.
 */
struct plat_sci_reg {
	u8 offset, size;
};
107
/* Static description of one SCI(F) register-layout variant */
struct sci_port_params {
	const struct plat_sci_reg regs[SCIx_NR_REGS];	/* register map */
	unsigned int fifosize;		/* TX/RX FIFO depth in bytes */
	unsigned int overrun_reg;	/* register holding the overrun flag */
	unsigned int overrun_mask;	/* overrun bit within overrun_reg */
	unsigned int sampling_rate_mask;	/* supported sampling rates */
	unsigned int error_mask;	/* SCxSR bits that indicate errors */
	unsigned int error_clear;	/* value written to clear errors */
};
117
/* Driver-private state for one SCI(F) port, wrapping struct uart_port */
struct sci_port {
	struct uart_port	port;

	/* Platform configuration */
	const struct sci_port_params *params;	/* layout variant (see table) */
	const struct plat_sci_port *cfg;	/* board/platform data */
	unsigned int		sampling_rate_mask;
	resource_size_t		reg_size;
	struct mctrl_gpios	*gpios;

	/* Clocks */
	struct clk		*clks[SCI_NUM_CLKS];
	unsigned long		clk_rates[SCI_NUM_CLKS];

	/* Indexed by the SCIx_*_IRQ enum; see SCIx_IRQ_IS_MUXED() */
	int			irqs[SCIx_NR_IRQS];
	char			*irqstr[SCIx_NR_IRQS];

	/* DMA channels; NULL when falling back to PIO */
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	dma_cookie_t		cookie_tx;
	dma_cookie_t		cookie_rx[2];	/* two bounce buffers */
	dma_cookie_t		active_rx;	/* cookie of buffer in flight */
	dma_addr_t		tx_dma_addr;
	unsigned int		tx_dma_len;
	struct scatterlist	sg_rx[2];
	void			*rx_buf[2];
	size_t			buf_len_rx;
	struct work_struct	work_tx;
	struct timer_list	rx_timer;
	unsigned int		rx_timeout;
#endif
	unsigned int		rx_frame;
	int			rx_trigger;	/* RX FIFO trigger level */
	struct timer_list	rx_fifo_timer;
	int			rx_fifo_timeout;	/* 0 = timer disabled */

	bool has_rtscts;	/* hardware supports RTS/CTS lines */
	bool autorts;		/* hardware flow control enabled */
};
159
160#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
161
162static struct sci_port sci_ports[SCI_NPORTS];
163static struct uart_driver sci_uart_driver;
164
/* Convert a generic uart_port back to the enclosing sci_port */
static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}
170
/*
 * Register layouts and error/overrun semantics for every supported
 * SCI(F) hardware variant, indexed by SCIx_*_REGTYPE.  Entries with
 * size 0 (absent) mark registers the variant does not implement.
 */
static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
	/*
	 * Common SCI definitions, dependent on the port's regshift
	 * value.
	 */
	[SCIx_SCI_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 8 },
			[SCBRR]		= { 0x01, 8 },
			[SCSCR]		= { 0x02, 8 },
			[SCxTDR]	= { 0x03, 8 },
			[SCxSR]		= { 0x04, 8 },
			[SCxRDR]	= { 0x05, 8 },
		},
		.fifosize = 1,
		.overrun_reg = SCxSR,
		.overrun_mask = SCI_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER,
		.error_clear = SCI_ERROR_CLEAR & ~SCI_ORER,
	},

	/*
	 * Common definitions for legacy IrDA ports.
	 */
	[SCIx_IRDA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 8 },
			[SCBRR]		= { 0x02, 8 },
			[SCSCR]		= { 0x04, 8 },
			[SCxTDR]	= { 0x06, 8 },
			[SCxSR]		= { 0x08, 16 },
			[SCxRDR]	= { 0x0a, 8 },
			[SCFCR]		= { 0x0c, 8 },
			[SCFDR]		= { 0x0e, 16 },
		},
		.fifosize = 1,
		.overrun_reg = SCxSR,
		.overrun_mask = SCI_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER,
		.error_clear = SCI_ERROR_CLEAR & ~SCI_ORER,
	},

	/*
	 * Common SCIFA definitions.
	 */
	[SCIx_SCIFA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x20, 8 },
			[SCxSR]		= { 0x14, 16 },
			[SCxRDR]	= { 0x24, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCPCR]		= { 0x30, 16 },
			[SCPDR]		= { 0x34, 16 },
		},
		.fifosize = 64,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR_SCIFAB,
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},

	/*
	 * Common SCIFB definitions.
	 */
	[SCIx_SCIFB_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x40, 8 },
			[SCxSR]		= { 0x14, 16 },
			[SCxRDR]	= { 0x60, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCTFDR]	= { 0x38, 16 },
			[SCRFDR]	= { 0x3c, 16 },
			[SCPCR]		= { 0x30, 16 },
			[SCPDR]		= { 0x34, 16 },
		},
		.fifosize = 256,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR_SCIFAB,
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},

	/*
	 * Common SH-2(A) SCIF definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c, 8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCSPTR]	= { 0x20, 16 },
			[SCLSR]		= { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-3 SCIF definitions.
	 */
	[SCIx_SH3_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 8 },
			[SCBRR]		= { 0x02, 8 },
			[SCSCR]		= { 0x04, 8 },
			[SCxTDR]	= { 0x06, 8 },
			[SCxSR]		= { 0x08, 16 },
			[SCxRDR]	= { 0x0a, 8 },
			[SCFCR]		= { 0x0c, 8 },
			[SCFDR]		= { 0x0e, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions.
	 */
	[SCIx_SH4_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c, 8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCSPTR]	= { 0x20, 16 },
			[SCLSR]		= { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SCIF definitions for ports with a Baud Rate Generator for
	 * External Clock (BRG).
	 */
	[SCIx_SH4_SCIF_BRG_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c, 8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCSPTR]	= { 0x20, 16 },
			[SCLSR]		= { 0x24, 16 },
			[SCDL]		= { 0x30, 16 },
			[SCCKS]		= { 0x34, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common HSCIF definitions.
	 */
	[SCIx_HSCIF_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c, 8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCSPTR]	= { 0x20, 16 },
			[SCLSR]		= { 0x24, 16 },
			[HSSRR]		= { 0x40, 16 },
			[SCDL]		= { 0x30, 16 },
			[SCCKS]		= { 0x34, 16 },
			[HSRTRGR]	= { 0x54, 16 },
			[HSTTRGR]	= { 0x58, 16 },
		},
		.fifosize = 128,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR_RANGE(8, 32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
	 * register.
	 */
	[SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c, 8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCLSR]		= { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x0c, 8 },
			[SCxSR]		= { 0x10, 16 },
			[SCxRDR]	= { 0x14, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
			[SCTFDR]	= { 0x1c, 16 },	/* aliased to SCFDR */
			[SCRFDR]	= { 0x20, 16 },
			[SCSPTR]	= { 0x24, 16 },
			[SCLSR]		= { 0x28, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
	 * registers.
	 */
	[SCIx_SH7705_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR]		= { 0x00, 16 },
			[SCBRR]		= { 0x04, 8 },
			[SCSCR]		= { 0x08, 16 },
			[SCxTDR]	= { 0x20, 8 },
			[SCxSR]		= { 0x14, 16 },
			[SCxRDR]	= { 0x24, 8 },
			[SCFCR]		= { 0x18, 16 },
			[SCFDR]		= { 0x1c, 16 },
		},
		.fifosize = 64,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR(16),
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},
};
465
466#define sci_getreg(up, offset) (&to_sci_port(up)->params->regs[offset])
467
468/*
469 * The "offset" here is rather misleading, in that it refers to an enum
470 * value relative to the port mapping rather than the fixed offset
471 * itself, which needs to be manually retrieved from the platform's
472 * register map for the given port.
473 */
474static unsigned int sci_serial_in(struct uart_port *p, int offset)
475{
476 const struct plat_sci_reg *reg = sci_getreg(p, offset);
477
478 if (reg->size == 8)
479 return ioread8(p->membase + (reg->offset << p->regshift));
480 else if (reg->size == 16)
481 return ioread16(p->membase + (reg->offset << p->regshift));
482 else
483 WARN(1, "Invalid register access\n");
484
485 return 0;
486}
487
488static void sci_serial_out(struct uart_port *p, int offset, int value)
489{
490 const struct plat_sci_reg *reg = sci_getreg(p, offset);
491
492 if (reg->size == 8)
493 iowrite8(value, p->membase + (reg->offset << p->regshift));
494 else if (reg->size == 16)
495 iowrite16(value, p->membase + (reg->offset << p->regshift));
496 else
497 WARN(1, "Invalid register access\n");
498}
499
500static void sci_port_enable(struct sci_port *sci_port)
501{
502 unsigned int i;
503
504 if (!sci_port->port.dev)
505 return;
506
507 pm_runtime_get_sync(sci_port->port.dev);
508
509 for (i = 0; i < SCI_NUM_CLKS; i++) {
510 clk_prepare_enable(sci_port->clks[i]);
511 sci_port->clk_rates[i] = clk_get_rate(sci_port->clks[i]);
512 }
513 sci_port->port.uartclk = sci_port->clk_rates[SCI_FCK];
514}
515
516static void sci_port_disable(struct sci_port *sci_port)
517{
518 unsigned int i;
519
520 if (!sci_port->port.dev)
521 return;
522
523 for (i = SCI_NUM_CLKS; i-- > 0; )
524 clk_disable_unprepare(sci_port->clks[i]);
525
526 pm_runtime_put_sync(sci_port->port.dev);
527}
528
/*
 * SCSCR bits to toggle when enabling/disabling RX interrupts:
 * always RIE, plus REIE only if the platform configuration asked
 * for it at init time.
 */
static inline unsigned long port_rx_irq_mask(struct uart_port *port)
{
	/*
	 * Not all ports (such as SCIFA) will support REIE. Rather than
	 * special-casing the port type, we check the port initialization
	 * IRQ enable mask to see whether the IRQ is desired at all. If
	 * it's unset, it's logically inferred that there's no point in
	 * testing for it.
	 */
	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
540
/*
 * uart_ops .start_tx: kick transmission, either by (re)arming the TX
 * DMA path or by enabling the TX-empty interrupt for PIO.
 */
static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Mirror DMA availability into the TX DMA request enable bit */
		u16 new, scr = serial_port_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | SCSCR_TDRQE;
		else
			new = scr & ~SCSCR_TDRQE;
		if (new != scr)
			serial_port_out(port, SCSCR, new);
	}

	/* Resubmit TX DMA if there is pending data but no descriptor queued */
	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    dma_submit_error(s->cookie_tx)) {
		s->cookie_tx = 0;
		schedule_work(&s->work_tx);
	}
#endif

	/* PIO path (and SCIFA/SCIFB even with DMA) needs TIE set */
	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = serial_port_in(port, SCSCR);
		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
570
571static void sci_stop_tx(struct uart_port *port)
572{
573 unsigned short ctrl;
574
575 /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
576 ctrl = serial_port_in(port, SCSCR);
577
578 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
579 ctrl &= ~SCSCR_TDRQE;
580
581 ctrl &= ~SCSCR_TIE;
582
583 serial_port_out(port, SCSCR, ctrl);
584}
585
586static void sci_start_rx(struct uart_port *port)
587{
588 unsigned short ctrl;
589
590 ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
591
592 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
593 ctrl &= ~SCSCR_RDRQE;
594
595 serial_port_out(port, SCSCR, ctrl);
596}
597
598static void sci_stop_rx(struct uart_port *port)
599{
600 unsigned short ctrl;
601
602 ctrl = serial_port_in(port, SCSCR);
603
604 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
605 ctrl &= ~SCSCR_RDRQE;
606
607 ctrl &= ~port_rx_irq_mask(port);
608
609 serial_port_out(port, SCSCR, ctrl);
610}
611
/*
 * Clear status bits in SCxSR.  The write semantics differ per variant:
 * plain SCI stores the mask directly, SCIFA-style ports only clear the
 * bits that are zero in the written value, and classic SCIF needs the
 * parity/framing "clear" bits forced low.
 */
static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
{
	if (port->type == PORT_SCI) {
		/* Just store the mask */
		serial_port_out(port, SCxSR, mask);
	} else if (to_sci_port(port)->params->overrun_mask == SCIFA_ORER) {
		/* SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 */
		/* Only clear the status bits we want to clear */
		serial_port_out(port, SCxSR,
				serial_port_in(port, SCxSR) & mask);
	} else {
		/* Store the mask, clear parity/framing errors */
		serial_port_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC));
	}
}
627
628#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
629 defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
630
631#ifdef CONFIG_CONSOLE_POLL
/*
 * kgdb/kdb polled-mode receive: clear any pending error conditions,
 * then return one received character, or NO_POLL_CHAR when the RX
 * data-ready flag is not set.
 */
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	do {
		status = serial_port_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			/* Discard the error and re-sample the status */
			sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = serial_port_in(port, SCxRDR);

	/* Dummy read */
	serial_port_in(port, SCxSR);
	sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));

	return c;
}
657#endif
658
659static void sci_poll_put_char(struct uart_port *port, unsigned char c)
660{
661 unsigned short status;
662
663 do {
664 status = serial_port_in(port, SCxSR);
665 } while (!(status & SCxSR_TDxE(port)));
666
667 serial_port_out(port, SCxTDR, c);
668 sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
669}
670#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
671 CONFIG_SERIAL_SH_SCI_EARLYCON */
672
/*
 * Configure the port's pin functions (TXD/RXD, and RTS#/CTS# where
 * supported) according to the current modem-control state.  A
 * platform-provided init_pins hook takes precedence; otherwise the
 * SCPCR/SCPDR (SCIFA/SCIFB) or SCSPTR registers are programmed
 * directly.
 */
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	struct sci_port *s = to_sci_port(port);

	/*
	 * Use port-specific handler if provided.
	 */
	if (s->cfg->ops && s->cfg->ops->init_pins) {
		s->cfg->ops->init_pins(port, cflag);
		return;
	}

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 data = serial_port_in(port, SCPDR);
		u16 ctrl = serial_port_in(port, SCPCR);

		/* Enable RXD and TXD pin functions */
		ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
		if (to_sci_port(port)->has_rtscts) {
			/* RTS# is output, active low, unless autorts */
			if (!(port->mctrl & TIOCM_RTS)) {
				/* Drive RTS# high (deasserted) as GPIO */
				ctrl |= SCPCR_RTSC;
				data |= SCPDR_RTSD;
			} else if (!s->autorts) {
				/* Drive RTS# low (asserted) as GPIO */
				ctrl |= SCPCR_RTSC;
				data &= ~SCPDR_RTSD;
			} else {
				/* Enable RTS# pin function */
				ctrl &= ~SCPCR_RTSC;
			}
			/* Enable CTS# pin function */
			ctrl &= ~SCPCR_CTSC;
		}
		serial_port_out(port, SCPDR, data);
		serial_port_out(port, SCPCR, ctrl);
	} else if (sci_getreg(port, SCSPTR)->size) {
		u16 status = serial_port_in(port, SCSPTR);

		/* RTS# is always output; and active low, unless autorts */
		status |= SCSPTR_RTSIO;
		if (!(port->mctrl & TIOCM_RTS))
			status |= SCSPTR_RTSDT;
		else if (!s->autorts)
			status &= ~SCSPTR_RTSDT;
		/* CTS# and SCK are inputs */
		status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
		serial_port_out(port, SCSPTR, status);
	}
}
722
/*
 * Number of bytes currently occupying the TX FIFO.  Prefers the
 * dedicated TX count register (SCTFDR), falls back to the combined
 * FIFO data register's high byte (SCFDR), and finally to the single
 * TDRE flag for FIFO-less SCI (0 = empty, 1 = occupied).
 */
static int sci_txfill(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	/* Count fields may be one bit wider than log2(fifosize) */
	unsigned int fifo_mask = (s->params->fifosize << 1) - 1;
	const struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCTFDR);
	if (reg->size)
		return serial_port_in(port, SCTFDR) & fifo_mask;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return serial_port_in(port, SCFDR) >> 8;

	return !(serial_port_in(port, SCxSR) & SCI_TDRE);
}
739
740static int sci_txroom(struct uart_port *port)
741{
742 return port->fifosize - sci_txfill(port);
743}
744
/*
 * Number of bytes waiting in the RX FIFO.  Prefers the dedicated RX
 * count register (SCRFDR), falls back to the combined FIFO data
 * register's low half (SCFDR), and finally to the RDxF flag for
 * FIFO-less SCI (0 or 1).
 */
static int sci_rxfill(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	/* Count fields may be one bit wider than log2(fifosize) */
	unsigned int fifo_mask = (s->params->fifosize << 1) - 1;
	const struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCRFDR);
	if (reg->size)
		return serial_port_in(port, SCRFDR) & fifo_mask;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return serial_port_in(port, SCFDR) & fifo_mask;

	return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
761
762/* ********************************************************************** *
763 * the interrupt related routines *
764 * ********************************************************************** */
765
/*
 * PIO transmit path (TXI interrupt handler body): drain the circular
 * transmit buffer (and any pending XON/XOFF x_char) into the TX FIFO,
 * then acknowledge the TX-empty flag.  Wakes up writers when the
 * buffer drops below WAKEUP_CHARS and stops TX when it empties.
 */
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = serial_port_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		/* FIFO not ready: keep TIE armed only while data is pending */
		ctrl = serial_port_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		serial_port_out(port, SCSCR, ctrl);
		return;
	}

	count = sci_txroom(port);

	do {
		unsigned char c;

		/* x_char (flow control) takes priority over buffered data */
		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		serial_port_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit))
		sci_stop_tx(port);

}
813
814/* On SH3, SCIF may read end-of-break as a space->mark char */
815#define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
816
/*
 * PIO receive path (RXI interrupt handler body): move all characters
 * waiting in the RX FIFO into the TTY flip buffer, tagging each with
 * frame/parity status, and acknowledge RDxF.  If the TTY layer has no
 * room at all, the RX register is still read once to avoid wedging
 * the receiver.
 */
static void sci_receive_chars(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = serial_port_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tport, sci_rxfill(port));

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			/* FIFO-less SCI: at most one character per pass */
			char c = serial_port_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c))
				count = 0;
			else
				tty_insert_flip_char(tport, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c;

				/*
				 * Status/data read order differs per
				 * variant so the sampled status matches
				 * the character just read.
				 */
				if (port->type == PORT_SCIF ||
				    port->type == PORT_HSCIF) {
					status = serial_port_in(port, SCxSR);
					c = serial_port_in(port, SCxRDR);
				} else {
					c = serial_port_in(port, SCxRDR);
					status = serial_port_in(port, SCxSR);
				}
				if (uart_handle_sysrq_char(port, c)) {
					/* Consumed by sysrq: don't count it */
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					port->icount.frame++;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					port->icount.parity++;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tport, c, flag);
			}
		}

		serial_port_in(port, SCxSR); /* dummy read */
		sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tport);
	} else {
		/* TTY buffers full; read from RX reg to prevent lockup */
		serial_port_in(port, SCxRDR);
		serial_port_in(port, SCxSR); /* dummy read */
		sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
	}
}
892
/*
 * ERI interrupt handler body: translate overrun/frame/parity status
 * bits into TTY error characters and bump the matching icount fields.
 * Returns the number of error characters queued to the TTY layer.
 */
static int sci_handle_errors(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = serial_port_in(port, SCxSR);
	struct tty_port *tport = &port->state->port;
	struct sci_port *s = to_sci_port(port);

	/* Handle overruns */
	if (status & s->params->overrun_mask) {
		port->icount.overrun++;

		/* overrun error */
		if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
			copied++;

		dev_notice(port->dev, "overrun error\n");
	}

	if (status & SCxSR_FER(port)) {
		/* frame error */
		port->icount.frame++;

		if (tty_insert_flip_char(tport, 0, TTY_FRAME))
			copied++;

		dev_notice(port->dev, "frame error\n");
	}

	if (status & SCxSR_PER(port)) {
		/* parity error */
		port->icount.parity++;

		if (tty_insert_flip_char(tport, 0, TTY_PARITY))
			copied++;

		dev_notice(port->dev, "parity error\n");
	}

	if (copied)
		tty_flip_buffer_push(tport);

	return copied;
}
936
/*
 * Check and clear the variant-specific FIFO overrun flag (register and
 * mask come from the layout table).  Queues one TTY_OVERRUN character
 * when an overrun occurred; returns non-zero in that case.  Harmless
 * no-op on variants without an overrun register.
 */
static int sci_handle_fifo_overrun(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	struct sci_port *s = to_sci_port(port);
	const struct plat_sci_reg *reg;
	int copied = 0;
	u16 status;

	reg = sci_getreg(port, s->params->overrun_reg);
	if (!reg->size)
		return 0;

	status = serial_port_in(port, s->params->overrun_reg);
	if (status & s->params->overrun_mask) {
		/* Write the flag back cleared to acknowledge it */
		status &= ~s->params->overrun_mask;
		serial_port_out(port, s->params->overrun_reg, status);

		port->icount.overrun++;

		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
		tty_flip_buffer_push(tport);

		dev_dbg(port->dev, "overrun error\n");
		copied++;
	}

	return copied;
}
965
/*
 * BRI interrupt handler body: report a detected break condition to the
 * TTY layer (unless uart_handle_break() consumed it, e.g. for sysrq),
 * and also sweep up any FIFO overrun.  Returns the number of
 * characters queued.
 */
static int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = serial_port_in(port, SCxSR);
	struct tty_port *tport = &port->state->port;

	if (uart_handle_break(port))
		return 0;

	if (status & SCxSR_BRK(port)) {
		port->icount.brk++;

		/* Notify of BREAK */
		if (tty_insert_flip_char(tport, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tport);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
992
/*
 * Program the RX FIFO trigger level (number of received bytes before
 * an RX interrupt fires).  HSCIF accepts any level via HSRTRGR; SCIF
 * and SCIFA/SCIFB only support a few discrete levels selected through
 * the SCFCR RTRG bits, so the request is rounded down.  Returns the
 * level actually programmed.
 */
static int scif_set_rtrg(struct uart_port *port, int rx_trig)
{
	unsigned int bits;

	/* Clamp the request to [1, fifosize] */
	if (rx_trig < 1)
		rx_trig = 1;
	if (rx_trig >= port->fifosize)
		rx_trig = port->fifosize;

	/* HSCIF can be set to an arbitrary level. */
	if (sci_getreg(port, HSRTRGR)->size) {
		serial_port_out(port, HSRTRGR, rx_trig);
		return rx_trig;
	}

	switch (port->type) {
	case PORT_SCIF:
		if (rx_trig < 4) {
			bits = 0;
			rx_trig = 1;
		} else if (rx_trig < 8) {
			bits = SCFCR_RTRG0;
			rx_trig = 4;
		} else if (rx_trig < 14) {
			bits = SCFCR_RTRG1;
			rx_trig = 8;
		} else {
			bits = SCFCR_RTRG0 | SCFCR_RTRG1;
			rx_trig = 14;
		}
		break;
	case PORT_SCIFA:
	case PORT_SCIFB:
		if (rx_trig < 16) {
			bits = 0;
			rx_trig = 1;
		} else if (rx_trig < 32) {
			bits = SCFCR_RTRG0;
			rx_trig = 16;
		} else if (rx_trig < 48) {
			bits = SCFCR_RTRG1;
			rx_trig = 32;
		} else {
			bits = SCFCR_RTRG0 | SCFCR_RTRG1;
			rx_trig = 48;
		}
		break;
	default:
		WARN(1, "unknown FIFO configuration");
		return 1;
	}

	/* Update only the RTRG field, preserving the rest of SCFCR */
	serial_port_out(port, SCFCR,
			(serial_port_in(port, SCFCR) &
			 ~(SCFCR_RTRG1 | SCFCR_RTRG0)) | bits);

	return rx_trig;
}
1051
1052static int scif_rtrg_enabled(struct uart_port *port)
1053{
1054 if (sci_getreg(port, HSRTRGR)->size)
1055 return serial_port_in(port, HSRTRGR) != 0;
1056 else
1057 return (serial_port_in(port, SCFCR) &
1058 (SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
1059}
1060
/*
 * rx_fifo_timer callback: the FIFO didn't fill up to the trigger level
 * in time, so drop the trigger to 1 byte to flush out whatever is
 * waiting via the next RX interrupt.
 */
static void rx_fifo_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct uart_port *port = &s->port;

	dev_dbg(port->dev, "Rx timed out\n");
	scif_set_rtrg(port, 1);
}
1069
1070static ssize_t rx_trigger_show(struct device *dev,
1071 struct device_attribute *attr,
1072 char *buf)
1073{
1074 struct uart_port *port = dev_get_drvdata(dev);
1075 struct sci_port *sci = to_sci_port(port);
1076
1077 return sprintf(buf, "%d\n", sci->rx_trigger);
1078}
1079
/*
 * sysfs rx_fifo_trigger write: parse the requested level, program the
 * hardware (scif_set_rtrg() rounds it to a supported value) and record
 * the effective level.  SCIFA/SCIFB are immediately forced back to a
 * trigger of 1; the recorded value is applied later.
 */
static ssize_t rx_trigger_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t count)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct sci_port *sci = to_sci_port(port);
	int ret;
	long r;

	ret = kstrtol(buf, 0, &r);
	if (ret)
		return ret;

	sci->rx_trigger = scif_set_rtrg(port, r);
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		scif_set_rtrg(port, 1);

	return count;
}
1100
1101static DEVICE_ATTR(rx_fifo_trigger, 0644, rx_trigger_show, rx_trigger_store);
1102
1103static ssize_t rx_fifo_timeout_show(struct device *dev,
1104 struct device_attribute *attr,
1105 char *buf)
1106{
1107 struct uart_port *port = dev_get_drvdata(dev);
1108 struct sci_port *sci = to_sci_port(port);
1109
1110 return sprintf(buf, "%d\n", sci->rx_fifo_timeout);
1111}
1112
1113static ssize_t rx_fifo_timeout_store(struct device *dev,
1114 struct device_attribute *attr,
1115 const char *buf,
1116 size_t count)
1117{
1118 struct uart_port *port = dev_get_drvdata(dev);
1119 struct sci_port *sci = to_sci_port(port);
1120 int ret;
1121 long r;
1122
1123 ret = kstrtol(buf, 0, &r);
1124 if (ret)
1125 return ret;
1126 sci->rx_fifo_timeout = r;
1127 scif_set_rtrg(port, 1);
1128 if (r > 0)
1129 setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
1130 (unsigned long)sci);
1131 return count;
1132}
1133
1134static DEVICE_ATTR(rx_fifo_timeout, 0644, rx_fifo_timeout_show, rx_fifo_timeout_store);
1135
1136
1137#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
 * DMA TX completion callback: advance the circular buffer past the
 * transferred region, wake up writers if room opened up, and either
 * schedule the next DMA chunk or mark TX idle (clearing TIE on
 * SCIFA/SCIFB, where it doubles as the DMA kick).
 */
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	/* Consume the bytes the DMA engine just sent */
	xmit->tail += s->tx_dma_len;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += s->tx_dma_len;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		/* More data pending: queue the next transfer */
		s->cookie_tx = 0;
		schedule_work(&s->work_tx);
	} else {
		s->cookie_tx = -EINVAL;
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			u16 ctrl = serial_port_in(port, SCSCR);
			serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
		}
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
1170
1171/* Locking: called with port lock held */
1172static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count)
1173{
1174 struct uart_port *port = &s->port;
1175 struct tty_port *tport = &port->state->port;
1176 int copied;
1177
1178 copied = tty_insert_flip_string(tport, buf, count);
1179 if (copied < count)
1180 port->icount.buf_overrun++;
1181
1182 port->icount.rx += copied;
1183
1184 return copied;
1185}
1186
1187static int sci_dma_rx_find_active(struct sci_port *s)
1188{
1189 unsigned int i;
1190
1191 for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++)
1192 if (s->active_rx == s->cookie_rx[i])
1193 return i;
1194
1195 return -1;
1196}
1197
/*
 * Tear down the RX DMA channel: detach it from the port under the
 * lock (so the IRQ path stops using it), terminate outstanding
 * transfers, free the shared bounce buffer, and release the channel.
 * Optionally re-enables interrupt-driven (PIO) reception.
 */
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	s->chan_rx = NULL;
	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
	spin_unlock_irqrestore(&port->lock, flags);
	dmaengine_terminate_all(chan);
	/* Both rx_buf[] halves live in one coherent allocation */
	dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
			  sg_dma_address(&s->sg_rx[0]));
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_rx(port);
}
1215
1216static void sci_dma_rx_complete(void *arg)
1217{
1218 struct sci_port *s = arg;
1219 struct dma_chan *chan = s->chan_rx;
1220 struct uart_port *port = &s->port;
1221 struct dma_async_tx_descriptor *desc;
1222 unsigned long flags;
1223 int active, count = 0;
1224
1225 dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
1226 s->active_rx);
1227
1228 spin_lock_irqsave(&port->lock, flags);
1229
1230 active = sci_dma_rx_find_active(s);
1231 if (active >= 0)
1232 count = sci_dma_rx_push(s, s->rx_buf[active], s->buf_len_rx);
1233
1234 mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
1235
1236 if (count)
1237 tty_flip_buffer_push(&port->state->port);
1238
1239 desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[active], 1,
1240 DMA_DEV_TO_MEM,
1241 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1242 if (!desc)
1243 goto fail;
1244
1245 desc->callback = sci_dma_rx_complete;
1246 desc->callback_param = s;
1247 s->cookie_rx[active] = dmaengine_submit(desc);
1248 if (dma_submit_error(s->cookie_rx[active]))
1249 goto fail;
1250
1251 s->active_rx = s->cookie_rx[!active];
1252
1253 dma_async_issue_pending(chan);
1254
1255 spin_unlock_irqrestore(&port->lock, flags);
1256 dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
1257 __func__, s->cookie_rx[active], active, s->active_rx);
1258 return;
1259
1260fail:
1261 spin_unlock_irqrestore(&port->lock, flags);
1262 dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
1263 sci_rx_dma_release(s, true);
1264}
1265
/*
 * Tear down the Tx DMA channel: detach it from the port (under the port
 * lock), abort in-flight transfers, unmap the xmit buffer and release the
 * channel.  If @enable_pio, fall back to interrupt-driven Tx.
 * NOTE(review): takes the port lock itself, so it must not be called with
 * the lock already held.
 */
static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	spin_unlock_irqrestore(&port->lock, flags);
	dmaengine_terminate_all(chan);
	dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
			 DMA_TO_DEVICE);
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_tx(port);
}
1283
/*
 * Prepare and submit descriptors for both halves of the double-buffered
 * Rx ring and start the DMA channel.
 *
 * NOTE(review): the fail path calls sci_rx_dma_release(), which acquires
 * the port lock, yet this function is also invoked with the port lock
 * held (from rx_timer_fn() and sci_rx_interrupt()).  A submission failure
 * in those contexts would self-deadlock — TODO: confirm and rework the
 * locking (e.g. pass down whether the lock is held).
 */
static void sci_submit_rx(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_sg(chan,
			sg, 1, DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			goto fail;

		desc->callback = sci_dma_rx_complete;
		desc->callback_param = s;
		s->cookie_rx[i] = dmaengine_submit(desc);
		if (dma_submit_error(s->cookie_rx[i]))
			goto fail;

	}

	/* Reception starts with the first buffer. */
	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
	return;

fail:
	/* If the first descriptor was already queued, cancel it. */
	if (i)
		dmaengine_terminate_all(chan);
	for (i = 0; i < 2; i++)
		s->cookie_rx[i] = -EINVAL;
	s->active_rx = -EINVAL;
	sci_rx_dma_release(s, true);
}
1320
/*
 * Tx work item: carve the next contiguous chunk out of the circular xmit
 * buffer, build a slave-single DMA descriptor for it and submit it.  On
 * any dmaengine failure the port falls back to PIO transmission.
 */
static void work_fn_tx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	dma_addr_t buf;
	int head, tail;

	/*
	 * DMA is idle now.
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
	spin_lock_irq(&port->lock);
	head = xmit->head;
	tail = xmit->tail;
	/* DMA address of the chunk starting at the current tail. */
	buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
	/* Limit the transfer to the data available before the wrap point. */
	s->tx_dma_len = min_t(unsigned int,
		CIRC_CNT(head, tail, UART_XMIT_SIZE),
		CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
	if (!s->tx_dma_len) {
		/* Transmit buffer has been flushed */
		spin_unlock_irq(&port->lock);
		return;
	}

	desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		spin_unlock_irq(&port->lock);
		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	/* Make the CPU-written bytes visible to the DMA device. */
	dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
				   DMA_TO_DEVICE);

	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	s->cookie_tx = dmaengine_submit(desc);
	if (dma_submit_error(s->cookie_tx)) {
		spin_unlock_irq(&port->lock);
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	spin_unlock_irq(&port->lock);
	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
		__func__, xmit->buf, tail, head, s->cookie_tx);

	dma_async_issue_pending(chan);
}
1382
/*
 * Rx DMA timeout handler: when no completion arrived within rx_timeout,
 * stop the in-flight transfer, push whatever partial data the engine
 * already wrote, and hand Rx interrupts back to the CPU so short/slow
 * input is not stalled indefinitely.
 */
static void rx_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;
	struct dma_tx_state state;
	enum dma_status status;
	unsigned long flags;
	unsigned int read;
	int active, count;
	u16 scr;

	dev_dbg(port->dev, "DMA Rx timed out\n");

	spin_lock_irqsave(&port->lock, flags);

	active = sci_dma_rx_find_active(s);
	if (active < 0) {
		/* No transfer in flight (e.g. DMA torn down): nothing to do. */
		spin_unlock_irqrestore(&port->lock, flags);
		return;
	}

	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
	if (status == DMA_COMPLETE) {
		spin_unlock_irqrestore(&port->lock, flags);
		dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
			s->active_rx, active);

		/* Let packet complete handler take care of the packet */
		return;
	}

	dmaengine_pause(chan);

	/*
	 * sometimes DMA transfer doesn't stop even if it is stopped and
	 * data keeps on coming until transaction is complete so check
	 * for DMA_COMPLETE again
	 * Let packet complete handler take care of the packet
	 */
	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
	if (status == DMA_COMPLETE) {
		spin_unlock_irqrestore(&port->lock, flags);
		dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
		return;
	}

	/* Handle incomplete DMA receive */
	dmaengine_terminate_all(s->chan_rx);
	/* Bytes actually written = buffer length minus what is left. */
	read = sg_dma_len(&s->sg_rx[active]) - state.residue;

	if (read) {
		count = sci_dma_rx_push(s, s->rx_buf[active], read);
		if (count)
			tty_flip_buffer_push(&port->state->port);
	}

	/* NOTE(review): called with the port lock held, see sci_submit_rx(). */
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		sci_submit_rx(s);

	/* Direct new serial port interrupts back to CPU */
	scr = serial_port_in(port, SCSCR);
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		scr &= ~SCSCR_RDRQE;
		enable_irq(s->irqs[SCIx_RXI_IRQ]);
	}
	serial_port_out(port, SCSCR, scr | SCSCR_RIE);

	spin_unlock_irqrestore(&port->lock, flags);
}
1453
1454static struct dma_chan *sci_request_dma_chan(struct uart_port *port,
1455 enum dma_transfer_direction dir)
1456{
1457 struct dma_chan *chan;
1458 struct dma_slave_config cfg;
1459 int ret;
1460
1461 chan = dma_request_slave_channel(port->dev,
1462 dir == DMA_MEM_TO_DEV ? "tx" : "rx");
1463 if (!chan) {
1464 dev_warn(port->dev, "dma_request_slave_channel failed\n");
1465 return NULL;
1466 }
1467
1468 memset(&cfg, 0, sizeof(cfg));
1469 cfg.direction = dir;
1470 if (dir == DMA_MEM_TO_DEV) {
1471 cfg.dst_addr = port->mapbase +
1472 (sci_getreg(port, SCxTDR)->offset << port->regshift);
1473 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1474 } else {
1475 cfg.src_addr = port->mapbase +
1476 (sci_getreg(port, SCxRDR)->offset << port->regshift);
1477 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1478 }
1479
1480 ret = dmaengine_slave_config(chan, &cfg);
1481 if (ret) {
1482 dev_warn(port->dev, "dmaengine_slave_config failed %d\n", ret);
1483 dma_release_channel(chan);
1484 return NULL;
1485 }
1486
1487 return chan;
1488}
1489
/*
 * Set up optional DMA for a port being opened: map the xmit buffer for
 * Tx, allocate a double-buffered coherent ring for Rx, and initialise the
 * associated work item and timer.  Any failure simply leaves the port in
 * PIO mode.
 */
static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct dma_chan *chan;

	dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);

	/*
	 * DMA on console may interfere with Kernel log messages which use
	 * plain putchar(). So, simply don't use it with a console.
	 */
	if (uart_console(port))
		return;

	if (!port->dev->of_node)
		return;

	s->cookie_tx = -EINVAL;

	/*
	 * Don't request a dma channel if no channel was specified
	 * in the device tree.
	 */
	if (!of_find_property(port->dev->of_node, "dmas", NULL))
		return;

	chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		s->chan_tx = chan;
		/* UART circular tx buffer is an aligned page. */
		s->tx_dma_addr = dma_map_single(chan->device->dev,
						port->state->xmit.buf,
						UART_XMIT_SIZE,
						DMA_TO_DEVICE);
		if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
			dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
			dma_release_channel(chan);
			s->chan_tx = NULL;
		} else {
			dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
				__func__, UART_XMIT_SIZE,
				port->state->xmit.buf, &s->tx_dma_addr);
		}

		INIT_WORK(&s->work_tx, work_fn_tx);
	}

	chan = sci_request_dma_chan(port, DMA_DEV_TO_MEM);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		unsigned int i;
		dma_addr_t dma;
		void *buf;

		s->chan_rx = chan;

		/* Each half of the Rx ring holds at least two FIFOs' worth. */
		s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
		buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2,
					 &dma, GFP_KERNEL);
		if (!buf) {
			dev_warn(port->dev,
				 "Failed to allocate Rx dma buffer, using PIO\n");
			dma_release_channel(chan);
			s->chan_rx = NULL;
			return;
		}

		/* Split the single allocation into two scatterlist entries. */
		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			s->rx_buf[i] = buf;
			sg_dma_address(sg) = dma;
			sg_dma_len(sg) = s->buf_len_rx;

			buf += s->buf_len_rx;
			dma += s->buf_len_rx;
		}

		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);

		/* SCIFA/B start Rx DMA immediately; others on first Rx IRQ. */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
			sci_submit_rx(s);
	}
}
1576
/*
 * Release any DMA resources acquired by sci_request_dma(), without
 * re-enabling PIO (the port is shutting down).
 */
static void sci_free_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (s->chan_tx)
		sci_tx_dma_release(s, false);
	if (s->chan_rx)
		sci_rx_dma_release(s, false);
}
1586
/*
 * uart_ops ->flush_buffer() hook: discard any Tx DMA state to match the
 * just-emptied xmit circular buffer.
 */
static void sci_flush_buffer(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	/*
	 * In uart_flush_buffer(), the xmit circular buffer has just been
	 * cleared, so we have to reset tx_dma_len accordingly, and stop any
	 * pending transfers
	 */
	s->tx_dma_len = 0;
	if (s->chan_tx) {
		dmaengine_terminate_async(s->chan_tx);
		s->cookie_tx = -EINVAL;
	}
}
1602#else /* !CONFIG_SERIAL_SH_SCI_DMA */
/* DMA support compiled out: provide no-op stubs so callers need no #ifdefs. */
static inline void sci_request_dma(struct uart_port *port)
{
}

static inline void sci_free_dma(struct uart_port *port)
{
}

/* No DMA buffers to flush, so no ->flush_buffer() callback is installed. */
#define sci_flush_buffer	NULL
1612#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
1613
/*
 * Rx interrupt handler.  With DMA active, hand reception over to the DMA
 * engine and arm the Rx timeout; otherwise manage the FIFO trigger level
 * and drain characters in PIO mode.
 */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (s->chan_rx) {
		u16 scr = serial_port_in(port, SCSCR);
		u16 ssr = serial_port_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			/* SCIFA/B route Rx requests to the DMA controller. */
			disable_irq_nosync(irq);
			scr |= SCSCR_RDRQE;
		} else {
			scr &= ~SCSCR_RIE;
			/* NOTE(review): called with IRQ context; see the
			 * locking caveat documented at sci_submit_rx(). */
			sci_submit_rx(s);
		}
		serial_port_out(port, SCSCR, scr);
		/* Clear current interrupt */
		serial_port_out(port, SCxSR,
				ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
		/* Restore the full trigger level and (re)arm the s/w timer
		 * that lowers it again if the FIFO stays below the trigger. */
		if (!scif_rtrg_enabled(port))
			scif_set_rtrg(port, s->rx_trigger);

		mod_timer(&s->rx_fifo_timer, jiffies + DIV_ROUND_UP(
			  s->rx_frame * s->rx_fifo_timeout, 1000));
	}

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
1660
/* Tx interrupt handler: push more characters out under the port lock. */
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
1672
/*
 * Error interrupt handler: clear the error condition (discarding the
 * offending character on plain SCI parts), drain pending Rx data on
 * FIFO-equipped parts, and restart transmission if Tx DMA is not active.
 */
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			serial_port_in(port, SCxSR);
			sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		/* With Rx DMA active the engine drains the FIFO itself. */
		if (!s->chan_rx)
			sci_receive_chars(ptr);
	}

	sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	if (!s->chan_tx)
		sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}
1699
/* Break interrupt handler: process and then acknowledge break conditions. */
static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);
	sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
1710
/*
 * Handler for ports with a single muxed IRQ line: read the status
 * registers once and dispatch to the individual Tx/Rx/error/break/overrun
 * handlers as indicated.
 */
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled, orer_status = 0;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = serial_port_in(port, SCxSR);
	scr_status = serial_port_in(port, SCSCR);
	/* The overrun flag may live in SCxSR or a dedicated register. */
	if (s->params->overrun_reg == SCxSR)
		orer_status = ssr_status;
	else if (sci_getreg(port, s->params->overrun_reg)->size)
		orer_status = serial_port_in(port, s->params->overrun_reg);

	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt */
	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	/* Overrun Interrupt */
	if (orer_status & s->params->overrun_mask) {
		sci_handle_fifo_overrun(port);
		ret = IRQ_HANDLED;
	}

	return ret;
}
1756
/* Per-IRQ name and handler, indexed by the SCIx_*_IRQ slot constants. */
static const struct sci_irq_desc {
	const char	*desc;
	irq_handler_t	handler;
} sci_irq_desc[] = {
	/*
	 * Split out handlers, the default case.
	 */
	[SCIx_ERI_IRQ] = {
		.desc = "rx err",
		.handler = sci_er_interrupt,
	},

	[SCIx_RXI_IRQ] = {
		.desc = "rx full",
		.handler = sci_rx_interrupt,
	},

	[SCIx_TXI_IRQ] = {
		.desc = "tx empty",
		.handler = sci_tx_interrupt,
	},

	[SCIx_BRI_IRQ] = {
		.desc = "break",
		.handler = sci_br_interrupt,
	},

	/*
	 * Special muxed handler.
	 */
	[SCIx_MUX_IRQ] = {
		.desc = "mux",
		.handler = sci_mpxed_interrupt,
	},
};
1792
/*
 * Request all of the port's IRQs (or the single muxed IRQ) with
 * per-source names allocated via kasprintf().  Unwinds on failure.
 *
 * NOTE(review): on the out_noirq path the unwind frees port->irqs[i],
 * but in the muxed case the IRQ actually requested was up->irq — these
 * are presumably equal for muxed ports, TODO confirm.
 */
static int sci_request_irq(struct sci_port *port)
{
	struct uart_port *up = &port->port;
	int i, j, ret = 0;

	for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
		const struct sci_irq_desc *desc;
		int irq;

		if (SCIx_IRQ_IS_MUXED(port)) {
			/* Single shared line: request it once via the mux
			 * handler (the loop terminates after this pass). */
			i = SCIx_MUX_IRQ;
			irq = up->irq;
		} else {
			irq = port->irqs[i];

			/*
			 * Certain port types won't support all of the
			 * available interrupt sources.
			 */
			if (unlikely(irq < 0))
				continue;
		}

		desc = sci_irq_desc + i;
		port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
					    dev_name(up->dev), desc->desc);
		if (!port->irqstr[j]) {
			ret = -ENOMEM;
			goto out_nomem;
		}

		ret = request_irq(irq, desc->handler, up->irqflags,
				  port->irqstr[j], port);
		if (unlikely(ret)) {
			dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
			goto out_noirq;
		}
	}

	return 0;

out_noirq:
	/* Free the IRQs requested so far (name string freed below). */
	while (--i >= 0)
		free_irq(port->irqs[i], port);

out_nomem:
	while (--j >= 0)
		kfree(port->irqstr[j]);

	return ret;
}
1844
/*
 * Free the IRQs and name strings acquired in sci_request_irq().
 */
static void sci_free_irq(struct sci_port *port)
{
	int i;

	/*
	 * Walk the IRQ slots in order.  For muxed ports the first slot
	 * presumably refers to the single shared IRQ (TODO confirm), so
	 * free it once and return.  (The original comment claimed reverse
	 * order, but the loop is forward.)
	 */
	for (i = 0; i < SCIx_NR_IRQS; i++) {
		int irq = port->irqs[i];

		/*
		 * Certain port types won't support all of the available
		 * interrupt sources.
		 */
		if (unlikely(irq < 0))
			continue;

		free_irq(port->irqs[i], port);
		kfree(port->irqstr[i]);

		if (SCIx_IRQ_IS_MUXED(port)) {
			/* If there's only one IRQ, we're done. */
			return;
		}
	}
}
1872
1873static unsigned int sci_tx_empty(struct uart_port *port)
1874{
1875 unsigned short status = serial_port_in(port, SCxSR);
1876 unsigned short in_tx_fifo = sci_txfill(port);
1877
1878 return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
1879}
1880
/*
 * Drive the RTS# output.  SCIFA/B use the port data/control registers
 * (SCPDR/SCPCR); other parts use SCSPTR if present.  RTS# is active low,
 * so @state true clears the data bit.
 */
static void sci_set_rts(struct uart_port *port, bool state)
{
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 data = serial_port_in(port, SCPDR);

		/* Active low */
		if (state)
			data &= ~SCPDR_RTSD;
		else
			data |= SCPDR_RTSD;
		serial_port_out(port, SCPDR, data);

		/* RTS# is output */
		serial_port_out(port, SCPCR,
				serial_port_in(port, SCPCR) | SCPCR_RTSC);
	} else if (sci_getreg(port, SCSPTR)->size) {
		u16 ctrl = serial_port_in(port, SCSPTR);

		/* Active low */
		if (state)
			ctrl &= ~SCSPTR_RTSDT;
		else
			ctrl |= SCSPTR_RTSDT;
		serial_port_out(port, SCSPTR, ctrl);
	}
}
1907
1908static bool sci_get_cts(struct uart_port *port)
1909{
1910 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1911 /* Active low */
1912 return !(serial_port_in(port, SCPDR) & SCPDR_CTSD);
1913 } else if (sci_getreg(port, SCSPTR)->size) {
1914 /* Active low */
1915 return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT);
1916 }
1917
1918 return true;
1919}
1920
1921/*
1922 * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
1923 * CTS/RTS is supported in hardware by at least one port and controlled
1924 * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
1925 * handled via the ->init_pins() op, which is a bit of a one-way street,
1926 * lacking any ability to defer pin control -- this will later be
1927 * converted over to the GPIO framework).
1928 *
1929 * Other modes (such as loopback) are supported generically on certain
1930 * port types, but not others. For these it's sufficient to test for the
1931 * existence of the support register and simply ignore the port type.
1932 */
1933static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
1934{
1935 struct sci_port *s = to_sci_port(port);
1936
1937 if (mctrl & TIOCM_LOOP) {
1938 const struct plat_sci_reg *reg;
1939
1940 /*
1941 * Standard loopback mode for SCFCR ports.
1942 */
1943 reg = sci_getreg(port, SCFCR);
1944 if (reg->size)
1945 serial_port_out(port, SCFCR,
1946 serial_port_in(port, SCFCR) |
1947 SCFCR_LOOP);
1948 }
1949
1950 mctrl_gpio_set(s->gpios, mctrl);
1951
1952 if (!s->has_rtscts)
1953 return;
1954
1955 if (!(mctrl & TIOCM_RTS)) {
1956 /* Disable Auto RTS */
1957 serial_port_out(port, SCFCR,
1958 serial_port_in(port, SCFCR) & ~SCFCR_MCE);
1959
1960 /* Clear RTS */
1961 sci_set_rts(port, 0);
1962 } else if (s->autorts) {
1963 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1964 /* Enable RTS# pin function */
1965 serial_port_out(port, SCPCR,
1966 serial_port_in(port, SCPCR) & ~SCPCR_RTSC);
1967 }
1968
1969 /* Enable Auto RTS */
1970 serial_port_out(port, SCFCR,
1971 serial_port_in(port, SCFCR) | SCFCR_MCE);
1972 } else {
1973 /* Set RTS */
1974 sci_set_rts(port, 1);
1975 }
1976}
1977
/*
 * uart_ops ->get_mctrl() hook: combine mctrl GPIO state with hardware
 * CTS.  Signals with no backing pin are reported as asserted.
 */
static unsigned int sci_get_mctrl(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct mctrl_gpios *gpios = s->gpios;
	unsigned int mctrl = 0;

	mctrl_gpio_get(gpios, &mctrl);

	/*
	 * CTS/RTS is handled in hardware when supported, while nothing
	 * else is wired up.
	 */
	if (s->autorts) {
		if (sci_get_cts(port))
			mctrl |= TIOCM_CTS;
	} else if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS))) {
		/* No CTS GPIO either: pretend CTS is always asserted. */
		mctrl |= TIOCM_CTS;
	}
	if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR)))
		mctrl |= TIOCM_DSR;
	if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD)))
		mctrl |= TIOCM_CAR;

	return mctrl;
}
2003
2004static void sci_enable_ms(struct uart_port *port)
2005{
2006 mctrl_gpio_enable_ms(to_sci_port(port)->gpios);
2007}
2008
/*
 * uart_ops ->break_ctl() hook: start (-1) or stop a break condition by
 * driving the Tx pin directly through SCSPTR while toggling the
 * transmitter enable.
 */
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	unsigned short scscr, scsptr;

	/* check whether the port has SCSPTR */
	if (!sci_getreg(port, SCSPTR)->size) {
		/*
		 * Not supported by hardware. Most parts couple break and rx
		 * interrupts together, with break detection always enabled.
		 */
		return;
	}

	scsptr = serial_port_in(port, SCSPTR);
	scscr = serial_port_in(port, SCSCR);

	if (break_state == -1) {
		/* Force the Tx pin low (break) and disable the transmitter. */
		scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
		scscr &= ~SCSCR_TE;
	} else {
		/* Release the pin and re-enable the transmitter. */
		scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO;
		scscr |= SCSCR_TE;
	}

	serial_port_out(port, SCSPTR, scsptr);
	serial_port_out(port, SCSCR, scscr);
}
2036
2037static int sci_startup(struct uart_port *port)
2038{
2039 struct sci_port *s = to_sci_port(port);
2040 int ret;
2041
2042 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
2043
2044 sci_request_dma(port);
2045
2046 ret = sci_request_irq(s);
2047 if (unlikely(ret < 0)) {
2048 sci_free_dma(port);
2049 return ret;
2050 }
2051
2052 return 0;
2053}
2054
/*
 * uart_ops ->shutdown() hook: quiesce the hardware (keeping only the
 * clock-select bits), stop the software timers, then free IRQs and DMA.
 */
static void sci_shutdown(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned long flags;
	u16 scr;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	s->autorts = false;
	mctrl_gpio_disable_ms(to_sci_port(port)->gpios);

	spin_lock_irqsave(&port->lock, flags);
	sci_stop_rx(port);
	sci_stop_tx(port);
	/* Stop RX and TX, disable related interrupts, keep clock source */
	scr = serial_port_in(port, SCSCR);
	serial_port_out(port, SCSCR, scr & (SCSCR_CKE1 | SCSCR_CKE0));
	spin_unlock_irqrestore(&port->lock, flags);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (s->chan_rx) {
		dev_dbg(port->dev, "%s(%d) deleting rx_timer\n", __func__,
			port->line);
		del_timer_sync(&s->rx_timer);
	}
#endif

	/* The FIFO-trigger timer only runs when a timeout was configured. */
	if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0)
		del_timer_sync(&s->rx_fifo_timer);
	sci_free_irq(s);
	sci_free_dma(port);
}
2087
2088static int sci_sck_calc(struct sci_port *s, unsigned int bps,
2089 unsigned int *srr)
2090{
2091 unsigned long freq = s->clk_rates[SCI_SCK];
2092 int err, min_err = INT_MAX;
2093 unsigned int sr;
2094
2095 if (s->port.type != PORT_HSCIF)
2096 freq *= 2;
2097
2098 for_each_sr(sr, s) {
2099 err = DIV_ROUND_CLOSEST(freq, sr) - bps;
2100 if (abs(err) >= abs(min_err))
2101 continue;
2102
2103 min_err = err;
2104 *srr = sr - 1;
2105
2106 if (!err)
2107 break;
2108 }
2109
2110 dev_dbg(s->port.dev, "SCK: %u%+d bps using SR %u\n", bps, min_err,
2111 *srr + 1);
2112 return min_err;
2113}
2114
/*
 * Compute the BRG divider (@dlr, 1..65535) and sampling rate (@srr) for
 * clock @freq that best approximate @bps.  Returns the (signed) bit-rate
 * error of the chosen setting.
 */
static int sci_brg_calc(struct sci_port *s, unsigned int bps,
			unsigned long freq, unsigned int *dlr,
			unsigned int *srr)
{
	int err, min_err = INT_MAX;
	unsigned int sr, dl;

	/* All but HSCIF sample off the doubled clock. */
	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	for_each_sr(sr, s) {
		dl = DIV_ROUND_CLOSEST(freq, sr * bps);
		dl = clamp(dl, 1U, 65535U);

		err = DIV_ROUND_CLOSEST(freq, sr * dl) - bps;
		if (abs(err) >= abs(min_err))
			continue;

		min_err = err;
		*dlr = dl;
		*srr = sr - 1;

		if (!err)
			break;
	}

	dev_dbg(s->port.dev, "BRG: %u%+d bps using DL %u SR %u\n", bps,
		min_err, *dlr, *srr + 1);
	return min_err;
}
2145
/*
 * Calculate sample rate (@srr), bit-rate register (@brr, 0..255) and
 * clock select (@cks, 0..3) for the functional clock that best
 * approximate @bps.  Returns the (signed) bit-rate error.
 * NOTE(review): if no (sr, cks) combination fits, *brr/*srr/*cks are left
 * untouched and min_err stays INT_MAX — callers pass pre-initialised
 * storage, TODO confirm all do.
 */
static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
			  unsigned int *brr, unsigned int *srr,
			  unsigned int *cks)
{
	unsigned long freq = s->clk_rates[SCI_FCK];
	unsigned int sr, br, prediv, scrate, c;
	int err, min_err = INT_MAX;

	/* All but HSCIF sample off the doubled clock. */
	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	/*
	 * Find the combination of sample rate and clock select with the
	 * smallest deviation from the desired baud rate.
	 * Prefer high sample rates to maximise the receive margin.
	 *
	 * M: Receive margin (%)
	 * N: Ratio of bit rate to clock (N = sampling rate)
	 * D: Clock duty (D = 0 to 1.0)
	 * L: Frame length (L = 9 to 12)
	 * F: Absolute value of clock frequency deviation
	 *
	 *  M = |(0.5 - 1 / 2 * N) - ((L - 0.5) * F) -
	 *      (|D - 0.5| / N * (1 + F))|
	 *  NOTE: Usually, treat D for 0.5, F is 0 by this calculation.
	 */
	for_each_sr(sr, s) {
		for (c = 0; c <= 3; c++) {
			/* integerized formulas from HSCIF documentation */
			prediv = sr * (1 << (2 * c + 1));

			/*
			 * We need to calculate:
			 *
			 *     br = freq / (prediv * bps) clamped to [1..256]
			 *     err = freq / (br * prediv) - bps
			 *
			 * Watch out for overflow when calculating the desired
			 * sampling clock rate!
			 */
			if (bps > UINT_MAX / prediv)
				break;

			scrate = prediv * bps;
			br = DIV_ROUND_CLOSEST(freq, scrate);
			br = clamp(br, 1U, 256U);

			err = DIV_ROUND_CLOSEST(freq, br * prediv) - bps;
			if (abs(err) >= abs(min_err))
				continue;

			min_err = err;
			*brr = br - 1;
			*srr = sr - 1;
			*cks = c;

			if (!err)
				goto found;
		}
	}

	/* Exhausting both loops falls through here with the best match. */
found:
	dev_dbg(s->port.dev, "BRR: %u%+d bps using N %u SR %u cks %u\n", bps,
		min_err, *brr, *srr + 1, *cks);
	return min_err;
}
2213
/*
 * Put the port hardware into a known state: disable Tx/Rx and clocks,
 * reset the FIFOs, clear status flags, and re-establish the Rx FIFO
 * trigger level (and its optional software timer).
 */
static void sci_reset(struct uart_port *port)
{
	const struct plat_sci_reg *reg;
	unsigned int status;
	struct sci_port *s = to_sci_port(port);

	serial_port_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */

	/* Reset the Tx and Rx FIFOs where the port has an SCFCR. */
	reg = sci_getreg(port, SCFCR);
	if (reg->size)
		serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);

	sci_clear_SCxSR(port,
			SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
			SCxSR_BREAK_CLEAR(port));
	if (sci_getreg(port, SCLSR)->size) {
		status = serial_port_in(port, SCLSR);
		status &= ~(SCLSR_TO | SCLSR_ORER);
		serial_port_out(port, SCLSR, status);
	}

	if (s->rx_trigger > 1) {
		if (s->rx_fifo_timeout) {
			/* Start at trigger level 1; the timer raises it. */
			scif_set_rtrg(port, 1);
			setup_timer(&s->rx_fifo_timer, rx_fifo_timer_fn,
				    (unsigned long)s);
		} else {
			if (port->type == PORT_SCIFA ||
			    port->type == PORT_SCIFB)
				scif_set_rtrg(port, 1);
			else
				scif_set_rtrg(port, s->rx_trigger);
		}
	}
}
2249
2250static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
2251 struct ktermios *old)
2252{
2253 unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i, bits;
2254 unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0;
2255 unsigned int brr1 = 255, cks1 = 0, srr1 = 15, dl1 = 0;
2256 struct sci_port *s = to_sci_port(port);
2257 const struct plat_sci_reg *reg;
2258 int min_err = INT_MAX, err;
2259 unsigned long max_freq = 0;
2260 int best_clk = -1;
2261
2262 if ((termios->c_cflag & CSIZE) == CS7)
2263 smr_val |= SCSMR_CHR;
2264 if (termios->c_cflag & PARENB)
2265 smr_val |= SCSMR_PE;
2266 if (termios->c_cflag & PARODD)
2267 smr_val |= SCSMR_PE | SCSMR_ODD;
2268 if (termios->c_cflag & CSTOPB)
2269 smr_val |= SCSMR_STOP;
2270
2271 /*
2272 * earlyprintk comes here early on with port->uartclk set to zero.
2273 * the clock framework is not up and running at this point so here
2274 * we assume that 115200 is the maximum baud rate. please note that
2275 * the baud rate is not programmed during earlyprintk - it is assumed
2276 * that the previous boot loader has enabled required clocks and
2277 * setup the baud rate generator hardware for us already.
2278 */
2279 if (!port->uartclk) {
2280 baud = uart_get_baud_rate(port, termios, old, 0, 115200);
2281 goto done;
2282 }
2283
2284 for (i = 0; i < SCI_NUM_CLKS; i++)
2285 max_freq = max(max_freq, s->clk_rates[i]);
2286
2287 baud = uart_get_baud_rate(port, termios, old, 0, max_freq / min_sr(s));
2288 if (!baud)
2289 goto done;
2290
2291 /*
2292 * There can be multiple sources for the sampling clock. Find the one
2293 * that gives us the smallest deviation from the desired baud rate.
2294 */
2295
2296 /* Optional Undivided External Clock */
2297 if (s->clk_rates[SCI_SCK] && port->type != PORT_SCIFA &&
2298 port->type != PORT_SCIFB) {
2299 err = sci_sck_calc(s, baud, &srr1);
2300 if (abs(err) < abs(min_err)) {
2301 best_clk = SCI_SCK;
2302 scr_val = SCSCR_CKE1;
2303 sccks = SCCKS_CKS;
2304 min_err = err;
2305 srr = srr1;
2306 if (!err)
2307 goto done;
2308 }
2309 }
2310
2311 /* Optional BRG Frequency Divided External Clock */
2312 if (s->clk_rates[SCI_SCIF_CLK] && sci_getreg(port, SCDL)->size) {
2313 err = sci_brg_calc(s, baud, s->clk_rates[SCI_SCIF_CLK], &dl1,
2314 &srr1);
2315 if (abs(err) < abs(min_err)) {
2316 best_clk = SCI_SCIF_CLK;
2317 scr_val = SCSCR_CKE1;
2318 sccks = 0;
2319 min_err = err;
2320 dl = dl1;
2321 srr = srr1;
2322 if (!err)
2323 goto done;
2324 }
2325 }
2326
2327 /* Optional BRG Frequency Divided Internal Clock */
2328 if (s->clk_rates[SCI_BRG_INT] && sci_getreg(port, SCDL)->size) {
2329 err = sci_brg_calc(s, baud, s->clk_rates[SCI_BRG_INT], &dl1,
2330 &srr1);
2331 if (abs(err) < abs(min_err)) {
2332 best_clk = SCI_BRG_INT;
2333 scr_val = SCSCR_CKE1;
2334 sccks = SCCKS_XIN;
2335 min_err = err;
2336 dl = dl1;
2337 srr = srr1;
2338 if (!min_err)
2339 goto done;
2340 }
2341 }
2342
2343 /* Divided Functional Clock using standard Bit Rate Register */
2344 err = sci_scbrr_calc(s, baud, &brr1, &srr1, &cks1);
2345 if (abs(err) < abs(min_err)) {
2346 best_clk = SCI_FCK;
2347 scr_val = 0;
2348 min_err = err;
2349 brr = brr1;
2350 srr = srr1;
2351 cks = cks1;
2352 }
2353
2354done:
2355 if (best_clk >= 0)
2356 dev_dbg(port->dev, "Using clk %pC for %u%+d bps\n",
2357 s->clks[best_clk], baud, min_err);
2358
2359 sci_port_enable(s);
2360
2361 /*
2362 * Program the optional External Baud Rate Generator (BRG) first.
2363 * It controls the mux to select (H)SCK or frequency divided clock.
2364 */
2365 if (best_clk >= 0 && sci_getreg(port, SCCKS)->size) {
2366 serial_port_out(port, SCDL, dl);
2367 serial_port_out(port, SCCKS, sccks);
2368 }
2369
2370 sci_reset(port);
2371
2372 uart_update_timeout(port, termios->c_cflag, baud);
2373
2374 if (best_clk >= 0) {
2375 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
2376 switch (srr + 1) {
2377 case 5: smr_val |= SCSMR_SRC_5; break;
2378 case 7: smr_val |= SCSMR_SRC_7; break;
2379 case 11: smr_val |= SCSMR_SRC_11; break;
2380 case 13: smr_val |= SCSMR_SRC_13; break;
2381 case 16: smr_val |= SCSMR_SRC_16; break;
2382 case 17: smr_val |= SCSMR_SRC_17; break;
2383 case 19: smr_val |= SCSMR_SRC_19; break;
2384 case 27: smr_val |= SCSMR_SRC_27; break;
2385 }
2386 smr_val |= cks;
2387 dev_dbg(port->dev,
2388 "SCR 0x%x SMR 0x%x BRR %u CKS 0x%x DL %u SRR %u\n",
2389 scr_val, smr_val, brr, sccks, dl, srr);
2390 serial_port_out(port, SCSCR, scr_val);
2391 serial_port_out(port, SCSMR, smr_val);
2392 serial_port_out(port, SCBRR, brr);
2393 if (sci_getreg(port, HSSRR)->size)
2394 serial_port_out(port, HSSRR, srr | HSCIF_SRE);
2395
2396 /* Wait one bit interval */
2397 udelay((1000000 + (baud - 1)) / baud);
2398 } else {
2399 /* Don't touch the bit rate configuration */
2400 scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
2401 smr_val |= serial_port_in(port, SCSMR) &
2402 (SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
2403 dev_dbg(port->dev, "SCR 0x%x SMR 0x%x\n", scr_val, smr_val);
2404 serial_port_out(port, SCSCR, scr_val);
2405 serial_port_out(port, SCSMR, smr_val);
2406 }
2407
2408 sci_init_pins(port, termios->c_cflag);
2409
2410 port->status &= ~UPSTAT_AUTOCTS;
2411 s->autorts = false;
2412 reg = sci_getreg(port, SCFCR);
2413 if (reg->size) {
2414 unsigned short ctrl = serial_port_in(port, SCFCR);
2415
2416 if ((port->flags & UPF_HARD_FLOW) &&
2417 (termios->c_cflag & CRTSCTS)) {
2418 /* There is no CTS interrupt to restart the hardware */
2419 port->status |= UPSTAT_AUTOCTS;
2420 /* MCE is enabled when RTS is raised */
2421 s->autorts = true;
2422 }
2423
2424 /*
2425 * As we've done a sci_reset() above, ensure we don't
2426 * interfere with the FIFOs while toggling MCE. As the
2427 * reset values could still be set, simply mask them out.
2428 */
2429 ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
2430
2431 serial_port_out(port, SCFCR, ctrl);
2432 }
2433 if (port->flags & UPF_HARD_FLOW) {
2434 /* Refresh (Auto) RTS */
2435 sci_set_mctrl(port, port->mctrl);
2436 }
2437
2438 scr_val |= SCSCR_RE | SCSCR_TE |
2439 (s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0));
2440 dev_dbg(port->dev, "SCSCR 0x%x\n", scr_val);
2441 serial_port_out(port, SCSCR, scr_val);
2442 if ((srr + 1 == 5) &&
2443 (port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
2444 /*
2445 * In asynchronous mode, when the sampling rate is 1/5, first
2446 * received data may become invalid on some SCIFA and SCIFB.
2447 * To avoid this problem wait more than 1 serial data time (1
2448 * bit time x serial data number) after setting SCSCR.RE = 1.
2449 */
2450 udelay(DIV_ROUND_UP(10 * 1000000, baud));
2451 }
2452
2453 /*
2454 * Calculate delay for 2 DMA buffers (4 FIFO).
2455 * See serial_core.c::uart_update_timeout().
2456 * With 10 bits (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above
2457 * function calculates 1 jiffie for the data plus 5 jiffies for the
2458 * "slop(e)." Then below we calculate 5 jiffies (20ms) for 2 DMA
2459 * buffers (4 FIFO sizes), but when performing a faster transfer, the
2460 * value obtained by this formula is too small. Therefore, if the value
2461 * is smaller than 20ms, use 20ms as the timeout value for DMA.
2462 */
2463 /* byte size and parity */
2464 switch (termios->c_cflag & CSIZE) {
2465 case CS5:
2466 bits = 7;
2467 break;
2468 case CS6:
2469 bits = 8;
2470 break;
2471 case CS7:
2472 bits = 9;
2473 break;
2474 default:
2475 bits = 10;
2476 break;
2477 }
2478
2479 if (termios->c_cflag & CSTOPB)
2480 bits++;
2481 if (termios->c_cflag & PARENB)
2482 bits++;
2483
2484 s->rx_frame = (100 * bits * HZ) / (baud / 10);
2485#ifdef CONFIG_SERIAL_SH_SCI_DMA
2486 s->rx_timeout = DIV_ROUND_UP(s->buf_len_rx * 2 * s->rx_frame, 1000);
2487 dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
2488 s->rx_timeout * 1000 / HZ, port->timeout);
2489 if (s->rx_timeout < msecs_to_jiffies(20))
2490 s->rx_timeout = msecs_to_jiffies(20);
2491#endif
2492
2493 if ((termios->c_cflag & CREAD) != 0)
2494 sci_start_rx(port);
2495
2496 sci_port_disable(s);
2497
2498 if (UART_ENABLE_MS(port, termios->c_cflag))
2499 sci_enable_ms(port);
2500}
2501
2502static void sci_pm(struct uart_port *port, unsigned int state,
2503 unsigned int oldstate)
2504{
2505 struct sci_port *sci_port = to_sci_port(port);
2506
2507 switch (state) {
2508 case UART_PM_STATE_OFF:
2509 sci_port_disable(sci_port);
2510 break;
2511 default:
2512 sci_port_enable(sci_port);
2513 break;
2514 }
2515}
2516
2517static const char *sci_type(struct uart_port *port)
2518{
2519 switch (port->type) {
2520 case PORT_IRDA:
2521 return "irda";
2522 case PORT_SCI:
2523 return "sci";
2524 case PORT_SCIF:
2525 return "scif";
2526 case PORT_SCIFA:
2527 return "scifa";
2528 case PORT_SCIFB:
2529 return "scifb";
2530 case PORT_HSCIF:
2531 return "hscif";
2532 }
2533
2534 return NULL;
2535}
2536
2537static int sci_remap_port(struct uart_port *port)
2538{
2539 struct sci_port *sport = to_sci_port(port);
2540
2541 /*
2542 * Nothing to do if there's already an established membase.
2543 */
2544 if (port->membase)
2545 return 0;
2546
2547 if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
2548 port->membase = ioremap_nocache(port->mapbase, sport->reg_size);
2549 if (unlikely(!port->membase)) {
2550 dev_err(port->dev, "can't remap port#%d\n", port->line);
2551 return -ENXIO;
2552 }
2553 } else {
2554 /*
2555 * For the simple (and majority of) cases where we don't
2556 * need to do any remapping, just cast the cookie
2557 * directly.
2558 */
2559 port->membase = (void __iomem *)(uintptr_t)port->mapbase;
2560 }
2561
2562 return 0;
2563}
2564
2565static void sci_release_port(struct uart_port *port)
2566{
2567 struct sci_port *sport = to_sci_port(port);
2568
2569 if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
2570 iounmap(port->membase);
2571 port->membase = NULL;
2572 }
2573
2574 release_mem_region(port->mapbase, sport->reg_size);
2575}
2576
2577static int sci_request_port(struct uart_port *port)
2578{
2579 struct resource *res;
2580 struct sci_port *sport = to_sci_port(port);
2581 int ret;
2582
2583 res = request_mem_region(port->mapbase, sport->reg_size,
2584 dev_name(port->dev));
2585 if (unlikely(res == NULL)) {
2586 dev_err(port->dev, "request_mem_region failed.");
2587 return -EBUSY;
2588 }
2589
2590 ret = sci_remap_port(port);
2591 if (unlikely(ret != 0)) {
2592 release_resource(res);
2593 return ret;
2594 }
2595
2596 return 0;
2597}
2598
2599static void sci_config_port(struct uart_port *port, int flags)
2600{
2601 if (flags & UART_CONFIG_TYPE) {
2602 struct sci_port *sport = to_sci_port(port);
2603
2604 port->type = sport->cfg->type;
2605 sci_request_port(port);
2606 }
2607}
2608
2609static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
2610{
2611 if (ser->baud_base < 2400)
2612 /* No paper tape reader for Mitch.. */
2613 return -EINVAL;
2614
2615 return 0;
2616}
2617
/*
 * serial core operations for all (H)SCI(F) port variants; installed as
 * port->ops by sci_init_single().
 */
static const struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.enable_ms	= sci_enable_ms,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.flush_buffer	= sci_flush_buffer,
	.set_termios	= sci_set_termios,
	.pm		= sci_pm,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
2642
/*
 * Look up the (optional) clocks feeding an SCI port and store them in
 * sci_port->clks[].
 *
 * Each clock is looked up by the name in clk_names[]; a missing clock
 * is not fatal (the slot is left NULL), except that a failed "fck"
 * lookup falls back to the legacy "sci_ick" name and then to the
 * global "peripheral_clk" before giving up. -EPROBE_DEFER from any
 * lookup is always propagated so the probe can be retried.
 *
 * Returns 0 on success or a negative error code.
 */
static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
{
	const char *clk_names[] = {
		[SCI_FCK] = "fck",
		[SCI_SCK] = "sck",
		[SCI_BRG_INT] = "brg_int",
		[SCI_SCIF_CLK] = "scif_clk",
	};
	struct clk *clk;
	unsigned int i;

	/* HSCIF names its external clock input "hsck" instead of "sck". */
	if (sci_port->cfg->type == PORT_HSCIF)
		clk_names[SCI_SCK] = "hsck";

	for (i = 0; i < SCI_NUM_CLKS; i++) {
		clk = devm_clk_get(dev, clk_names[i]);
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		if (IS_ERR(clk) && i == SCI_FCK) {
			/*
			 * "fck" used to be called "sci_ick", and we need to
			 * maintain DT backward compatibility.
			 */
			clk = devm_clk_get(dev, "sci_ick");
			if (PTR_ERR(clk) == -EPROBE_DEFER)
				return -EPROBE_DEFER;

			if (!IS_ERR(clk))
				goto found;

			/*
			 * Not all SH platforms declare a clock lookup entry
			 * for SCI devices, in which case we need to get the
			 * global "peripheral_clk" clock.
			 */
			clk = devm_clk_get(dev, "peripheral_clk");
			if (!IS_ERR(clk))
				goto found;

			/* The functional clock is mandatory; fail the probe. */
			dev_err(dev, "failed to get %s (%ld)\n", clk_names[i],
				PTR_ERR(clk));
			return PTR_ERR(clk);
		}

found:
		/* Optional clocks that were not found simply stay NULL. */
		if (IS_ERR(clk))
			dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
				PTR_ERR(clk));
		else
			dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
				clk, clk_get_rate(clk));
		sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
	}
	return 0;
}
2699
2700static const struct sci_port_params *
2701sci_probe_regmap(const struct plat_sci_port *cfg)
2702{
2703 unsigned int regtype;
2704
2705 if (cfg->regtype != SCIx_PROBE_REGTYPE)
2706 return &sci_port_params[cfg->regtype];
2707
2708 switch (cfg->type) {
2709 case PORT_SCI:
2710 regtype = SCIx_SCI_REGTYPE;
2711 break;
2712 case PORT_IRDA:
2713 regtype = SCIx_IRDA_REGTYPE;
2714 break;
2715 case PORT_SCIFA:
2716 regtype = SCIx_SCIFA_REGTYPE;
2717 break;
2718 case PORT_SCIFB:
2719 regtype = SCIx_SCIFB_REGTYPE;
2720 break;
2721 case PORT_SCIF:
2722 /*
2723 * The SH-4 is a bit of a misnomer here, although that's
2724 * where this particular port layout originated. This
2725 * configuration (or some slight variation thereof)
2726 * remains the dominant model for all SCIFs.
2727 */
2728 regtype = SCIx_SH4_SCIF_REGTYPE;
2729 break;
2730 case PORT_HSCIF:
2731 regtype = SCIx_HSCIF_REGTYPE;
2732 break;
2733 default:
2734 pr_err("Can't probe register map for given port\n");
2735 return NULL;
2736 }
2737
2738 return &sci_port_params[regtype];
2739}
2740
/*
 * One-time initialization of a single SCI port instance.
 *
 * Populates the uart_port embedded in @sci_port from the platform
 * device's MEM/IRQ resources and the platform data @p. With @early set
 * (the earlyprintk path) the clock lookup and runtime-PM enable are
 * skipped, since the driver model is not fully up at that point.
 *
 * Returns 0 on success or a negative error code.
 */
static int sci_init_single(struct platform_device *dev,
			   struct sci_port *sci_port, unsigned int index,
			   const struct plat_sci_port *p, bool early)
{
	struct uart_port *port = &sci_port->port;
	const struct resource *res;
	unsigned int i;
	int ret;

	sci_port->cfg	= p;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENOMEM;

	port->mapbase = res->start;
	sci_port->reg_size = resource_size(res);

	/* Missing entries yield a negative value in the respective slot. */
	for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
		sci_port->irqs[i] = platform_get_irq(dev, i);

	/* The SCI generates several interrupts. They can be muxed together or
	 * connected to different interrupt lines. In the muxed case only one
	 * interrupt resource is specified. In the non-muxed case three or four
	 * interrupt resources are specified, as the BRI interrupt is optional.
	 */
	if (sci_port->irqs[0] < 0)
		return -ENXIO;

	if (sci_port->irqs[1] < 0) {
		/* Muxed IRQ: fan the single line out to all four slots. */
		sci_port->irqs[1] = sci_port->irqs[0];
		sci_port->irqs[2] = sci_port->irqs[0];
		sci_port->irqs[3] = sci_port->irqs[0];
	}

	sci_port->params = sci_probe_regmap(p);
	if (unlikely(sci_port->params == NULL))
		return -EINVAL;

	/* Default RX FIFO trigger level, per port type. */
	switch (p->type) {
	case PORT_SCIFB:
		sci_port->rx_trigger = 48;
		break;
	case PORT_HSCIF:
		sci_port->rx_trigger = 64;
		break;
	case PORT_SCIFA:
		sci_port->rx_trigger = 32;
		break;
	case PORT_SCIF:
		if (p->regtype == SCIx_SH7705_SCIF_REGTYPE)
			/* RX triggering not implemented for this IP */
			sci_port->rx_trigger = 1;
		else
			sci_port->rx_trigger = 8;
		break;
	default:
		sci_port->rx_trigger = 1;
		break;
	}

	sci_port->rx_fifo_timeout = 0;

	/* SCIFA on sh7723 and sh7724 need a custom sampling rate that doesn't
	 * match the SoC datasheet, this should be investigated. Let platform
	 * data override the sampling rate for now.
	 */
	sci_port->sampling_rate_mask = p->sampling_rate
				     ? SCI_SR(p->sampling_rate)
				     : sci_port->params->sampling_rate_mask;

	if (!early) {
		ret = sci_init_clocks(sci_port, &dev->dev);
		if (ret < 0)
			return ret;

		port->dev = &dev->dev;

		pm_runtime_enable(&dev->dev);
	}

	port->type		= p->type;
	port->flags		= UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
	port->fifosize		= sci_port->params->fifosize;

	if (port->type == PORT_SCI) {
		/*
		 * regshift selects the register stride; derived here from
		 * the size of the register window.
		 */
		if (sci_port->reg_size >= 0x20)
			port->regshift = 2;
		else
			port->regshift = 1;
	}

	/*
	 * The UART port needs an IRQ value, so we peg this to the RX IRQ
	 * for the multi-IRQ ports, which is where we are primarily
	 * concerned with the shutdown path synchronization.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq		= sci_port->irqs[SCIx_RXI_IRQ];
	port->irqflags		= 0;

	port->serial_in		= sci_serial_in;
	port->serial_out	= sci_serial_out;

	return 0;
}
2852
/* Undo the non-early side effects of sci_init_single() (runtime PM). */
static void sci_cleanup_single(struct sci_port *port)
{
	pm_runtime_disable(port->port.dev);
}
2857
2858#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
2859 defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
/* uart_console_write() callback: emit a single character by polling. */
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}
2864
2865/*
2866 * Print a string to the serial port trying not to disturb
2867 * any possible real use of the port...
2868 */
2869static void serial_console_write(struct console *co, const char *s,
2870 unsigned count)
2871{
2872 struct sci_port *sci_port = &sci_ports[co->index];
2873 struct uart_port *port = &sci_port->port;
2874 unsigned short bits, ctrl, ctrl_temp;
2875 unsigned long flags;
2876 int locked = 1;
2877
2878#if defined(SUPPORT_SYSRQ)
2879 if (port->sysrq)
2880 locked = 0;
2881 else
2882#endif
2883 if (oops_in_progress)
2884 locked = spin_trylock_irqsave(&port->lock, flags);
2885 else
2886 spin_lock_irqsave(&port->lock, flags);
2887
2888 /* first save SCSCR then disable interrupts, keep clock source */
2889 ctrl = serial_port_in(port, SCSCR);
2890 ctrl_temp = SCSCR_RE | SCSCR_TE |
2891 (sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
2892 (ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
2893 serial_port_out(port, SCSCR, ctrl_temp);
2894
2895 uart_console_write(port, s, count, serial_console_putchar);
2896
2897 /* wait until fifo is empty and last bit has been transmitted */
2898 bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
2899 while ((serial_port_in(port, SCxSR) & bits) != bits)
2900 cpu_relax();
2901
2902 /* restore the SCSCR */
2903 serial_port_out(port, SCSCR, ctrl);
2904
2905 if (locked)
2906 spin_unlock_irqrestore(&port->lock, flags);
2907}
2908
2909static int serial_console_setup(struct console *co, char *options)
2910{
2911 struct sci_port *sci_port;
2912 struct uart_port *port;
2913 int baud = 115200;
2914 int bits = 8;
2915 int parity = 'n';
2916 int flow = 'n';
2917 int ret;
2918
2919 /*
2920 * Refuse to handle any bogus ports.
2921 */
2922 if (co->index < 0 || co->index >= SCI_NPORTS)
2923 return -ENODEV;
2924
2925 sci_port = &sci_ports[co->index];
2926 port = &sci_port->port;
2927
2928 /*
2929 * Refuse to handle uninitialized ports.
2930 */
2931 if (!port->ops)
2932 return -ENODEV;
2933
2934 ret = sci_remap_port(port);
2935 if (unlikely(ret != 0))
2936 return ret;
2937
2938 if (options)
2939 uart_parse_options(options, &baud, &parity, &bits, &flow);
2940
2941 return uart_set_options(port, co, baud, parity, bits, flow);
2942}
2943
/* Regular /dev/ttySC console, registered via sci_uart_driver.cons. */
static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};
2953
/* Boot-time console used by the earlyprintk path (sci_probe_earlyprintk). */
static struct console early_serial_console = {
	.name           = "early_ttySC",
	.write          = serial_console_write,
	.flags          = CON_PRINTBUFFER,
	.index		= -1,
};

/* Option string captured from the "earlyprintk" command-line argument. */
static char early_serial_buf[32];
2962
2963static int sci_probe_earlyprintk(struct platform_device *pdev)
2964{
2965 const struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);
2966
2967 if (early_serial_console.data)
2968 return -EEXIST;
2969
2970 early_serial_console.index = pdev->id;
2971
2972 sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);
2973
2974 serial_console_setup(&early_serial_console, early_serial_buf);
2975
2976 if (!strstr(early_serial_buf, "keep"))
2977 early_serial_console.flags |= CON_BOOT;
2978
2979 register_console(&early_serial_console);
2980 return 0;
2981}
2982
2983#define SCI_CONSOLE (&serial_console)
2984
2985#else
/* Stub used when console support is not built in: earlyprintk unavailable. */
static inline int sci_probe_earlyprintk(struct platform_device *pdev)
{
	return -EINVAL;
}
2990
2991#define SCI_CONSOLE NULL
2992
2993#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE || CONFIG_SERIAL_SH_SCI_EARLYCON */
2994
/* One-time banner printed from sci_init(). */
static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";

/* Serializes the lazy uart_register_driver() call in sci_probe_single(). */
static DEFINE_MUTEX(sci_uart_registration_lock);
static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
3007
3008static int sci_remove(struct platform_device *dev)
3009{
3010 struct sci_port *port = platform_get_drvdata(dev);
3011
3012 uart_remove_one_port(&sci_uart_driver, &port->port);
3013
3014 sci_cleanup_single(port);
3015
3016 if (port->port.fifosize > 1) {
3017 sysfs_remove_file(&dev->dev.kobj,
3018 &dev_attr_rx_fifo_trigger.attr);
3019 }
3020 if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB) {
3021 sysfs_remove_file(&dev->dev.kobj,
3022 &dev_attr_rx_fifo_timeout.attr);
3023 }
3024
3025 return 0;
3026}
3027
3028
/*
 * The OF match data packs the port type (PORT_*) into the upper 16 bits
 * and the register layout (SCIx_*_REGTYPE) into the lower 16 bits of a
 * pointer-sized value; unpacked again in sci_parse_dt().
 */
#define SCI_OF_DATA(type, regtype) (void *)((type) << 16 | (regtype))
#define SCI_OF_TYPE(data) ((unsigned long)(data) >> 16)
#define SCI_OF_REGTYPE(data) ((unsigned long)(data) & 0xffff)

static const struct of_device_id of_sci_match[] = {
	/* SoC-specific types */
	{
		.compatible = "renesas,scif-r7s72100",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
	},
	/* Family-specific types */
	{
		.compatible = "renesas,rcar-gen1-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	}, {
		.compatible = "renesas,rcar-gen2-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	}, {
		.compatible = "renesas,rcar-gen3-scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
	},
	/* Generic types */
	{
		.compatible = "renesas,scif",
		.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_REGTYPE),
	}, {
		.compatible = "renesas,scifa",
		.data = SCI_OF_DATA(PORT_SCIFA, SCIx_SCIFA_REGTYPE),
	}, {
		.compatible = "renesas,scifb",
		.data = SCI_OF_DATA(PORT_SCIFB, SCIx_SCIFB_REGTYPE),
	}, {
		.compatible = "renesas,hscif",
		.data = SCI_OF_DATA(PORT_HSCIF, SCIx_HSCIF_REGTYPE),
	}, {
		.compatible = "renesas,sci",
		.data = SCI_OF_DATA(PORT_SCI, SCIx_SCI_REGTYPE),
	}, {
		/* Terminator */
	},
};
MODULE_DEVICE_TABLE(of, of_sci_match);
3071
3072static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
3073 unsigned int *dev_id)
3074{
3075 struct device_node *np = pdev->dev.of_node;
3076 const struct of_device_id *match;
3077 struct plat_sci_port *p;
3078 struct sci_port *sp;
3079 int id;
3080
3081 if (!IS_ENABLED(CONFIG_OF) || !np)
3082 return NULL;
3083
3084 match = of_match_node(of_sci_match, np);
3085 if (!match)
3086 return NULL;
3087
3088 p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
3089 if (!p)
3090 return NULL;
3091
3092 /* Get the line number from the aliases node. */
3093 id = of_alias_get_id(np, "serial");
3094 if (id < 0) {
3095 dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
3096 return NULL;
3097 }
3098 if (id >= ARRAY_SIZE(sci_ports)) {
3099 dev_err(&pdev->dev, "serial%d out of range\n", id);
3100 return NULL;
3101 }
3102
3103 sp = &sci_ports[id];
3104 *dev_id = id;
3105
3106 p->type = SCI_OF_TYPE(match->data);
3107 p->regtype = SCI_OF_REGTYPE(match->data);
3108
3109 sp->has_rtscts = of_property_read_bool(np, "uart-has-rtscts");
3110
3111 return p;
3112}
3113
/*
 * Register one SCI port with the serial core.
 *
 * Lazily registers the uart driver itself on first use (guarded by
 * sci_uart_registration_lock), initializes the port, sets up the
 * optional modem-control GPIOs, and adds the port. If adding the port
 * fails, the port is cleaned up again.
 *
 * Returns 0 on success or a negative error code.
 */
static int sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port %d when only %d are available\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return -EINVAL;
	}

	/* Register the uart driver itself only once, on first port probe. */
	mutex_lock(&sci_uart_registration_lock);
	if (!sci_uart_driver.state) {
		ret = uart_register_driver(&sci_uart_driver);
		if (ret) {
			mutex_unlock(&sci_uart_registration_lock);
			return ret;
		}
	}
	mutex_unlock(&sci_uart_registration_lock);

	ret = sci_init_single(dev, sciport, index, p, false);
	if (ret)
		return ret;

	/* Modem-control GPIOs are optional; -ENOSYS just means "none". */
	sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
	if (IS_ERR(sciport->gpios) && PTR_ERR(sciport->gpios) != -ENOSYS)
		return PTR_ERR(sciport->gpios);

	if (sciport->has_rtscts) {
		/* Dedicated RTS/CTS pins and GPIO RTS/CTS are exclusive. */
		if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
							UART_GPIO_CTS)) ||
		    !IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
							UART_GPIO_RTS))) {
			dev_err(&dev->dev, "Conflicting RTS/CTS config\n");
			return -EINVAL;
		}
		sciport->port.flags |= UPF_HARD_FLOW;
	}

	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
	if (ret) {
		sci_cleanup_single(sciport);
		return ret;
	}

	return 0;
}
3166
/*
 * Platform driver ->probe() hook.
 *
 * Resolves the port configuration either from DT (sci_parse_dt) or
 * from legacy platform data, registers the port, and creates the
 * RX FIFO sysfs attributes where applicable.
 */
static int sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p;
	struct sci_port *sp;
	unsigned int dev_id;
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
	if (is_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);

	if (dev->dev.of_node) {
		p = sci_parse_dt(dev, &dev_id);
		if (p == NULL)
			return -EINVAL;
	} else {
		p = dev->dev.platform_data;
		if (p == NULL) {
			dev_err(&dev->dev, "no platform data supplied\n");
			return -EINVAL;
		}

		/* Legacy boards: the device id is the line number. */
		dev_id = dev->id;
	}

	sp = &sci_ports[dev_id];
	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev_id, p, sp);
	if (ret)
		return ret;

	/* rx_fifo_trigger only makes sense on ports with a FIFO. */
	if (sp->port.fifosize > 1) {
		ret = sysfs_create_file(&dev->dev.kobj,
					&dev_attr_rx_fifo_trigger.attr);
		if (ret)
			return ret;
	}
	if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB) {
		ret = sysfs_create_file(&dev->dev.kobj,
					&dev_attr_rx_fifo_timeout.attr);
		if (ret) {
			/* Unwind the first attribute on failure. */
			if (sp->port.fifosize > 1) {
				sysfs_remove_file(&dev->dev.kobj,
						  &dev_attr_rx_fifo_trigger.attr);
			}
			return ret;
		}
	}

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;
}
3227
3228static __maybe_unused int sci_suspend(struct device *dev)
3229{
3230 struct sci_port *sport = dev_get_drvdata(dev);
3231
3232 if (sport)
3233 uart_suspend_port(&sci_uart_driver, &sport->port);
3234
3235 return 0;
3236}
3237
3238static __maybe_unused int sci_resume(struct device *dev)
3239{
3240 struct sci_port *sport = dev_get_drvdata(dev);
3241
3242 if (sport)
3243 uart_resume_port(&sci_uart_driver, &sport->port);
3244
3245 return 0;
3246}
3247
/* System sleep PM ops; runtime PM is handled via sci_port_enable/disable. */
static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume);

static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.pm	= &sci_dev_pm_ops,
		.of_match_table = of_match_ptr(of_sci_match),
	},
};
3259
/* Module entry point: announce the driver and register it. */
static int __init sci_init(void)
{
	pr_info("%s\n", banner);

	return platform_driver_register(&sci_driver);
}
3266
/*
 * Module exit: unregister the platform driver and, if any port was
 * ever probed (which lazily registers the uart driver), unregister
 * the uart driver as well.
 */
static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);

	if (sci_uart_driver.state)
		uart_unregister_driver(&sci_uart_driver);
}
3274
3275#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
3276early_platform_init_buffer("earlyprintk", &sci_driver,
3277 early_serial_buf, ARRAY_SIZE(early_serial_buf));
3278#endif
3279#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
3280static struct __init plat_sci_port port_cfg;
3281
/*
 * Common earlycon setup: wire sci_ports[0] up from the earlycon device,
 * force RE/TE on while preserving the SCSCR value programmed by the
 * boot loader, and install the console write hook.
 *
 * Returns -ENODEV when no MMIO base was mapped for the device.
 */
static int __init early_console_setup(struct earlycon_device *device,
				      int type)
{
	if (!device->port.membase)
		return -ENODEV;

	device->port.serial_in = sci_serial_in;
	device->port.serial_out	= sci_serial_out;
	device->port.type = type;
	memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
	port_cfg.type = type;
	sci_ports[0].cfg = &port_cfg;
	sci_ports[0].params = sci_probe_regmap(&port_cfg);
	/* Save the boot-time SCSCR, then enable RX/TX on top of it. */
	port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR);
	sci_serial_out(&sci_ports[0].port, SCSCR,
		       SCSCR_RE | SCSCR_TE | port_cfg.scscr);

	device->con->write = serial_console_write;
	return 0;
}
/* Per-type earlycon trampolines, matched by the compatibles declared below. */
static int __init sci_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	return early_console_setup(device, PORT_SCI);
}
static int __init scif_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	return early_console_setup(device, PORT_SCIF);
}
static int __init scifa_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	return early_console_setup(device, PORT_SCIFA);
}
static int __init scifb_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	return early_console_setup(device, PORT_SCIFB);
}
static int __init hscif_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	return early_console_setup(device, PORT_HSCIF);
}

OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup);
OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup);
OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup);
OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup);
3333#endif /* CONFIG_SERIAL_SH_SCI_EARLYCON */
3334
3335module_init(sci_init);
3336module_exit(sci_exit);
3337
3338MODULE_LICENSE("GPL");
3339MODULE_ALIAS("platform:sh-sci");
3340MODULE_AUTHOR("Paul Mundt");
3341MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver");