1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright 2017 ATMEL
4 * Copyright 2017 Free Electrons
5 *
6 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
7 *
8 * Derived from the atmel_nand.c driver which contained the following
9 * copyrights:
10 *
11 * Copyright 2003 Rick Bronson
12 *
13 * Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
14 * Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
15 *
16 * Derived from drivers/mtd/spia.c (removed in v3.8)
17 * Copyright 2000 Steven J. Hill (sjhill@cotw.com)
18 *
19 *
20 * Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
21 * Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
22 *
23 * Derived from Das U-Boot source code
24 * (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
25 * Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
26 *
27 * Add Programmable Multibit ECC support for various AT91 SoC
28 * Copyright 2012 ATMEL, Hong Xu
29 *
30 * Add Nand Flash Controller support for SAMA5 SoC
31 * Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
32 *
33 * A few words about the naming convention in this file. This convention
34 * applies to structure and function names.
35 *
36 * Prefixes:
37 *
38 * - atmel_nand_: all generic structures/functions
39 * - atmel_smc_nand_: all structures/functions specific to the SMC interface
40 * (at91sam9 and avr32 SoCs)
41 * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
42 * (sama5 SoCs and later)
43 * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
44 * that is available in the HSMC block
45 * - <soc>_nand_: all SoC specific structures/functions
46 */
47
48#include <linux/clk.h>
49#include <linux/dma-mapping.h>
50#include <linux/dmaengine.h>
51#include <linux/genalloc.h>
52#include <linux/gpio/consumer.h>
53#include <linux/interrupt.h>
54#include <linux/mfd/syscon.h>
55#include <linux/mfd/syscon/atmel-matrix.h>
56#include <linux/mfd/syscon/atmel-smc.h>
57#include <linux/module.h>
58#include <linux/mtd/rawnand.h>
59#include <linux/of_address.h>
60#include <linux/of_irq.h>
61#include <linux/of_platform.h>
62#include <linux/iopoll.h>
63#include <linux/platform_device.h>
64#include <linux/regmap.h>
65#include <soc/at91/atmel-sfr.h>
66
67#include "pmecc.h"
68
69#define ATMEL_HSMC_NFC_CFG 0x0
70#define ATMEL_HSMC_NFC_CFG_SPARESIZE(x) (((x) / 4) << 24)
71#define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK GENMASK(30, 24)
72#define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul) (((cyc) << 16) | ((mul) << 20))
73#define ATMEL_HSMC_NFC_CFG_DTO_MAX GENMASK(22, 16)
74#define ATMEL_HSMC_NFC_CFG_RBEDGE BIT(13)
75#define ATMEL_HSMC_NFC_CFG_FALLING_EDGE BIT(12)
76#define ATMEL_HSMC_NFC_CFG_RSPARE BIT(9)
77#define ATMEL_HSMC_NFC_CFG_WSPARE BIT(8)
78#define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK GENMASK(2, 0)
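/* PAGESIZE encoding: 0 -> 512, 1 -> 1K, 2 -> 2K, 3 -> 4K, 4 -> 8K page. */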
79#define ATMEL_HSMC_NFC_CFG_PAGESIZE(x) (fls((x) / 512) - 1)
80
81#define ATMEL_HSMC_NFC_CTRL 0x4
82#define ATMEL_HSMC_NFC_CTRL_EN BIT(0)
83#define ATMEL_HSMC_NFC_CTRL_DIS BIT(1)
84
85#define ATMEL_HSMC_NFC_SR 0x8
86#define ATMEL_HSMC_NFC_IER 0xc
87#define ATMEL_HSMC_NFC_IDR 0x10
88#define ATMEL_HSMC_NFC_IMR 0x14
89#define ATMEL_HSMC_NFC_SR_ENABLED BIT(1)
90#define ATMEL_HSMC_NFC_SR_RB_RISE BIT(4)
91#define ATMEL_HSMC_NFC_SR_RB_FALL BIT(5)
92#define ATMEL_HSMC_NFC_SR_BUSY BIT(8)
93#define ATMEL_HSMC_NFC_SR_WR BIT(11)
94#define ATMEL_HSMC_NFC_SR_CSID GENMASK(14, 12)
95#define ATMEL_HSMC_NFC_SR_XFRDONE BIT(16)
96#define ATMEL_HSMC_NFC_SR_CMDDONE BIT(17)
97#define ATMEL_HSMC_NFC_SR_DTOE BIT(20)
98#define ATMEL_HSMC_NFC_SR_UNDEF BIT(21)
99#define ATMEL_HSMC_NFC_SR_AWB BIT(22)
100#define ATMEL_HSMC_NFC_SR_NFCASE BIT(23)
101#define ATMEL_HSMC_NFC_SR_ERRORS (ATMEL_HSMC_NFC_SR_DTOE | \
102 ATMEL_HSMC_NFC_SR_UNDEF | \
103 ATMEL_HSMC_NFC_SR_AWB | \
104 ATMEL_HSMC_NFC_SR_NFCASE)
105#define ATMEL_HSMC_NFC_SR_RBEDGE(x) BIT((x) + 24)
106
107#define ATMEL_HSMC_NFC_ADDR 0x18
108#define ATMEL_HSMC_NFC_BANK 0x1c
109
110#define ATMEL_NFC_MAX_RB_ID 7
111
112#define ATMEL_NFC_SRAM_SIZE 0x2400
113
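/*
 * An NFC operation is started by a single write to the NFC command space:
 * the offset, built from the macros below, encodes the command opcode(s),
 * number of address cycles, chip select and data transfer direction, while
 * the written value carries the address cycles (see atmel_nfc_exec_op()).
 */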
114#define ATMEL_NFC_CMD(pos, cmd) ((cmd) << (((pos) * 8) + 2))
115#define ATMEL_NFC_VCMD2 BIT(18)
116#define ATMEL_NFC_ACYCLE(naddrs) ((naddrs) << 19)
117#define ATMEL_NFC_CSID(cs) ((cs) << 22)
118#define ATMEL_NFC_DATAEN BIT(25)
119#define ATMEL_NFC_NFCWR BIT(26)
120
121#define ATMEL_NFC_MAX_ADDR_CYCLES 5
122
123#define ATMEL_NAND_ALE_OFFSET BIT(21)
124#define ATMEL_NAND_CLE_OFFSET BIT(22)
125
126#define DEFAULT_TIMEOUT_MS 1000
127#define MIN_DMA_LEN 128
128
129static bool atmel_nand_avoid_dma __read_mostly;
130
131MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
132module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
133
134enum atmel_nand_rb_type {
135 ATMEL_NAND_NO_RB,
136 ATMEL_NAND_NATIVE_RB,
137 ATMEL_NAND_GPIO_RB,
138};
139
140struct atmel_nand_rb {
141 enum atmel_nand_rb_type type;
142 union {
143 struct gpio_desc *gpio;
144 int id;
145 };
146};
147
148struct atmel_nand_cs {
149 int id;
150 struct atmel_nand_rb rb;
151 struct gpio_desc *csgpio;
152 struct {
153 void __iomem *virt;
154 dma_addr_t dma;
155 } io;
156
157 struct atmel_smc_cs_conf smcconf;
158};
159
160struct atmel_nand {
161 struct list_head node;
162 struct device *dev;
163 struct nand_chip base;
164 struct atmel_nand_cs *activecs;
165 struct atmel_pmecc_user *pmecc;
166 struct gpio_desc *cdgpio;
167 int numcs;
168 struct atmel_nand_cs cs[];
169};
170
171static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
172{
173 return container_of(chip, struct atmel_nand, base);
174}
175
176enum atmel_nfc_data_xfer {
177 ATMEL_NFC_NO_DATA,
178 ATMEL_NFC_READ_DATA,
179 ATMEL_NFC_WRITE_DATA,
180};
181
182struct atmel_nfc_op {
183 u8 cs;
184 u8 ncmds;
185 u8 cmds[2];
186 u8 naddrs;
187 u8 addrs[5];
188 enum atmel_nfc_data_xfer data;
189 u32 wait;
190 u32 errors;
191};
192
193struct atmel_nand_controller;
194struct atmel_nand_controller_caps;
195
196struct atmel_nand_controller_ops {
197 int (*probe)(struct platform_device *pdev,
198 const struct atmel_nand_controller_caps *caps);
199 int (*remove)(struct atmel_nand_controller *nc);
200 void (*nand_init)(struct atmel_nand_controller *nc,
201 struct atmel_nand *nand);
202 int (*ecc_init)(struct nand_chip *chip);
203 int (*setup_data_interface)(struct atmel_nand *nand, int csline,
204 const struct nand_data_interface *conf);
205};
206
207struct atmel_nand_controller_caps {
208 bool has_dma;
209 bool legacy_of_bindings;
210 u32 ale_offs;
211 u32 cle_offs;
212 const char *ebi_csa_regmap_name;
213 const struct atmel_nand_controller_ops *ops;
214};
215
216struct atmel_nand_controller {
217 struct nand_controller base;
218 const struct atmel_nand_controller_caps *caps;
219 struct device *dev;
220 struct regmap *smc;
221 struct dma_chan *dmac;
222 struct atmel_pmecc *pmecc;
223 struct list_head chips;
224 struct clk *mck;
225};
226
227static inline struct atmel_nand_controller *
228to_nand_controller(struct nand_controller *ctl)
229{
230 return container_of(ctl, struct atmel_nand_controller, base);
231}
232
233struct atmel_smc_nand_ebi_csa_cfg {
234 u32 offs;
235 u32 nfd0_on_d16;
236};
237
238struct atmel_smc_nand_controller {
239 struct atmel_nand_controller base;
240 struct regmap *ebi_csa_regmap;
241 struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
242};
243
244static inline struct atmel_smc_nand_controller *
245to_smc_nand_controller(struct nand_controller *ctl)
246{
247 return container_of(to_nand_controller(ctl),
248 struct atmel_smc_nand_controller, base);
249}
250
251struct atmel_hsmc_nand_controller {
252 struct atmel_nand_controller base;
253 struct {
254 struct gen_pool *pool;
255 void __iomem *virt;
256 dma_addr_t dma;
257 } sram;
258 const struct atmel_hsmc_reg_layout *hsmc_layout;
259 struct regmap *io;
260 struct atmel_nfc_op op;
261 struct completion complete;
262 int irq;
263
264 /* Only used when instantiating from legacy DT bindings. */
265 struct clk *clk;
266};
267
268static inline struct atmel_hsmc_nand_controller *
269to_hsmc_nand_controller(struct nand_controller *ctl)
270{
271 return container_of(to_nand_controller(ctl),
272 struct atmel_hsmc_nand_controller, base);
273}
274
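/*
 * Collect error flags and clear the wait flags that have been signalled.
 * Returns true once all awaited events have been received or an error has
 * been reported.
 */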
275static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
276{
277 op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
278 op->wait ^= status & op->wait;
279
280 return !op->wait || op->errors;
281}
282
283static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
284{
285 struct atmel_hsmc_nand_controller *nc = data;
286 u32 sr, rcvd;
287 bool done;
288
289 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
290
291 rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
292 done = atmel_nfc_op_done(&nc->op, sr);
293
294 if (rcvd)
295 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
296
297 if (done)
298 complete(&nc->complete);
299
300 return rcvd ? IRQ_HANDLED : IRQ_NONE;
301}
302
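/*
 * Wait for the events recorded in nc->op.wait, either by polling the NFC
 * status register or by enabling the matching interrupts and sleeping on
 * the completion, then translate any reported error flag into an errno.
 */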
303static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
304 unsigned int timeout_ms)
305{
306 int ret;
307
308 if (!timeout_ms)
309 timeout_ms = DEFAULT_TIMEOUT_MS;
310
311 if (poll) {
312 u32 status;
313
314 ret = regmap_read_poll_timeout(nc->base.smc,
315 ATMEL_HSMC_NFC_SR, status,
316 atmel_nfc_op_done(&nc->op,
317 status),
318 0, timeout_ms * 1000);
319 } else {
320 init_completion(&nc->complete);
321 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
322 nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
323 ret = wait_for_completion_timeout(&nc->complete,
324 msecs_to_jiffies(timeout_ms));
325 if (!ret)
326 ret = -ETIMEDOUT;
327 else
328 ret = 0;
329
330 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
331 }
332
333 if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
334 dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
335 ret = -ETIMEDOUT;
336 }
337
338 if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
339 dev_err(nc->base.dev, "Access to an undefined area\n");
340 ret = -EIO;
341 }
342
343 if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
344 dev_err(nc->base.dev, "Access while busy\n");
345 ret = -EIO;
346 }
347
348 if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
349 dev_err(nc->base.dev, "Wrong access size\n");
350 ret = -EIO;
351 }
352
353 return ret;
354}
355
356static void atmel_nand_dma_transfer_finished(void *data)
357{
358 struct completion *finished = data;
359
360 complete(finished);
361}
362
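/*
 * Move data between a kernel buffer and a device address (the NAND data
 * register or the NFC SRAM) using a memcpy-capable DMA channel. Any failure
 * is returned as -EIO so that callers can fall back to PIO.
 */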
363static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
364 void *buf, dma_addr_t dev_dma, size_t len,
365 enum dma_data_direction dir)
366{
367 DECLARE_COMPLETION_ONSTACK(finished);
368 dma_addr_t src_dma, dst_dma, buf_dma;
369 struct dma_async_tx_descriptor *tx;
370 dma_cookie_t cookie;
371
372 buf_dma = dma_map_single(nc->dev, buf, len, dir);
 373 if (dma_mapping_error(nc->dev, buf_dma)) {
374 dev_err(nc->dev,
375 "Failed to prepare a buffer for DMA access\n");
376 goto err;
377 }
378
379 if (dir == DMA_FROM_DEVICE) {
380 src_dma = dev_dma;
381 dst_dma = buf_dma;
382 } else {
383 src_dma = buf_dma;
384 dst_dma = dev_dma;
385 }
386
387 tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
388 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
389 if (!tx) {
390 dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
391 goto err_unmap;
392 }
393
394 tx->callback = atmel_nand_dma_transfer_finished;
395 tx->callback_param = &finished;
396
397 cookie = dmaengine_submit(tx);
398 if (dma_submit_error(cookie)) {
399 dev_err(nc->dev, "Failed to do DMA tx_submit\n");
400 goto err_unmap;
401 }
402
403 dma_async_issue_pending(nc->dmac);
404 wait_for_completion(&finished);
405 dma_unmap_single(nc->dev, buf_dma, len, dir);
406
407 return 0;
408
409err_unmap:
410 dma_unmap_single(nc->dev, buf_dma, len, dir);
411
412err:
413 dev_dbg(nc->dev, "Fall back to CPU I/O\n");
414
415 return -EIO;
416}
417
418static u8 atmel_nand_read_byte(struct nand_chip *chip)
419{
420 struct atmel_nand *nand = to_atmel_nand(chip);
421
422 return ioread8(nand->activecs->io.virt);
423}
424
425static void atmel_nand_write_byte(struct nand_chip *chip, u8 byte)
426{
427 struct atmel_nand *nand = to_atmel_nand(chip);
428
429 if (chip->options & NAND_BUSWIDTH_16)
430 iowrite16(byte | (byte << 8), nand->activecs->io.virt);
431 else
432 iowrite8(byte, nand->activecs->io.virt);
433}
434
435static void atmel_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
436{
437 struct atmel_nand *nand = to_atmel_nand(chip);
438 struct atmel_nand_controller *nc;
439
440 nc = to_nand_controller(chip->controller);
441
442 /*
 443 * If the controller supports DMA, the buffer address is DMA-able, and
 444 * len is long enough to make a DMA transfer worthwhile, trigger one.
 445 * If it fails, fall back to PIO mode.
446 */
447 if (nc->dmac && virt_addr_valid(buf) &&
448 len >= MIN_DMA_LEN &&
449 !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
450 DMA_FROM_DEVICE))
451 return;
452
453 if (chip->options & NAND_BUSWIDTH_16)
454 ioread16_rep(nand->activecs->io.virt, buf, len / 2);
455 else
456 ioread8_rep(nand->activecs->io.virt, buf, len);
457}
458
459static void atmel_nand_write_buf(struct nand_chip *chip, const u8 *buf, int len)
460{
461 struct atmel_nand *nand = to_atmel_nand(chip);
462 struct atmel_nand_controller *nc;
463
464 nc = to_nand_controller(chip->controller);
465
466 /*
 467 * If the controller supports DMA, the buffer address is DMA-able, and
 468 * len is long enough to make a DMA transfer worthwhile, trigger one.
 469 * If it fails, fall back to PIO mode.
470 */
471 if (nc->dmac && virt_addr_valid(buf) &&
472 len >= MIN_DMA_LEN &&
473 !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
474 len, DMA_TO_DEVICE))
475 return;
476
477 if (chip->options & NAND_BUSWIDTH_16)
478 iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
479 else
480 iowrite8_rep(nand->activecs->io.virt, buf, len);
481}
482
483static int atmel_nand_dev_ready(struct nand_chip *chip)
484{
485 struct atmel_nand *nand = to_atmel_nand(chip);
486
487 return gpiod_get_value(nand->activecs->rb.gpio);
488}
489
490static void atmel_nand_select_chip(struct nand_chip *chip, int cs)
491{
492 struct atmel_nand *nand = to_atmel_nand(chip);
493
494 if (cs < 0 || cs >= nand->numcs) {
495 nand->activecs = NULL;
496 chip->legacy.dev_ready = NULL;
497 return;
498 }
499
500 nand->activecs = &nand->cs[cs];
501
502 if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
503 chip->legacy.dev_ready = atmel_nand_dev_ready;
504}
505
506static int atmel_hsmc_nand_dev_ready(struct nand_chip *chip)
507{
508 struct atmel_nand *nand = to_atmel_nand(chip);
509 struct atmel_hsmc_nand_controller *nc;
510 u32 status;
511
512 nc = to_hsmc_nand_controller(chip->controller);
513
514 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status);
515
516 return status & ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
517}
518
519static void atmel_hsmc_nand_select_chip(struct nand_chip *chip, int cs)
520{
521 struct mtd_info *mtd = nand_to_mtd(chip);
522 struct atmel_nand *nand = to_atmel_nand(chip);
523 struct atmel_hsmc_nand_controller *nc;
524
525 nc = to_hsmc_nand_controller(chip->controller);
526
527 atmel_nand_select_chip(chip, cs);
528
529 if (!nand->activecs) {
530 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
531 ATMEL_HSMC_NFC_CTRL_DIS);
532 return;
533 }
534
535 if (nand->activecs->rb.type == ATMEL_NAND_NATIVE_RB)
536 chip->legacy.dev_ready = atmel_hsmc_nand_dev_ready;
537
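	/*
	 * Configure the NFC for the page/OOB geometry of the selected chip
	 * and let it transfer the spare area automatically on reads (the OOB
	 * area is written manually on the write path).
	 */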
538 regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
539 ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
540 ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
541 ATMEL_HSMC_NFC_CFG_RSPARE |
542 ATMEL_HSMC_NFC_CFG_WSPARE,
543 ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
544 ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
545 ATMEL_HSMC_NFC_CFG_RSPARE);
546 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
547 ATMEL_HSMC_NFC_CTRL_EN);
548}
549
550static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
551{
552 u8 *addrs = nc->op.addrs;
553 unsigned int op = 0;
554 u32 addr, val;
555 int i, ret;
556
557 nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
558
559 for (i = 0; i < nc->op.ncmds; i++)
560 op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
561
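	/*
	 * The command space only carries 4 address cycles: when 5 are needed,
	 * the first one goes into the NFC_ADDR register and the remaining 4
	 * are packed into the value written to the command space below.
	 */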
562 if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
563 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
564
565 op |= ATMEL_NFC_CSID(nc->op.cs) |
566 ATMEL_NFC_ACYCLE(nc->op.naddrs);
567
568 if (nc->op.ncmds > 1)
569 op |= ATMEL_NFC_VCMD2;
570
571 addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
572 (addrs[3] << 24);
573
574 if (nc->op.data != ATMEL_NFC_NO_DATA) {
575 op |= ATMEL_NFC_DATAEN;
576 nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
577
578 if (nc->op.data == ATMEL_NFC_WRITE_DATA)
579 op |= ATMEL_NFC_NFCWR;
580 }
581
582 /* Clear all flags. */
583 regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
584
 585 /* Send the command: op is the offset in the NFC command space, addr the value. */
586 regmap_write(nc->io, op, addr);
587
588 ret = atmel_nfc_wait(nc, poll, 0);
589 if (ret)
590 dev_err(nc->base.dev,
591 "Failed to send NAND command (err = %d)!",
592 ret);
593
594 /* Reset the op state. */
595 memset(&nc->op, 0, sizeof(nc->op));
596
597 return ret;
598}
599
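/*
 * Command and address bytes are only accumulated into nc->op here; the
 * operation is sent to the NFC once the core marks the end of the sequence
 * with NAND_CMD_NONE.
 */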
600static void atmel_hsmc_nand_cmd_ctrl(struct nand_chip *chip, int dat,
601 unsigned int ctrl)
602{
603 struct atmel_nand *nand = to_atmel_nand(chip);
604 struct atmel_hsmc_nand_controller *nc;
605
606 nc = to_hsmc_nand_controller(chip->controller);
607
608 if (ctrl & NAND_ALE) {
609 if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
610 return;
611
612 nc->op.addrs[nc->op.naddrs++] = dat;
613 } else if (ctrl & NAND_CLE) {
614 if (nc->op.ncmds > 1)
615 return;
616
617 nc->op.cmds[nc->op.ncmds++] = dat;
618 }
619
620 if (dat == NAND_CMD_NONE) {
621 nc->op.cs = nand->activecs->id;
622 atmel_nfc_exec_op(nc, true);
623 }
624}
625
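/*
 * On SMC-based controllers, ALE and CLE are driven by address lines: writing
 * a byte at io.virt + ale_offs/cle_offs issues an address or command cycle.
 */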
626static void atmel_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
627 unsigned int ctrl)
628{
629 struct atmel_nand *nand = to_atmel_nand(chip);
630 struct atmel_nand_controller *nc;
631
632 nc = to_nand_controller(chip->controller);
633
634 if ((ctrl & NAND_CTRL_CHANGE) && nand->activecs->csgpio) {
635 if (ctrl & NAND_NCE)
636 gpiod_set_value(nand->activecs->csgpio, 0);
637 else
638 gpiod_set_value(nand->activecs->csgpio, 1);
639 }
640
641 if (ctrl & NAND_ALE)
642 writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs);
643 else if (ctrl & NAND_CLE)
644 writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs);
645}
646
647static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
648 bool oob_required)
649{
650 struct mtd_info *mtd = nand_to_mtd(chip);
651 struct atmel_hsmc_nand_controller *nc;
652 int ret = -EIO;
653
654 nc = to_hsmc_nand_controller(chip->controller);
655
656 if (nc->base.dmac)
657 ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
658 nc->sram.dma, mtd->writesize,
659 DMA_TO_DEVICE);
660
661 /* Falling back to CPU copy. */
662 if (ret)
663 memcpy_toio(nc->sram.virt, buf, mtd->writesize);
664
665 if (oob_required)
666 memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
667 mtd->oobsize);
668}
669
670static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
671 bool oob_required)
672{
673 struct mtd_info *mtd = nand_to_mtd(chip);
674 struct atmel_hsmc_nand_controller *nc;
675 int ret = -EIO;
676
677 nc = to_hsmc_nand_controller(chip->controller);
678
679 if (nc->base.dmac)
680 ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
681 mtd->writesize, DMA_FROM_DEVICE);
682
683 /* Falling back to CPU copy. */
684 if (ret)
685 memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
686
687 if (oob_required)
688 memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
689 mtd->oobsize);
690}
691
692static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
693{
694 struct mtd_info *mtd = nand_to_mtd(chip);
695 struct atmel_hsmc_nand_controller *nc;
696
697 nc = to_hsmc_nand_controller(chip->controller);
698
699 if (column >= 0) {
700 nc->op.addrs[nc->op.naddrs++] = column;
701
702 /*
703 * 2 address cycles for the column offset on large page NANDs.
704 */
705 if (mtd->writesize > 512)
706 nc->op.addrs[nc->op.naddrs++] = column >> 8;
707 }
708
709 if (page >= 0) {
710 nc->op.addrs[nc->op.naddrs++] = page;
711 nc->op.addrs[nc->op.naddrs++] = page >> 8;
712
713 if (chip->options & NAND_ROW_ADDR_3)
714 nc->op.addrs[nc->op.naddrs++] = page >> 16;
715 }
716}
717
718static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
719{
720 struct atmel_nand *nand = to_atmel_nand(chip);
721 struct atmel_nand_controller *nc;
722 int ret;
723
724 nc = to_nand_controller(chip->controller);
725
726 if (raw)
727 return 0;
728
729 ret = atmel_pmecc_enable(nand->pmecc, op);
730 if (ret)
731 dev_err(nc->dev,
732 "Failed to enable ECC engine (err = %d)\n", ret);
733
734 return ret;
735}
736
737static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
738{
739 struct atmel_nand *nand = to_atmel_nand(chip);
740
741 if (!raw)
742 atmel_pmecc_disable(nand->pmecc);
743}
744
745static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
746{
747 struct atmel_nand *nand = to_atmel_nand(chip);
748 struct mtd_info *mtd = nand_to_mtd(chip);
749 struct atmel_nand_controller *nc;
750 struct mtd_oob_region oobregion;
751 void *eccbuf;
752 int ret, i;
753
754 nc = to_nand_controller(chip->controller);
755
756 if (raw)
757 return 0;
758
759 ret = atmel_pmecc_wait_rdy(nand->pmecc);
760 if (ret) {
761 dev_err(nc->dev,
762 "Failed to transfer NAND page data (err = %d)\n",
763 ret);
764 return ret;
765 }
766
767 mtd_ooblayout_ecc(mtd, 0, &oobregion);
768 eccbuf = chip->oob_poi + oobregion.offset;
769
770 for (i = 0; i < chip->ecc.steps; i++) {
771 atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
772 eccbuf);
773 eccbuf += chip->ecc.bytes;
774 }
775
776 return 0;
777}
778
779static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
780 bool raw)
781{
782 struct atmel_nand *nand = to_atmel_nand(chip);
783 struct mtd_info *mtd = nand_to_mtd(chip);
784 struct atmel_nand_controller *nc;
785 struct mtd_oob_region oobregion;
786 int ret, i, max_bitflips = 0;
787 void *databuf, *eccbuf;
788
789 nc = to_nand_controller(chip->controller);
790
791 if (raw)
792 return 0;
793
794 ret = atmel_pmecc_wait_rdy(nand->pmecc);
795 if (ret) {
796 dev_err(nc->dev,
797 "Failed to read NAND page data (err = %d)\n",
798 ret);
799 return ret;
800 }
801
802 mtd_ooblayout_ecc(mtd, 0, &oobregion);
803 eccbuf = chip->oob_poi + oobregion.offset;
804 databuf = buf;
805
806 for (i = 0; i < chip->ecc.steps; i++) {
807 ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
808 eccbuf);
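		/*
		 * If the PMECC engine could not fix the sector and does not
		 * detect erased pages itself, check whether the sector is
		 * simply erased (all 0xff with at most ecc.strength bitflips).
		 */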
809 if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
810 ret = nand_check_erased_ecc_chunk(databuf,
811 chip->ecc.size,
812 eccbuf,
813 chip->ecc.bytes,
814 NULL, 0,
815 chip->ecc.strength);
816
817 if (ret >= 0) {
818 mtd->ecc_stats.corrected += ret;
819 max_bitflips = max(ret, max_bitflips);
820 } else {
821 mtd->ecc_stats.failed++;
822 }
823
824 databuf += chip->ecc.size;
825 eccbuf += chip->ecc.bytes;
826 }
827
828 return max_bitflips;
829}
830
831static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
832 bool oob_required, int page, bool raw)
833{
834 struct mtd_info *mtd = nand_to_mtd(chip);
835 struct atmel_nand *nand = to_atmel_nand(chip);
836 int ret;
837
838 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
839
840 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
841 if (ret)
842 return ret;
843
844 atmel_nand_write_buf(chip, buf, mtd->writesize);
845
846 ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
847 if (ret) {
848 atmel_pmecc_disable(nand->pmecc);
849 return ret;
850 }
851
852 atmel_nand_pmecc_disable(chip, raw);
853
854 atmel_nand_write_buf(chip, chip->oob_poi, mtd->oobsize);
855
856 return nand_prog_page_end_op(chip);
857}
858
859static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const u8 *buf,
860 int oob_required, int page)
861{
862 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
863}
864
865static int atmel_nand_pmecc_write_page_raw(struct nand_chip *chip,
866 const u8 *buf, int oob_required,
867 int page)
868{
869 return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
870}
871
872static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
873 bool oob_required, int page, bool raw)
874{
875 struct mtd_info *mtd = nand_to_mtd(chip);
876 int ret;
877
878 nand_read_page_op(chip, page, 0, NULL, 0);
879
880 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
881 if (ret)
882 return ret;
883
884 atmel_nand_read_buf(chip, buf, mtd->writesize);
885 atmel_nand_read_buf(chip, chip->oob_poi, mtd->oobsize);
886
887 ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
888
889 atmel_nand_pmecc_disable(chip, raw);
890
891 return ret;
892}
893
894static int atmel_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
895 int oob_required, int page)
896{
897 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
898}
899
900static int atmel_nand_pmecc_read_page_raw(struct nand_chip *chip, u8 *buf,
901 int oob_required, int page)
902{
903 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
904}
905
906static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
907 const u8 *buf, bool oob_required,
908 int page, bool raw)
909{
910 struct mtd_info *mtd = nand_to_mtd(chip);
911 struct atmel_nand *nand = to_atmel_nand(chip);
912 struct atmel_hsmc_nand_controller *nc;
913 int ret, status;
914
915 nc = to_hsmc_nand_controller(chip->controller);
916
917 atmel_nfc_copy_to_sram(chip, buf, false);
918
919 nc->op.cmds[0] = NAND_CMD_SEQIN;
920 nc->op.ncmds = 1;
921 atmel_nfc_set_op_addr(chip, page, 0x0);
922 nc->op.cs = nand->activecs->id;
923 nc->op.data = ATMEL_NFC_WRITE_DATA;
924
925 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
926 if (ret)
927 return ret;
928
929 ret = atmel_nfc_exec_op(nc, false);
930 if (ret) {
931 atmel_nand_pmecc_disable(chip, raw);
932 dev_err(nc->base.dev,
933 "Failed to transfer NAND page data (err = %d)\n",
934 ret);
935 return ret;
936 }
937
938 ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
939
940 atmel_nand_pmecc_disable(chip, raw);
941
942 if (ret)
943 return ret;
944
945 atmel_nand_write_buf(chip, chip->oob_poi, mtd->oobsize);
946
947 nc->op.cmds[0] = NAND_CMD_PAGEPROG;
948 nc->op.ncmds = 1;
949 nc->op.cs = nand->activecs->id;
950 ret = atmel_nfc_exec_op(nc, false);
951 if (ret)
952 dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
953 ret);
954
955 status = chip->legacy.waitfunc(chip);
956 if (status & NAND_STATUS_FAIL)
957 return -EIO;
958
959 return ret;
960}
961
962static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
963 const u8 *buf, int oob_required,
964 int page)
965{
966 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
967 false);
968}
969
970static int atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip *chip,
971 const u8 *buf,
972 int oob_required, int page)
973{
974 return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
975 true);
976}
977
978static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
979 bool oob_required, int page,
980 bool raw)
981{
982 struct mtd_info *mtd = nand_to_mtd(chip);
983 struct atmel_nand *nand = to_atmel_nand(chip);
984 struct atmel_hsmc_nand_controller *nc;
985 int ret;
986
987 nc = to_hsmc_nand_controller(chip->controller);
988
989 /*
990 * Optimized read page accessors only work when the NAND R/B pin is
991 * connected to a native SoC R/B pin. If that's not the case, fallback
992 * to the non-optimized one.
993 */
994 if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
995 nand_read_page_op(chip, page, 0, NULL, 0);
996
997 return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
998 raw);
999 }
1000
1001 nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
1002
1003 if (mtd->writesize > 512)
1004 nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;
1005
1006 atmel_nfc_set_op_addr(chip, page, 0x0);
1007 nc->op.cs = nand->activecs->id;
1008 nc->op.data = ATMEL_NFC_READ_DATA;
1009
1010 ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
1011 if (ret)
1012 return ret;
1013
1014 ret = atmel_nfc_exec_op(nc, false);
1015 if (ret) {
1016 atmel_nand_pmecc_disable(chip, raw);
1017 dev_err(nc->base.dev,
1018 "Failed to load NAND page data (err = %d)\n",
1019 ret);
1020 return ret;
1021 }
1022
1023 atmel_nfc_copy_from_sram(chip, buf, true);
1024
1025 ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
1026
1027 atmel_nand_pmecc_disable(chip, raw);
1028
1029 return ret;
1030}
1031
1032static int atmel_hsmc_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
1033 int oob_required, int page)
1034{
1035 return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1036 false);
1037}
1038
1039static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
1040 u8 *buf, int oob_required,
1041 int page)
1042{
1043 return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1044 true);
1045}
1046
1047static int atmel_nand_pmecc_init(struct nand_chip *chip)
1048{
1049 struct mtd_info *mtd = nand_to_mtd(chip);
1050 struct atmel_nand *nand = to_atmel_nand(chip);
1051 struct atmel_nand_controller *nc;
1052 struct atmel_pmecc_user_req req;
1053
1054 nc = to_nand_controller(chip->controller);
1055
1056 if (!nc->pmecc) {
1057 dev_err(nc->dev, "HW ECC not supported\n");
1058 return -ENOTSUPP;
1059 }
1060
1061 if (nc->caps->legacy_of_bindings) {
1062 u32 val;
1063
1064 if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
1065 &val))
1066 chip->ecc.strength = val;
1067
1068 if (!of_property_read_u32(nc->dev->of_node,
1069 "atmel,pmecc-sector-size",
1070 &val))
1071 chip->ecc.size = val;
1072 }
1073
1074 if (chip->ecc.options & NAND_ECC_MAXIMIZE)
1075 req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
1076 else if (chip->ecc.strength)
1077 req.ecc.strength = chip->ecc.strength;
1078 else if (chip->base.eccreq.strength)
1079 req.ecc.strength = chip->base.eccreq.strength;
1080 else
1081 req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
1082
1083 if (chip->ecc.size)
1084 req.ecc.sectorsize = chip->ecc.size;
1085 else if (chip->base.eccreq.step_size)
1086 req.ecc.sectorsize = chip->base.eccreq.step_size;
1087 else
1088 req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
1089
1090 req.pagesize = mtd->writesize;
1091 req.oobsize = mtd->oobsize;
1092
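	/*
	 * Small page NANDs use 4 ECC bytes placed at the start of the OOB
	 * area; on large pages, let the PMECC block use the whole OOB area
	 * except 2 bytes kept for the bad block marker.
	 */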
1093 if (mtd->writesize <= 512) {
1094 req.ecc.bytes = 4;
1095 req.ecc.ooboffset = 0;
1096 } else {
1097 req.ecc.bytes = mtd->oobsize - 2;
1098 req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
1099 }
1100
1101 nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
1102 if (IS_ERR(nand->pmecc))
1103 return PTR_ERR(nand->pmecc);
1104
1105 chip->ecc.algo = NAND_ECC_BCH;
1106 chip->ecc.size = req.ecc.sectorsize;
1107 chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
1108 chip->ecc.strength = req.ecc.strength;
1109
1110 chip->options |= NAND_NO_SUBPAGE_WRITE;
1111
1112 mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
1113
1114 return 0;
1115}
1116
1117static int atmel_nand_ecc_init(struct nand_chip *chip)
1118{
1119 struct atmel_nand_controller *nc;
1120 int ret;
1121
1122 nc = to_nand_controller(chip->controller);
1123
1124 switch (chip->ecc.mode) {
1125 case NAND_ECC_NONE:
1126 case NAND_ECC_SOFT:
1127 /*
1128 * Nothing to do, the core will initialize everything for us.
1129 */
1130 break;
1131
1132 case NAND_ECC_HW:
1133 ret = atmel_nand_pmecc_init(chip);
1134 if (ret)
1135 return ret;
1136
1137 chip->ecc.read_page = atmel_nand_pmecc_read_page;
1138 chip->ecc.write_page = atmel_nand_pmecc_write_page;
1139 chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
1140 chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
1141 break;
1142
1143 default:
1144 /* Other modes are not supported. */
1145 dev_err(nc->dev, "Unsupported ECC mode: %d\n",
1146 chip->ecc.mode);
1147 return -ENOTSUPP;
1148 }
1149
1150 return 0;
1151}
1152
1153static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
1154{
1155 int ret;
1156
1157 ret = atmel_nand_ecc_init(chip);
1158 if (ret)
1159 return ret;
1160
1161 if (chip->ecc.mode != NAND_ECC_HW)
1162 return 0;
1163
1164 /* Adjust the ECC operations for the HSMC IP. */
1165 chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
1166 chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1167 chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1168 chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1169
1170 return 0;
1171}
1172
1173static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
1174 const struct nand_data_interface *conf,
1175 struct atmel_smc_cs_conf *smcconf)
1176{
1177 u32 ncycles, totalcycles, timeps, mckperiodps;
1178 struct atmel_nand_controller *nc;
1179 int ret;
1180
1181 nc = to_nand_controller(nand->base.controller);
1182
1183 /* DDR interface not supported. */
1184 if (conf->type != NAND_SDR_IFACE)
1185 return -ENOTSUPP;
1186
1187 /*
1188 * tRC < 30ns implies EDO mode. This controller does not support this
1189 * mode.
1190 */
1191 if (conf->timings.sdr.tRC_min < 30000)
1192 return -ENOTSUPP;
1193
1194 atmel_smc_cs_conf_init(smcconf);
1195
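	/* Master clock period in picoseconds, to keep precision below. */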
1196 mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
1197 mckperiodps *= 1000;
1198
1199 /*
1200 * Set write pulse timing. This one is easy to extract:
1201 *
1202 * NWE_PULSE = tWP
1203 */
1204 ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
1205 totalcycles = ncycles;
1206 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
1207 ncycles);
1208 if (ret)
1209 return ret;
1210
1211 /*
1212 * The write setup timing depends on the operation done on the NAND.
 1213 * All operations go through the same data bus, but the operation
1214 * type depends on the address we are writing to (ALE/CLE address
1215 * lines).
1216 * Since we have no way to differentiate the different operations at
1217 * the SMC level, we must consider the worst case (the biggest setup
1218 * time among all operation types):
1219 *
1220 * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
1221 */
1222 timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
1223 conf->timings.sdr.tALS_min);
1224 timeps = max(timeps, conf->timings.sdr.tDS_min);
1225 ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1226 ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
1227 totalcycles += ncycles;
1228 ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
1229 ncycles);
1230 if (ret)
1231 return ret;
1232
1233 /*
1234 * As for the write setup timing, the write hold timing depends on the
1235 * operation done on the NAND:
1236 *
1237 * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
1238 */
1239 timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
1240 conf->timings.sdr.tALH_min);
1241 timeps = max3(timeps, conf->timings.sdr.tDH_min,
1242 conf->timings.sdr.tWH_min);
1243 ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1244 totalcycles += ncycles;
1245
1246 /*
 1247 * The write cycle timing directly matches tWC, but also depends
 1248 * on the setup and hold timings we calculated earlier, which
 1249 * gives:
1250 *
1251 * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
1252 */
1253 ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
1254 ncycles = max(totalcycles, ncycles);
1255 ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
1256 ncycles);
1257 if (ret)
1258 return ret;
1259
1260 /*
1261 * We don't want the CS line to be toggled between each byte/word
1262 * transfer to the NAND. The only way to guarantee that is to have the
1263 * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
1264 *
1265 * NCS_WR_PULSE = NWE_CYCLE
1266 */
1267 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
1268 ncycles);
1269 if (ret)
1270 return ret;
1271
1272 /*
1273 * As for the write setup timing, the read hold timing depends on the
1274 * operation done on the NAND:
1275 *
1276 * NRD_HOLD = max(tREH, tRHOH)
1277 */
1278 timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
1279 ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1280 totalcycles = ncycles;
1281
1282 /*
1283 * TDF = tRHZ - NRD_HOLD
1284 */
1285 ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
1286 ncycles -= totalcycles;
1287
1288 /*
1289 * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
1290 * we might end up with a config that does not fit in the TDF field.
1291 * Just take the max value in this case and hope that the NAND is more
1292 * tolerant than advertised.
1293 */
1294 if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
1295 ncycles = ATMEL_SMC_MODE_TDF_MAX;
1296 else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
1297 ncycles = ATMEL_SMC_MODE_TDF_MIN;
1298
1299 smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
1300 ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
1301
1302 /*
1303 * Read pulse timing directly matches tRP:
1304 *
1305 * NRD_PULSE = tRP
1306 */
1307 ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
1308 totalcycles += ncycles;
1309 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
1310 ncycles);
1311 if (ret)
1312 return ret;
1313
1314 /*
 1315 * The read cycle timing directly matches tRC, but also depends
 1316 * on the setup and hold timings we calculated earlier, which
 1317 * gives:
1318 *
1319 * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
1320 *
1321 * NRD_SETUP is always 0.
1322 */
1323 ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
1324 ncycles = max(totalcycles, ncycles);
1325 ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
1326 ncycles);
1327 if (ret)
1328 return ret;
1329
1330 /*
1331 * We don't want the CS line to be toggled between each byte/word
1332 * transfer from the NAND. The only way to guarantee that is to have
1333 * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
1334 *
1335 * NCS_RD_PULSE = NRD_CYCLE
1336 */
1337 ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
1338 ncycles);
1339 if (ret)
1340 return ret;
1341
1342 /* Txxx timings are directly matching tXXX ones. */
1343 ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
1344 ret = atmel_smc_cs_conf_set_timing(smcconf,
1345 ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
1346 ncycles);
1347 if (ret)
1348 return ret;
1349
1350 ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
1351 ret = atmel_smc_cs_conf_set_timing(smcconf,
1352 ATMEL_HSMC_TIMINGS_TADL_SHIFT,
1353 ncycles);
1354 /*
1355 * Version 4 of the ONFI spec mandates that tADL be at least 400
1356 * nanoseconds, but, depending on the master clock rate, 400 ns may not
1357 * fit in the tADL field of the SMC reg. We need to relax the check and
1358 * accept the -ERANGE return code.
1359 *
1360 * Note that previous versions of the ONFI spec had a lower tADL_min
1361 * (100 or 200 ns). It's not clear why this timing constraint got
1362 * increased but it seems most NANDs are fine with values lower than
1363 * 400ns, so we should be safe.
1364 */
1365 if (ret && ret != -ERANGE)
1366 return ret;
1367
1368 ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
1369 ret = atmel_smc_cs_conf_set_timing(smcconf,
1370 ATMEL_HSMC_TIMINGS_TAR_SHIFT,
1371 ncycles);
1372 if (ret)
1373 return ret;
1374
1375 ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
1376 ret = atmel_smc_cs_conf_set_timing(smcconf,
1377 ATMEL_HSMC_TIMINGS_TRR_SHIFT,
1378 ncycles);
1379 if (ret)
1380 return ret;
1381
1382 ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
1383 ret = atmel_smc_cs_conf_set_timing(smcconf,
1384 ATMEL_HSMC_TIMINGS_TWB_SHIFT,
1385 ncycles);
1386 if (ret)
1387 return ret;
1388
1389 /* Attach the CS line to the NFC logic. */
1390 smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;
1391
1392 /* Set the appropriate data bus width. */
1393 if (nand->base.options & NAND_BUSWIDTH_16)
1394 smcconf->mode |= ATMEL_SMC_MODE_DBW_16;
1395
1396 /* Operate in NRD/NWE READ/WRITEMODE. */
1397 smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
1398 ATMEL_SMC_MODE_WRITEMODE_NWE;
1399
1400 return 0;
1401}
1402
1403static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand,
1404 int csline,
1405 const struct nand_data_interface *conf)
1406{
1407 struct atmel_nand_controller *nc;
1408 struct atmel_smc_cs_conf smcconf;
1409 struct atmel_nand_cs *cs;
1410 int ret;
1411
1412 nc = to_nand_controller(nand->base.controller);
1413
1414 ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1415 if (ret)
1416 return ret;
1417
1418 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1419 return 0;
1420
1421 cs = &nand->cs[csline];
1422 cs->smcconf = smcconf;
1423 atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
1424
1425 return 0;
1426}
1427
1428static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand,
1429 int csline,
1430 const struct nand_data_interface *conf)
1431{
1432 struct atmel_hsmc_nand_controller *nc;
1433 struct atmel_smc_cs_conf smcconf;
1434 struct atmel_nand_cs *cs;
1435 int ret;
1436
1437 nc = to_hsmc_nand_controller(nand->base.controller);
1438
1439 ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1440 if (ret)
1441 return ret;
1442
1443 if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1444 return 0;
1445
1446 cs = &nand->cs[csline];
1447 cs->smcconf = smcconf;
1448
1449 if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
1450 cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
1451
1452 atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
1453 &cs->smcconf);
1454
1455 return 0;
1456}
1457
1458static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline,
1459 const struct nand_data_interface *conf)
1460{
1461 struct atmel_nand *nand = to_atmel_nand(chip);
1462 struct atmel_nand_controller *nc;
1463
1464 nc = to_nand_controller(nand->base.controller);
1465
1466 if (csline >= nand->numcs ||
1467 (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
1468 return -EINVAL;
1469
1470 return nc->caps->ops->setup_data_interface(nand, csline, conf);
1471}
1472
1473static void atmel_nand_init(struct atmel_nand_controller *nc,
1474 struct atmel_nand *nand)
1475{
1476 struct nand_chip *chip = &nand->base;
1477 struct mtd_info *mtd = nand_to_mtd(chip);
1478
1479 mtd->dev.parent = nc->dev;
1480 nand->base.controller = &nc->base;
1481
1482 chip->legacy.cmd_ctrl = atmel_nand_cmd_ctrl;
1483 chip->legacy.read_byte = atmel_nand_read_byte;
1484 chip->legacy.write_byte = atmel_nand_write_byte;
1485 chip->legacy.read_buf = atmel_nand_read_buf;
1486 chip->legacy.write_buf = atmel_nand_write_buf;
1487 chip->legacy.select_chip = atmel_nand_select_chip;
1488
1489 if (!nc->mck || !nc->caps->ops->setup_data_interface)
1490 chip->options |= NAND_KEEP_TIMINGS;
1491
1492 /* Some NANDs require a longer delay than the default one (20us). */
1493 chip->legacy.chip_delay = 40;
1494
1495 /*
1496 * Use a bounce buffer when the buffer passed by the MTD user is not
1497 * suitable for DMA.
1498 */
1499 if (nc->dmac)
1500 chip->options |= NAND_USE_BOUNCE_BUFFER;
1501
1502 /* Default to HW ECC if pmecc is available. */
1503 if (nc->pmecc)
1504 chip->ecc.mode = NAND_ECC_HW;
1505}
1506
1507static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
1508 struct atmel_nand *nand)
1509{
1510 struct nand_chip *chip = &nand->base;
1511 struct atmel_smc_nand_controller *smc_nc;
1512 int i;
1513
1514 atmel_nand_init(nc, nand);
1515
1516 smc_nc = to_smc_nand_controller(chip->controller);
1517 if (!smc_nc->ebi_csa_regmap)
1518 return;
1519
1520 /* Attach the CS to the NAND Flash logic. */
1521 for (i = 0; i < nand->numcs; i++)
1522 regmap_update_bits(smc_nc->ebi_csa_regmap,
1523 smc_nc->ebi_csa->offs,
1524 BIT(nand->cs[i].id), BIT(nand->cs[i].id));
1525
1526 if (smc_nc->ebi_csa->nfd0_on_d16)
1527 regmap_update_bits(smc_nc->ebi_csa_regmap,
1528 smc_nc->ebi_csa->offs,
1529 smc_nc->ebi_csa->nfd0_on_d16,
1530 smc_nc->ebi_csa->nfd0_on_d16);
1531}
1532
1533static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
1534 struct atmel_nand *nand)
1535{
1536 struct nand_chip *chip = &nand->base;
1537
1538 atmel_nand_init(nc, nand);
1539
1540 /* Overload some methods for the HSMC controller. */
1541 chip->legacy.cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
1542 chip->legacy.select_chip = atmel_hsmc_nand_select_chip;
1543}
1544
1545static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
1546{
1547 struct nand_chip *chip = &nand->base;
1548 struct mtd_info *mtd = nand_to_mtd(chip);
1549 int ret;
1550
1551 ret = mtd_device_unregister(mtd);
1552 if (ret)
1553 return ret;
1554
1555 nand_cleanup(chip);
1556 list_del(&nand->node);
1557
1558 return 0;
1559}
1560
1561static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
1562 struct device_node *np,
1563 int reg_cells)
1564{
1565 struct atmel_nand *nand;
1566 struct gpio_desc *gpio;
1567 int numcs, ret, i;
1568
1569 numcs = of_property_count_elems_of_size(np, "reg",
1570 reg_cells * sizeof(u32));
1571 if (numcs < 1) {
1572 dev_err(nc->dev, "Missing or invalid reg property\n");
1573 return ERR_PTR(-EINVAL);
1574 }
1575
1576 nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
1577 if (!nand) {
1578 dev_err(nc->dev, "Failed to allocate NAND object\n");
1579 return ERR_PTR(-ENOMEM);
1580 }
1581
1582 nand->numcs = numcs;
1583
1584 gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
1585 &np->fwnode, GPIOD_IN,
1586 "nand-det");
1587 if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1588 dev_err(nc->dev,
1589 "Failed to get detect gpio (err = %ld)\n",
1590 PTR_ERR(gpio));
1591 return ERR_CAST(gpio);
1592 }
1593
1594 if (!IS_ERR(gpio))
1595 nand->cdgpio = gpio;
1596
1597 for (i = 0; i < numcs; i++) {
1598 struct resource res;
1599 u32 val;
1600
1601 ret = of_address_to_resource(np, 0, &res);
1602 if (ret) {
1603 dev_err(nc->dev, "Invalid reg property (err = %d)\n",
1604 ret);
1605 return ERR_PTR(ret);
1606 }
1607
1608 ret = of_property_read_u32_index(np, "reg", i * reg_cells,
1609 &val);
1610 if (ret) {
1611 dev_err(nc->dev, "Invalid reg property (err = %d)\n",
1612 ret);
1613 return ERR_PTR(ret);
1614 }
1615
1616 nand->cs[i].id = val;
1617
1618 nand->cs[i].io.dma = res.start;
1619 nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
1620 if (IS_ERR(nand->cs[i].io.virt))
1621 return ERR_CAST(nand->cs[i].io.virt);
1622
1623 if (!of_property_read_u32(np, "atmel,rb", &val)) {
1624 if (val > ATMEL_NFC_MAX_RB_ID)
1625 return ERR_PTR(-EINVAL);
1626
1627 nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
1628 nand->cs[i].rb.id = val;
1629 } else {
1630 gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
1631 "rb", i, &np->fwnode,
1632 GPIOD_IN, "nand-rb");
1633 if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1634 dev_err(nc->dev,
1635 "Failed to get R/B gpio (err = %ld)\n",
1636 PTR_ERR(gpio));
1637 return ERR_CAST(gpio);
1638 }
1639
1640 if (!IS_ERR(gpio)) {
1641 nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
1642 nand->cs[i].rb.gpio = gpio;
1643 }
1644 }
1645
1646 gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
1647 i, &np->fwnode,
1648 GPIOD_OUT_HIGH,
1649 "nand-cs");
1650 if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1651 dev_err(nc->dev,
1652 "Failed to get CS gpio (err = %ld)\n",
1653 PTR_ERR(gpio));
1654 return ERR_CAST(gpio);
1655 }
1656
1657 if (!IS_ERR(gpio))
1658 nand->cs[i].csgpio = gpio;
1659 }
1660
1661 nand_set_flash_node(&nand->base, np);
1662
1663 return nand;
1664}
1665
1666static int
1667atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
1668 struct atmel_nand *nand)
1669{
1670 struct nand_chip *chip = &nand->base;
1671 struct mtd_info *mtd = nand_to_mtd(chip);
1672 int ret;
1673
1674 /* No card inserted, skip this NAND. */
1675 if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
1676 dev_info(nc->dev, "No SmartMedia card inserted.\n");
1677 return 0;
1678 }
1679
1680 nc->caps->ops->nand_init(nc, nand);
1681
1682 ret = nand_scan(chip, nand->numcs);
1683 if (ret) {
1684 dev_err(nc->dev, "NAND scan failed: %d\n", ret);
1685 return ret;
1686 }
1687
1688 ret = mtd_device_register(mtd, NULL, 0);
1689 if (ret) {
1690 dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
1691 nand_cleanup(chip);
1692 return ret;
1693 }
1694
1695 list_add_tail(&nand->node, &nc->chips);
1696
1697 return 0;
1698}
1699
1700static int
1701atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
1702{
1703 struct atmel_nand *nand, *tmp;
1704 int ret;
1705
1706 list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
1707 ret = atmel_nand_controller_remove_nand(nand);
1708 if (ret)
1709 return ret;
1710 }
1711
1712 return 0;
1713}
1714
1715static int
1716atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
1717{
1718 struct device *dev = nc->dev;
1719 struct platform_device *pdev = to_platform_device(dev);
1720 struct atmel_nand *nand;
1721 struct gpio_desc *gpio;
1722 struct resource *res;
1723
1724 /*
1725 * Legacy bindings only allow connecting a single NAND with a unique CS
1726 * line to the controller.
1727 */
1728 nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
1729 GFP_KERNEL);
1730 if (!nand)
1731 return -ENOMEM;
1732
1733 nand->numcs = 1;
1734
1735 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1736 nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
1737 if (IS_ERR(nand->cs[0].io.virt))
1738 return PTR_ERR(nand->cs[0].io.virt);
1739
1740 nand->cs[0].io.dma = res->start;
1741
1742 /*
1743 * The old driver was hardcoding the CS id to 3 for all sama5
1744 * controllers. Since this id is only meaningful for the sama5
 1745 * controller, we can safely hardcode it to 3 whatever the actual
 1746 * controller is.
 1747 * Connecting a NAND to a different CS line requires the new
 1748 * bindings.
1749 */
1750 nand->cs[0].id = 3;
1751
1752 /* R/B GPIO. */
1753 gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
1754 if (IS_ERR(gpio)) {
1755 dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
1756 PTR_ERR(gpio));
1757 return PTR_ERR(gpio);
1758 }
1759
1760 if (gpio) {
1761 nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
1762 nand->cs[0].rb.gpio = gpio;
1763 }
1764
1765 /* CS GPIO. */
1766 gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
1767 if (IS_ERR(gpio)) {
1768 dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
1769 PTR_ERR(gpio));
1770 return PTR_ERR(gpio);
1771 }
1772
1773 nand->cs[0].csgpio = gpio;
1774
1775 /* Card detect GPIO. */
1776 gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
1777 if (IS_ERR(gpio)) {
1778 dev_err(dev,
1779 "Failed to get detect gpio (err = %ld)\n",
1780 PTR_ERR(gpio));
1781 return PTR_ERR(gpio);
1782 }
1783
1784 nand->cdgpio = gpio;
1785
1786 nand_set_flash_node(&nand->base, nc->dev->of_node);
1787
1788 return atmel_nand_controller_add_nand(nc, nand);
1789}
1790
1791static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
1792{
1793 struct device_node *np, *nand_np;
1794 struct device *dev = nc->dev;
1795 int ret, reg_cells;
1796 u32 val;
1797
1798 /* We do not retrieve the SMC syscon when parsing old DTs. */
1799 if (nc->caps->legacy_of_bindings)
1800 return atmel_nand_controller_legacy_add_nands(nc);
1801
1802 np = dev->of_node;
1803
1804 ret = of_property_read_u32(np, "#address-cells", &val);
1805 if (ret) {
1806 dev_err(dev, "missing #address-cells property\n");
1807 return ret;
1808 }
1809
1810 reg_cells = val;
1811
1812 ret = of_property_read_u32(np, "#size-cells", &val);
1813 if (ret) {
1814 dev_err(dev, "missing #size-cells property\n");
1815 return ret;
1816 }
1817
1818 reg_cells += val;
1819
1820 for_each_child_of_node(np, nand_np) {
1821 struct atmel_nand *nand;
1822
1823 nand = atmel_nand_create(nc, nand_np, reg_cells);
1824 if (IS_ERR(nand)) {
1825 ret = PTR_ERR(nand);
1826 goto err;
1827 }
1828
1829 ret = atmel_nand_controller_add_nand(nc, nand);
1830 if (ret)
1831 goto err;
1832 }
1833
1834 return 0;
1835
1836err:
1837 atmel_nand_controller_remove_nands(nc);
1838
1839 return ret;
1840}
1841
1842static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
1843{
1844 if (nc->dmac)
1845 dma_release_channel(nc->dmac);
1846
1847 clk_put(nc->mck);
1848}
1849
1850static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
1851 .offs = AT91SAM9260_MATRIX_EBICSA,
1852};
1853
1854static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
1855 .offs = AT91SAM9261_MATRIX_EBICSA,
1856};
1857
1858static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
1859 .offs = AT91SAM9263_MATRIX_EBI0CSA,
1860};
1861
1862static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
1863 .offs = AT91SAM9RL_MATRIX_EBICSA,
1864};
1865
1866static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
1867 .offs = AT91SAM9G45_MATRIX_EBICSA,
1868};
1869
1870static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
1871 .offs = AT91SAM9N12_MATRIX_EBICSA,
1872};
1873
1874static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
1875 .offs = AT91SAM9X5_MATRIX_EBICSA,
1876};
1877
1878static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
1879 .offs = AT91_SFR_CCFG_EBICSA,
1880 .nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
1881};
1882
1883static const struct of_device_id atmel_ebi_csa_regmap_of_ids[] = {
1884 {
1885 .compatible = "atmel,at91sam9260-matrix",
1886 .data = &at91sam9260_ebi_csa,
1887 },
1888 {
1889 .compatible = "atmel,at91sam9261-matrix",
1890 .data = &at91sam9261_ebi_csa,
1891 },
1892 {
1893 .compatible = "atmel,at91sam9263-matrix",
1894 .data = &at91sam9263_ebi_csa,
1895 },
1896 {
1897 .compatible = "atmel,at91sam9rl-matrix",
1898 .data = &at91sam9rl_ebi_csa,
1899 },
1900 {
1901 .compatible = "atmel,at91sam9g45-matrix",
1902 .data = &at91sam9g45_ebi_csa,
1903 },
1904 {
1905 .compatible = "atmel,at91sam9n12-matrix",
1906 .data = &at91sam9n12_ebi_csa,
1907 },
1908 {
1909 .compatible = "atmel,at91sam9x5-matrix",
1910 .data = &at91sam9x5_ebi_csa,
1911 },
1912 {
1913 .compatible = "microchip,sam9x60-sfr",
1914 .data = &sam9x60_ebi_csa,
1915 },
1916 { /* sentinel */ },
1917};
1918
1919static int atmel_nand_attach_chip(struct nand_chip *chip)
1920{
1921 struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
1922 struct atmel_nand *nand = to_atmel_nand(chip);
1923 struct mtd_info *mtd = nand_to_mtd(chip);
1924 int ret;
1925
1926 ret = nc->caps->ops->ecc_init(chip);
1927 if (ret)
1928 return ret;
1929
1930 if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
1931 /*
1932 * We keep the MTD name unchanged to avoid breaking platforms
1933 * where the MTD cmdline parser is used and the bootloader
1934 * has not been updated to use the new naming scheme.
1935 */
1936 mtd->name = "atmel_nand";
1937 } else if (!mtd->name) {
1938 /*
1939 * If the new bindings are used and the bootloader has not been
1940 * updated to pass a new mtdparts parameter on the cmdline, you
1941 * should define the following property in your nand node:
1942 *
1943 * label = "atmel_nand";
1944 *
1945 * This way, mtd->name will be set by the core when
1946 * nand_set_flash_node() is called.
1947 */
1948 mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
1949 "%s:nand.%d", dev_name(nc->dev),
1950 nand->cs[0].id);
1951 if (!mtd->name) {
1952 dev_err(nc->dev, "Failed to allocate mtd->name\n");
1953 return -ENOMEM;
1954 }
1955 }
1956
1957 return 0;
1958}
1959
1960static const struct nand_controller_ops atmel_nand_controller_ops = {
1961 .attach_chip = atmel_nand_attach_chip,
1962 .setup_data_interface = atmel_nand_setup_data_interface,
1963};
1964
1965static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
1966 struct platform_device *pdev,
1967 const struct atmel_nand_controller_caps *caps)
1968{
1969 struct device *dev = &pdev->dev;
1970 struct device_node *np = dev->of_node;
1971 int ret;
1972
1973 nand_controller_init(&nc->base);
1974 nc->base.ops = &atmel_nand_controller_ops;
1975 INIT_LIST_HEAD(&nc->chips);
1976 nc->dev = dev;
1977 nc->caps = caps;
1978
1979 platform_set_drvdata(pdev, nc);
1980
1981 nc->pmecc = devm_atmel_pmecc_get(dev);
1982 if (IS_ERR(nc->pmecc)) {
1983 ret = PTR_ERR(nc->pmecc);
1984 if (ret != -EPROBE_DEFER)
1985 dev_err(dev, "Could not get PMECC object (err = %d)\n",
1986 ret);
1987 return ret;
1988 }
1989
1990 if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
1991 dma_cap_mask_t mask;
1992
1993 dma_cap_zero(mask);
1994 dma_cap_set(DMA_MEMCPY, mask);
1995
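		/* DMA is optional: if no channel is available, fall back to PIO. */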
1996 nc->dmac = dma_request_channel(mask, NULL, NULL);
1997 if (!nc->dmac)
1998 dev_err(nc->dev, "Failed to request DMA channel\n");
1999 }
2000
2001 /* We do not retrieve the SMC syscon when parsing old DTs. */
2002 if (nc->caps->legacy_of_bindings)
2003 return 0;
2004
2005 nc->mck = of_clk_get(dev->parent->of_node, 0);
2006 if (IS_ERR(nc->mck)) {
2007 dev_err(dev, "Failed to retrieve MCK clk\n");
2008 ret = PTR_ERR(nc->mck);
2009 goto out_release_dma;
2010 }
2011
2012 np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
2013 if (!np) {
2014 dev_err(dev, "Missing or invalid atmel,smc property\n");
2015 ret = -EINVAL;
2016 goto out_release_dma;
2017 }
2018
2019 nc->smc = syscon_node_to_regmap(np);
2020 of_node_put(np);
2021 if (IS_ERR(nc->smc)) {
2022 ret = PTR_ERR(nc->smc);
2023 dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
2024 goto out_release_dma;
2025 }
2026
2027 return 0;
2028
2029out_release_dma:
2030 if (nc->dmac)
2031 dma_release_channel(nc->dmac);
2032
2033 return ret;
2034}
2035
2036static int
2037atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
2038{
2039 struct device *dev = nc->base.dev;
2040 const struct of_device_id *match;
2041 struct device_node *np;
2042 int ret;
2043
2044 /* We do not retrieve the EBICSA regmap when parsing old DTs. */
2045 if (nc->base.caps->legacy_of_bindings)
2046 return 0;
2047
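	/*
	 * The EBI CSA syscon is referenced from the parent (EBI) node through
	 * the property named by ->ebi_csa_regmap_name, e.g. (phandle labels
	 * are illustrative):
	 *
	 *	atmel,matrix = <&matrix>;	for at91sam9 SoCs
	 *	microchip,sfr = <&sfr>;		for sam9x60
	 */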
2048 np = of_parse_phandle(dev->parent->of_node,
2049 nc->base.caps->ebi_csa_regmap_name, 0);
2050 if (!np)
2051 return 0;
2052
2053 match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
2054 if (!match) {
2055 of_node_put(np);
2056 return 0;
2057 }
2058
2059 nc->ebi_csa_regmap = syscon_node_to_regmap(np);
2060 of_node_put(np);
2061 if (IS_ERR(nc->ebi_csa_regmap)) {
2062 ret = PTR_ERR(nc->ebi_csa_regmap);
2063 dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
2064 return ret;
2065 }
2066
2067 nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;
2068
2069	/*
2070	 * The at91sam9263 has two EBIs; if the NAND controller sits behind
2071	 * EBI1, the CSA configuration is at ->ebi_csa->offs + 4.
2072	 */
2073 if (of_device_is_compatible(dev->parent->of_node,
2074 "atmel,at91sam9263-ebi1"))
2075 nc->ebi_csa->offs += 4;
2076
2077 return 0;
2078}
2079
2080static int
2081atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2082{
2083 struct regmap_config regmap_conf = {
2084 .reg_bits = 32,
2085 .val_bits = 32,
2086 .reg_stride = 4,
2087 };
2088
2089 struct device *dev = nc->base.dev;
2090 struct device_node *nand_np, *nfc_np;
2091 void __iomem *iomem;
2092 struct resource res;
2093 int ret;
2094
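	/*
	 * The legacy binding describes the NFC as an "atmel,sama5d3-nfc"
	 * child node carrying three register ranges (NFC IO, HSMC and NFC
	 * SRAM, in that order) plus the HSMC clock; the NFC interrupt is
	 * taken from the nand node itself.
	 */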
2095 nand_np = dev->of_node;
2096 nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
2097 if (!nfc_np) {
2098 dev_err(dev, "Could not find device node for sama5d3-nfc\n");
2099 return -ENODEV;
2100 }
2101
2102 nc->clk = of_clk_get(nfc_np, 0);
2103 if (IS_ERR(nc->clk)) {
2104 ret = PTR_ERR(nc->clk);
2105 dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
2106 ret);
2107 goto out;
2108 }
2109
2110 ret = clk_prepare_enable(nc->clk);
2111 if (ret) {
2112 dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
2113 ret);
2114 goto out;
2115 }
2116
2117 nc->irq = of_irq_get(nand_np, 0);
2118 if (nc->irq <= 0) {
2119 ret = nc->irq ?: -ENXIO;
2120 if (ret != -EPROBE_DEFER)
2121 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2122 ret);
2123 goto out;
2124 }
2125
2126 ret = of_address_to_resource(nfc_np, 0, &res);
2127 if (ret) {
2128 dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
2129 ret);
2130 goto out;
2131 }
2132
2133 iomem = devm_ioremap_resource(dev, &res);
2134 if (IS_ERR(iomem)) {
2135 ret = PTR_ERR(iomem);
2136 goto out;
2137 }
2138
2139 regmap_conf.name = "nfc-io";
2140 regmap_conf.max_register = resource_size(&res) - 4;
2141 nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2142 if (IS_ERR(nc->io)) {
2143 ret = PTR_ERR(nc->io);
2144 dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2145 ret);
2146 goto out;
2147 }
2148
2149 ret = of_address_to_resource(nfc_np, 1, &res);
2150 if (ret) {
2151 dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
2152 ret);
2153 goto out;
2154 }
2155
2156 iomem = devm_ioremap_resource(dev, &res);
2157 if (IS_ERR(iomem)) {
2158 ret = PTR_ERR(iomem);
2159 goto out;
2160 }
2161
2162 regmap_conf.name = "smc";
2163 regmap_conf.max_register = resource_size(&res) - 4;
2164 nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2165 if (IS_ERR(nc->base.smc)) {
2166 ret = PTR_ERR(nc->base.smc);
2167		dev_err(dev, "Could not create SMC regmap (err = %d)\n",
2168 ret);
2169 goto out;
2170 }
2171
2172 ret = of_address_to_resource(nfc_np, 2, &res);
2173 if (ret) {
2174 dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
2175 ret);
2176 goto out;
2177 }
2178
2179 nc->sram.virt = devm_ioremap_resource(dev, &res);
2180 if (IS_ERR(nc->sram.virt)) {
2181 ret = PTR_ERR(nc->sram.virt);
2182 goto out;
2183 }
2184
2185 nc->sram.dma = res.start;
2186
2187out:
2188 of_node_put(nfc_np);
2189
2190 return ret;
2191}
2192
2193static int
2194atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
2195{
2196 struct device *dev = nc->base.dev;
2197 struct device_node *np;
2198 int ret;
2199
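	/*
	 * DT references consumed below, with purely illustrative phandle
	 * labels ("atmel,smc" sits on the parent EBI node, the other two on
	 * the nand-controller node itself):
	 *
	 *	atmel,smc = <&hsmc>;
	 *	atmel,nfc-io = <&nfc_io>;
	 *	atmel,nfc-sram = <&nfc_sram>;
	 */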
2200 np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
2201 if (!np) {
2202 dev_err(dev, "Missing or invalid atmel,smc property\n");
2203 return -EINVAL;
2204 }
2205
2206 nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);
2207
2208 nc->irq = of_irq_get(np, 0);
2209 of_node_put(np);
2210 if (nc->irq <= 0) {
2211 ret = nc->irq ?: -ENXIO;
2212 if (ret != -EPROBE_DEFER)
2213 dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2214 ret);
2215 return ret;
2216 }
2217
2218 np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
2219 if (!np) {
2220 dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
2221 return -EINVAL;
2222 }
2223
2224 nc->io = syscon_node_to_regmap(np);
2225 of_node_put(np);
2226 if (IS_ERR(nc->io)) {
2227 ret = PTR_ERR(nc->io);
2228 dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
2229 return ret;
2230 }
2231
2232 nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
2233 "atmel,nfc-sram", 0);
2234 if (!nc->sram.pool) {
2235 dev_err(nc->base.dev, "Missing SRAM\n");
2236 return -ENOMEM;
2237 }
2238
2239 nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
2240 ATMEL_NFC_SRAM_SIZE,
2241 &nc->sram.dma);
2242 if (!nc->sram.virt) {
2243 dev_err(nc->base.dev,
2244 "Could not allocate memory from the NFC SRAM pool\n");
2245 return -ENOMEM;
2246 }
2247
2248 return 0;
2249}
2250
2251static int
2252atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
2253{
2254 struct atmel_hsmc_nand_controller *hsmc_nc;
2255 int ret;
2256
2257 ret = atmel_nand_controller_remove_nands(nc);
2258 if (ret)
2259 return ret;
2260
2261 hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
2262 if (hsmc_nc->sram.pool)
2263 gen_pool_free(hsmc_nc->sram.pool,
2264 (unsigned long)hsmc_nc->sram.virt,
2265 ATMEL_NFC_SRAM_SIZE);
2266
2267 if (hsmc_nc->clk) {
2268 clk_disable_unprepare(hsmc_nc->clk);
2269 clk_put(hsmc_nc->clk);
2270 }
2271
2272 atmel_nand_controller_cleanup(nc);
2273
2274 return 0;
2275}
2276
2277static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
2278 const struct atmel_nand_controller_caps *caps)
2279{
2280 struct device *dev = &pdev->dev;
2281 struct atmel_hsmc_nand_controller *nc;
2282 int ret;
2283
2284 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2285 if (!nc)
2286 return -ENOMEM;
2287
2288 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2289 if (ret)
2290 return ret;
2291
2292 if (caps->legacy_of_bindings)
2293 ret = atmel_hsmc_nand_controller_legacy_init(nc);
2294 else
2295 ret = atmel_hsmc_nand_controller_init(nc);
2296
2297 if (ret)
2298 return ret;
2299
2300	/* Make sure all IRQs are masked before registering our IRQ handler. */
2301 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
2302 ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
2303 IRQF_SHARED, "nfc", nc);
2304 if (ret) {
2305 dev_err(dev,
2306			"Could not register NFC interrupt handler (err = %d)\n",
2307 ret);
2308 goto err;
2309 }
2310
2311	/* Initial NFC configuration: set the data timeout (DTO) to its maximum. */
2312 regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
2313 ATMEL_HSMC_NFC_CFG_DTO_MAX);
2314
2315 ret = atmel_nand_controller_add_nands(&nc->base);
2316 if (ret)
2317 goto err;
2318
2319 return 0;
2320
2321err:
2322 atmel_hsmc_nand_controller_remove(&nc->base);
2323
2324 return ret;
2325}
2326
2327static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
2328 .probe = atmel_hsmc_nand_controller_probe,
2329 .remove = atmel_hsmc_nand_controller_remove,
2330 .ecc_init = atmel_hsmc_nand_ecc_init,
2331 .nand_init = atmel_hsmc_nand_init,
2332 .setup_data_interface = atmel_hsmc_nand_setup_data_interface,
2333};
2334
2335static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
2336 .has_dma = true,
2337 .ale_offs = BIT(21),
2338 .cle_offs = BIT(22),
2339 .ops = &atmel_hsmc_nc_ops,
2340};
2341
2342/* Only used to parse old bindings. */
2343static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
2344 .has_dma = true,
2345 .ale_offs = BIT(21),
2346 .cle_offs = BIT(22),
2347 .ops = &atmel_hsmc_nc_ops,
2348 .legacy_of_bindings = true,
2349};
2350
2351static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
2352 const struct atmel_nand_controller_caps *caps)
2353{
2354 struct device *dev = &pdev->dev;
2355 struct atmel_smc_nand_controller *nc;
2356 int ret;
2357
2358 nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2359 if (!nc)
2360 return -ENOMEM;
2361
2362 ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2363 if (ret)
2364 return ret;
2365
2366 ret = atmel_smc_nand_controller_init(nc);
2367 if (ret)
2368 return ret;
2369
2370 return atmel_nand_controller_add_nands(&nc->base);
2371}
2372
2373static int
2374atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
2375{
2376 int ret;
2377
2378 ret = atmel_nand_controller_remove_nands(nc);
2379 if (ret)
2380 return ret;
2381
2382 atmel_nand_controller_cleanup(nc);
2383
2384 return 0;
2385}
2386
2387/*
2388 * The SMC register layout of the at91rm9200 is completely different, which
2389 * prevents us from reusing atmel_smc_nand_setup_data_interface() for the
2390 * ->setup_data_interface() hook.
2391 * At this point there is no support for the at91rm9200 SMC IP, so we leave
2392 * ->setup_data_interface() unassigned.
2393 */
2394static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
2395 .probe = atmel_smc_nand_controller_probe,
2396 .remove = atmel_smc_nand_controller_remove,
2397 .ecc_init = atmel_nand_ecc_init,
2398 .nand_init = atmel_smc_nand_init,
2399};
2400
2401static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
2402 .ale_offs = BIT(21),
2403 .cle_offs = BIT(22),
2404 .ebi_csa_regmap_name = "atmel,matrix",
2405 .ops = &at91rm9200_nc_ops,
2406};
2407
2408static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
2409 .probe = atmel_smc_nand_controller_probe,
2410 .remove = atmel_smc_nand_controller_remove,
2411 .ecc_init = atmel_nand_ecc_init,
2412 .nand_init = atmel_smc_nand_init,
2413 .setup_data_interface = atmel_smc_nand_setup_data_interface,
2414};
2415
2416static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
2417 .ale_offs = BIT(21),
2418 .cle_offs = BIT(22),
2419 .ebi_csa_regmap_name = "atmel,matrix",
2420 .ops = &atmel_smc_nc_ops,
2421};
2422
2423static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
2424 .ale_offs = BIT(22),
2425 .cle_offs = BIT(21),
2426 .ebi_csa_regmap_name = "atmel,matrix",
2427 .ops = &atmel_smc_nc_ops,
2428};
2429
2430static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
2431 .has_dma = true,
2432 .ale_offs = BIT(21),
2433 .cle_offs = BIT(22),
2434 .ebi_csa_regmap_name = "atmel,matrix",
2435 .ops = &atmel_smc_nc_ops,
2436};
2437
2438static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
2439 .has_dma = true,
2440 .ale_offs = BIT(21),
2441 .cle_offs = BIT(22),
2442 .ebi_csa_regmap_name = "microchip,sfr",
2443 .ops = &atmel_smc_nc_ops,
2444};
2445
2446/* Only used to parse old bindings. */
2447static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
2448 .ale_offs = BIT(21),
2449 .cle_offs = BIT(22),
2450 .ops = &atmel_smc_nc_ops,
2451 .legacy_of_bindings = true,
2452};
2453
2454static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
2455 .ale_offs = BIT(22),
2456 .cle_offs = BIT(21),
2457 .ops = &atmel_smc_nc_ops,
2458 .legacy_of_bindings = true,
2459};
2460
2461static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
2462 .has_dma = true,
2463 .ale_offs = BIT(21),
2464 .cle_offs = BIT(22),
2465 .ops = &atmel_smc_nc_ops,
2466 .legacy_of_bindings = true,
2467};
2468
2469static const struct of_device_id atmel_nand_controller_of_ids[] = {
2470 {
2471 .compatible = "atmel,at91rm9200-nand-controller",
2472 .data = &atmel_rm9200_nc_caps,
2473 },
2474 {
2475 .compatible = "atmel,at91sam9260-nand-controller",
2476 .data = &atmel_sam9260_nc_caps,
2477 },
2478 {
2479 .compatible = "atmel,at91sam9261-nand-controller",
2480 .data = &atmel_sam9261_nc_caps,
2481 },
2482 {
2483 .compatible = "atmel,at91sam9g45-nand-controller",
2484 .data = &atmel_sam9g45_nc_caps,
2485 },
2486 {
2487 .compatible = "atmel,sama5d3-nand-controller",
2488 .data = &atmel_sama5_nc_caps,
2489 },
2490 {
2491 .compatible = "microchip,sam9x60-nand-controller",
2492 .data = &microchip_sam9x60_nc_caps,
2493 },
2494 /* Support for old/deprecated bindings: */
2495 {
2496 .compatible = "atmel,at91rm9200-nand",
2497 .data = &atmel_rm9200_nand_caps,
2498 },
2499 {
2500 .compatible = "atmel,sama5d4-nand",
2501 .data = &atmel_rm9200_nand_caps,
2502 },
2503 {
2504 .compatible = "atmel,sama5d2-nand",
2505 .data = &atmel_rm9200_nand_caps,
2506 },
2507 { /* sentinel */ },
2508};
2509MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
2510
2511static int atmel_nand_controller_probe(struct platform_device *pdev)
2512{
2513 const struct atmel_nand_controller_caps *caps;
2514
2515 if (pdev->id_entry)
2516 caps = (void *)pdev->id_entry->driver_data;
2517 else
2518 caps = of_device_get_match_data(&pdev->dev);
2519
2520 if (!caps) {
2521 dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
2522 return -EINVAL;
2523 }
2524
2525 if (caps->legacy_of_bindings) {
2526 struct device_node *nfc_node;
2527 u32 ale_offs = 21;
2528
2529 /*
2530 * If we are parsing legacy DT props and the DT contains a
2531 * valid NFC node, forward the request to the sama5 logic.
2532 */
2533 nfc_node = of_get_compatible_child(pdev->dev.of_node,
2534 "atmel,sama5d3-nfc");
2535 if (nfc_node) {
2536 caps = &atmel_sama5_nand_caps;
2537 of_node_put(nfc_node);
2538 }
2539
2540		/*
2541		 * Even if the compatible says we are dealing with an
2542		 * at91rm9200 controller, the atmel,nand-has-dma property
2543		 * specifies that this controller supports DMA, which means we
2544		 * are in fact dealing with an at91sam9g45+ controller.
2545		 */
2546 if (!caps->has_dma &&
2547 of_property_read_bool(pdev->dev.of_node,
2548 "atmel,nand-has-dma"))
2549 caps = &atmel_sam9g45_nand_caps;
2550
2551		/*
2552		 * All SoCs except the at91sam9261 assign ALE to A21 and CLE to
2553		 * A22. If atmel,nand-addr-offset != 21, this means we are
2554		 * actually dealing with an at91sam9261 controller.
2555		 */
2556 of_property_read_u32(pdev->dev.of_node,
2557 "atmel,nand-addr-offset", &ale_offs);
2558 if (ale_offs != 21)
2559 caps = &atmel_sam9261_nand_caps;
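
		/*
		 * Hypothetical legacy node combining the quirks handled above
		 * (node name, unit address and property values are purely
		 * illustrative):
		 *
		 *	nand0: nand@40000000 {
		 *		compatible = "atmel,at91rm9200-nand";
		 *		atmel,nand-has-dma;
		 *		atmel,nand-addr-offset = <22>;
		 *		...
		 *	};
		 */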
2560 }
2561
2562 return caps->ops->probe(pdev, caps);
2563}
2564
2565static int atmel_nand_controller_remove(struct platform_device *pdev)
2566{
2567 struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
2568
2569 return nc->caps->ops->remove(nc);
2570}
2571
2572static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
2573{
2574 struct atmel_nand_controller *nc = dev_get_drvdata(dev);
2575 struct atmel_nand *nand;
2576
2577 if (nc->pmecc)
2578 atmel_pmecc_reset(nc->pmecc);
2579
2580 list_for_each_entry(nand, &nc->chips, node) {
2581 int i;
2582
2583 for (i = 0; i < nand->numcs; i++)
2584 nand_reset(&nand->base, i);
2585 }
2586
2587 return 0;
2588}
2589
2590static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
2591 atmel_nand_controller_resume);
2592
2593static struct platform_driver atmel_nand_controller_driver = {
2594 .driver = {
2595 .name = "atmel-nand-controller",
2596 .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
2597 .pm = &atmel_nand_controller_pm_ops,
2598 },
2599 .probe = atmel_nand_controller_probe,
2600 .remove = atmel_nand_controller_remove,
2601};
2602module_platform_driver(atmel_nand_controller_driver);
2603
2604MODULE_LICENSE("GPL");
2605MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
2606MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
2607MODULE_ALIAS("platform:atmel-nand-controller");