/*
2 * NAND Flash Controller Device Driver
3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/dma-mapping.h>
23#include <linux/wait.h>
24#include <linux/mutex.h>
25#include <linux/slab.h>
26#include <linux/mtd/mtd.h>
27#include <linux/module.h>
28
29#include <linux/clk.h>
30#include <linux/err.h>
31#include <linux/io.h>
32#include <linux/ioport.h>
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/platform_device.h>
36#include <linux/of.h>
37#include <linux/of_device.h>
38#include <mach/spinlock.h>
39#include <linux/wakelock.h>
40#include <linux/soc/zte/pm/drv_idle.h>
41
42#include <linux/types.h>
43#include <linux/mtd/map.h>
44#include <linux/mtd/partitions.h>
45#include <asm/io.h>
46#include "denali.h"
47#include <linux/gpio.h>
48
49MODULE_LICENSE("GPL");
50
51/* We define a module parameter that allows the user to override
52 * the hardware and decide what timing mode should be used.
53 */
54#define NAND_DEFAULT_TIMINGS -1
55
56static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
57int denali_int_en_flag = 0;
58module_param(onfi_timing_mode, int, S_IRUGO);
59MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
60 " -1 indicates use default timings");
61
62#define DENALI_NAND_NAME "denali-nand"
63
64/* We define a macro here that combines all interrupts this driver uses into
65 * a single constant value, for convenience. */
66#define DENALI_IRQ_ALL (INTR_STATUS__DMA_CMD_COMP | \
67 INTR_STATUS__ECC_TRANSACTION_DONE | \
68 INTR_STATUS__ECC_ERR | \
69 INTR_STATUS__PROGRAM_FAIL | \
70 INTR_STATUS__LOAD_COMP | \
71 INTR_STATUS__PROGRAM_COMP | \
72 INTR_STATUS__TIME_OUT | \
73 INTR_STATUS__ERASE_FAIL | \
74 INTR_STATUS__RST_COMP | \
75 INTR_STATUS__ERASE_COMP)
76
77/* indicates whether or not the internal value for the flash bank is
78 * valid or not */
79#define CHIP_SELECT_INVALID -1
80
81#define SUPPORT_8BITECC 1
82
/*
 * Integer division rounded up to the nearest whole number.  Intended for
 * non-negative size/count arithmetic only (as used throughout this driver).
 * This form evaluates X only once (the original evaluated it twice) and
 * avoids the conditional branch of the modulo-based version.
 */
#define CEIL_DIV(X, Y) (((X) + (Y) - 1) / (Y))
86
87/* this macro allows us to convert from an MTD structure to our own
88 * device context (denali) structure.
89 */
90#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
91
92/* These constants are defined by the driver to enable common driver
93 * configuration options. */
94#define SPARE_ACCESS 0x41
95#define MAIN_ACCESS 0x42
96#define MAIN_SPARE_ACCESS 0x43
97
98#define DENALI_READ 0
99#define DENALI_WRITE 0x100
100
101/* types of device accesses. We can issue commands and get status */
102#define COMMAND_CYCLE 0
103#define ADDR_CYCLE 1
104#define STATUS_CYCLE 2
105
106/* this is a helper macro that allows us to
107 * format the bank into the proper bits for the controller */
108#define BANK(x) ((x) << 24)
109
110/* List of platforms this NAND controller has be integrated into */
111
112
113/* forward declarations */
114static void clear_interrupts(struct denali_nand_info *denali);
115static uint32_t wait_for_irq(struct denali_nand_info *denali,
116 uint32_t irq_mask);
117static void denali_irq_enable(struct denali_nand_info *denali,
118 uint32_t int_mask);
119static uint32_t read_interrupt_status(struct denali_nand_info *denali);
120
121static uint32_t detect_nand_bus_freq(void);
122
123struct mtd_info *mtd_fota;
124static struct wake_lock nand_wake_lock;
125extern struct cmdline_mtd_partition *partitions;
126
/*
 * Local mirror of the MTD core's command-line partition parser record;
 * the driver consumes the extern 'partitions' list through this layout.
 */
struct cmdline_mtd_partition {
	struct cmdline_mtd_partition *next;	/* singly linked list */
	char *mtd_id;				/* master MTD device name */
	int num_parts;				/* number of entries in parts[] */
	struct mtd_partition *parts;		/* partition descriptor array */
};
133
134
135/* Certain operations for the denali NAND controller use
136 * an indexed mode to read/write data. The operation is
137 * performed by writing the address value of the command
138 * to the device memory followed by the data. This function
139 * abstracts this common operation.
140*/
static void index_addr(struct denali_nand_info *denali,
		uint32_t address, uint32_t data)
{
	/* Indexed access: write the command/address word first... */
	iowrite32(address, denali->flash_mem);
	/* ...then the associated data word at offset 0x10. Order matters. */
	iowrite32(data, denali->flash_mem + 0x10);
}
147
/* Perform an indexed read of the device: write the address word, then
 * read the result back from the data window at offset 0x10. */
static void index_addr_read_data(struct denali_nand_info *denali,
		uint32_t address, uint32_t *pdata)
{
	iowrite32(address, denali->flash_mem);
	*pdata = ioread32(denali->flash_mem + 0x10);
}
155
/* We need to buffer some data for some of the NAND core routines.
 * The operations manage buffering that data. */
/* Discard any buffered bytes by resetting both buffer indices. */
static void reset_buf(struct denali_nand_info *denali)
{
	denali->buf.head = denali->buf.tail = 0;
}
162
/* Append one byte to the driver's staging buffer; overrunning the
 * fixed-size buffer is a driver bug, hence BUG_ON. */
static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
	BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
	denali->buf.buf[denali->buf.tail++] = byte;
}
168
/* reads the status of the device */
static void read_status(struct denali_nand_info *denali)
{
	uint32_t cmd = 0x0;
	/* initialize the data buffer to store status */
	reset_buf(denali);

	/*
	 * Synthesize a NAND status byte from the controller's WRITE_PROTECT
	 * register: non-zero means writes are allowed, so report
	 * NAND_STATUS_WP (the "not protected" bit); otherwise report 0.
	 */
	cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
	if (cmd)
		write_byte_to_buf(denali, NAND_STATUS_WP);
	else
		write_byte_to_buf(denali, 0);
}
182
/* resets a specific device connected to the core */
static void reset_bank(struct denali_nand_info *denali)
{
	uint32_t irq_status = 0;
	/* completion is signalled by either RST_COMP or TIME_OUT */
	uint32_t irq_mask = INTR_STATUS__RST_COMP |
			    INTR_STATUS__TIME_OUT;

	/* drop any stale status before issuing the reset */
	clear_interrupts(denali);

	/* DEVICE_RESET takes a one-hot bank bitmap */
	iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);

	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status & INTR_STATUS__TIME_OUT)
		dev_err(denali->dev, "reset bank failed.\n");
}
201
/* Reset the flash controller: acknowledge stale reset/timeout status on
 * every bank, reset each bank in turn with a busy-wait for completion,
 * then acknowledge the status bits the resets just raised.
 * Always returns PASS; per-bank timeouts are only logged. */
static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
	uint32_t i;

	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* clear any leftover RST_COMP/TIME_OUT bits from earlier activity */
	for (i = 0 ; i < denali->max_banks; i++)
		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
			denali->flash_reg + INTR_STATUS(i));

	for (i = 0 ; i < denali->max_banks; i++) {
		iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
		/* poll until the controller reports completion or timeout;
		 * relies on the hardware eventually setting TIME_OUT */
		while (!(ioread32(denali->flash_reg +
			INTR_STATUS(i)) &
			(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
			cpu_relax();
		if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
			INTR_STATUS__TIME_OUT)
			dev_dbg(denali->dev,
			"NAND Reset operation timed out on bank %d\n", i);
	}

	/* acknowledge the status bits raised by the resets above */
	for (i = 0; i < denali->max_banks; i++)
		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
			denali->flash_reg + INTR_STATUS(i));

	return PASS;
}
232
233
/* Page geometry shared with the partition/FOTA code elsewhere in the file. */
uint32_t main_size, spare_size;

/*
 * Program controller geometry, ECC and timing registers for Samsung parts.
 * Timing writes appear to convert nanosecond datasheet figures to bus
 * clocks as (t_ns * bus_freq_MHz) / 1000 plus a 2-cycle margin -- TODO
 * confirm against the controller spec.
 *
 * NOTE(review): bus_freq is hard-coded to 25 here, while every other
 * vendor helper calls detect_nand_bus_freq() (104 or 26).  Presumably a
 * deliberately conservative setting for Samsung -- confirm.
 * 'device_id' is accepted but unused: one parameter set covers all
 * Samsung parts handled by this driver.
 */
static void get_samsung_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	uint32_t bus_freq;
	bus_freq = 25;

	main_size = 2048;
	spare_size = 64;
	iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
	iowrite32(main_size, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
	iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
	iowrite32(4, denali->flash_reg + ECC_CORRECTION);
	iowrite32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
	iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
	iowrite32(((80 * bus_freq)/1000+2) | (((30 * bus_freq)/1000+2)<<8),
		denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
	iowrite32(((120* bus_freq)/1000+2) | (((30* bus_freq)/1000+2)<<8),
		denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
	iowrite32((120 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
	iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
	iowrite32((30 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
	iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
	iowrite32((80 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
	/* Let the NAND core see the ECC strength the controller was given. */
	denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
	denali->nand.ecc.size = 512;
}
263
/*
 * Workaround for a controller bug that reports a wrong spare-area size
 * for some Toshiba 4KiB-page parts: force spare size to 216 and rebuild
 * the logical spare size from the number of connected devices.
 *
 * NOTE(review): SUPPORT_15BITECC is never defined in this file, so the
 * preprocessor always takes the SUPPORT_8BITECC branch.  Also, the
 * device-ID dispatcher below routes Toshiba's maker ID (0x98) to the
 * Nanya helper, so this function appears unused here -- confirm.
 */
static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
	uint32_t tmp;

	/* Workaround to fix a controller bug which reports a wrong */
	/* spare area size for some kind of Toshiba NAND device */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
			ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(tmp,
			denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
#endif
	}
}
284
285
286
/*
 * Program controller geometry, ECC and timing registers for Hynix parts
 * identified by 'device_id'.  Timing writes appear to convert nanosecond
 * datasheet figures to bus clocks as (t_ns * bus_freq_MHz) / 1000 plus a
 * 2-cycle margin -- TODO confirm against the controller spec.
 * Unknown IDs fall through with a warning and keep the controller's
 * auto-detected defaults.
 */
static void get_hynix_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	uint32_t bus_freq;
	bus_freq = detect_nand_bus_freq();

	switch (device_id) {
	case 0xAC: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		main_size = 2048;
		spare_size = 128;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(2048, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(128, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
		iowrite32(2048, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(128, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
		iowrite32(((80 * bus_freq)/1000+2) | (((30 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
		iowrite32(((120* bus_freq)/1000+2) | (((30* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
		iowrite32((120 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
		iowrite32((30 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
		iowrite32((80 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;
		break;
	case 0xAA: /* Hynix H272G8_6F2C,JSC2G1G */
		main_size = 2048;
		spare_size = 128;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(2048, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
		iowrite32(2048, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

		iowrite32(((80 * bus_freq)/1000+2) | (((30 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
		iowrite32(((120* bus_freq)/1000+2) | (((30* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
		iowrite32((120 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
		iowrite32((30 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
		iowrite32((80 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */

		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;
		break;
	case 0xDA: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		main_size = 2048;
		spare_size = 64;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(2048, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(64, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(4, denali->flash_reg + ECC_CORRECTION);
		iowrite32(2048, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(64, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
		iowrite32(((80 * bus_freq)/1000+2) | (((30 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
		iowrite32(((120* bus_freq)/1000+2) | (((30* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
		iowrite32((120 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
		iowrite32((30 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
		iowrite32((80 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;
		break;
	default:
		dev_warn(denali->dev,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			device_id);
	}
}
/*
 * Program controller geometry, ECC and timing registers for Micron parts
 * identified by 'device_id'.  The three supported IDs share one (faster)
 * timing set.  Timing writes appear to convert nanosecond figures to bus
 * clocks as (t_ns * bus_freq_MHz) / 1000 plus a 2-cycle margin -- TODO
 * confirm.  Unknown IDs keep the controller defaults with a warning.
 */
static void get_micron_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	uint32_t bus_freq;
	bus_freq = detect_nand_bus_freq();
	switch (device_id) {
	case 0xD5: /* Micron */
	case 0xAA: /* Micron MT29F2G08ABBEA */
	case 0xAC:
		main_size = 2048;
		spare_size = 64;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(2048, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(4, denali->flash_reg + ECC_CORRECTION);
		iowrite32(2048, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

		iowrite32(((60 * bus_freq)/1000+2) | (((25 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE);
		iowrite32(((100* bus_freq)/1000+2) | (((22* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA);
		iowrite32((100 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* timing */
		iowrite32((25 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT);
		iowrite32((15 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT);
		iowrite32((25 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT);
		iowrite32((60 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE);
		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;

		break;
	default:
		dev_warn(denali->dev,
			"Spectra: Unknown Micron NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			device_id);
	}
}
/*
 * Program controller geometry, ECC and timing registers for JSC parts
 * identified by 'device_id'.  Timing writes appear to convert nanosecond
 * figures to bus clocks as (t_ns * bus_freq_MHz) / 1000 plus a 2-cycle
 * margin -- TODO confirm.  Unknown IDs keep controller defaults.
 */
static void get_jsc_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	uint32_t bus_freq;
	bus_freq = detect_nand_bus_freq();
	switch (device_id) {
	case 0xAC: /* JSFN7XB 4G2G */
		main_size = 2048;
		spare_size = 128;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(2048, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
		iowrite32(2048, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

		iowrite32(((80 * bus_freq)/1000+2) | (((30 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
		iowrite32(((120* bus_freq)/1000+2) | (((30* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
		iowrite32((120 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
		iowrite32((30 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
		iowrite32((80 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;

		break;
	default:
		dev_warn(denali->dev,
			"Spectra: Unknown JSC NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			device_id);
	}
}
/*
 * Program controller geometry, ECC and timing registers for ESMT parts
 * identified by 'device_id'.  Both supported IDs use identical values;
 * they are kept as separate cases to document the part numbers.  Timing
 * writes appear to convert nanosecond figures to bus clocks as
 * (t_ns * bus_freq_MHz) / 1000 plus a 2-cycle margin -- TODO confirm.
 */
static void get_esmt_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	uint32_t bus_freq;
	bus_freq = detect_nand_bus_freq();
	switch (device_id) {
	case 0xAC: /* ESMT FM6BD4G2GA */
		main_size = 2048;
		spare_size = 64;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(2048, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(4, denali->flash_reg + ECC_CORRECTION);
		iowrite32(2048, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

		iowrite32(((80 * bus_freq)/1000+2) | (((30 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
		iowrite32(((120* bus_freq)/1000+2) | (((30* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
		iowrite32((120 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
		iowrite32((30 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
		iowrite32((80 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;
		break;

	case 0xAA: /* ESMT FM6BD2G1GA */
		main_size = 2048;
		spare_size = 64;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(2048, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(4, denali->flash_reg + ECC_CORRECTION);
		iowrite32(2048, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

		iowrite32(((80 * bus_freq)/1000+2) | (((30 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
		iowrite32(((120* bus_freq)/1000+2) | (((30* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
		iowrite32((120 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
		iowrite32((30 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
		iowrite32((40 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
		iowrite32((80 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;

		break;
	default:
		dev_warn(denali->dev,
			"Spectra: Unknown ESMT NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			device_id);
	}
}
/*
 * Program controller geometry, ECC and timing registers for Nanya parts
 * identified by 'device_id'.  Note 0xAC is a 4KiB-page part, 0xAA a
 * 2KiB-page part.  Timing writes appear to convert nanosecond figures to
 * bus clocks as (t_ns * bus_freq_MHz) / 1000 plus a 2-cycle margin --
 * TODO confirm.  Also invoked for maker ID 0x98 (see
 * denali_nand_timing_set).
 */
static void get_nanya_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	uint32_t bus_freq;
	bus_freq = detect_nand_bus_freq();
	switch (device_id) {
	case 0xAC: /* NM1482KSLAXBJ 4G2G */
		main_size = 4096;
		spare_size = 256;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
		iowrite32(4096, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

		iowrite32(((60 * bus_freq)/1000+2) | (((25 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
		iowrite32(((100* bus_freq)/1000+2) | (((25* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
		iowrite32((50 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
		iowrite32((25 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
		iowrite32((15 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
		iowrite32((20 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
		iowrite32((50 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;
		break;
	case 0xAA: /* NM1281KSLAXAJ 2G1G */
		main_size = 2048;
		spare_size = 128;
		iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
		iowrite32(main_size, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
		iowrite32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

		iowrite32(((60 * bus_freq)/1000+2) | (((25 * bus_freq)/1000+2)<<8),
			denali->flash_reg + WE_2_RE); /* Twhr,Trr1 */
		iowrite32(((100* bus_freq)/1000+2) | (((25* bus_freq)/1000+2)<<8),
			denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
		iowrite32((100 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
		iowrite32((25 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
		iowrite32((15 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
		iowrite32((35 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
		iowrite32((60 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
		denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION);
		denali->nand.ecc.size = 512;

		break;
	default:
		dev_warn(denali->dev,
			"Spectra: Unknown Nanya NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			device_id);
	}
}
572
573static void get_winbond_nand_para(struct denali_nand_info *denali,
574 uint8_t device_id)
575{
576
577 uint32_t bus_freq;
578 bus_freq = detect_nand_bus_freq();
579 switch (device_id) {
580
581
582 case 0xAA: /* W71NW20GF3FW */
583 main_size = 2048;
584 spare_size = 64;
585 iowrite32(64, denali->flash_reg + PAGES_PER_BLOCK);
586 iowrite32(2048, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
587 iowrite32(spare_size, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
588 iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
589 iowrite32(4, denali->flash_reg + ECC_CORRECTION);
590 iowrite32(2048, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
591 iowrite32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
592
593 iowrite32(((80 * bus_freq)/1000+2) | (((25 * bus_freq)/1000+2)<<8),
594 denali->flash_reg + WE_2_RE); /* Twhr,Trr1*/
595 iowrite32(((100* bus_freq)/1000+2) | (((25* bus_freq)/1000+2)<<8),
596 denali->flash_reg + ADDR_2_DATA); /* Tadl,Trr2 */
597 iowrite32((100 * bus_freq)/1000+2, denali->flash_reg + RE_2_WE); /* Trhw */
598 iowrite32((25 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_LO_CNT); /* Trp */
599 iowrite32((15 * bus_freq)/1000+2, denali->flash_reg + RDWR_EN_HI_CNT); /* Treh */
600 iowrite32((35 * bus_freq)/1000+2, denali->flash_reg + CS_SETUP_CNT); /* Tcs */
601 iowrite32((80 * bus_freq)/1000+2, denali->flash_reg + RE_2_RE); /* Trhz */
602 denali->nand.ecc.strength = ioread32(denali->flash_reg + ECC_CORRECTION); //BRUCE
603 denali->nand.ecc.size = 512;//BRUCE
604
605 break;
606 default:
607 dev_warn(denali->dev,
608 "Spectra: Unknown ESMT NAND (Device ID: 0x%x)."
609 "Will use default parameter values instead.\n",
610 device_id);
611 }
612}
613static uint32_t detect_nand_bus_freq(void)
614{
615 void __iomem *reg;
616 uint32_t clk_reg = 0;
617 reg = ioremap(0x01307050,4);
618
619 clk_reg = readl(reg);/*MOD_CLK_SEL[13:12]=00,7520v2 NAND 104MHz*/
620 clk_reg &= 0xffffcfff;
621 writel(clk_reg, reg);
622
623 if((((readl(reg))>>12) & 0x3) == 0)
624 return 104;
625 else
626 return 26;
627}
628/* determines how many NAND chips are connected to the controller. Note for
629 * Intel CE4100 devices we don't support more than one device.
630 */
631static void find_valid_banks(struct denali_nand_info *denali)
632{
633 uint32_t id[denali->max_banks];
634 int i;
635
636 denali->total_used_banks = 1;
637 for (i = 0; i < denali->max_banks; i++) {
638 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
639 index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
640 index_addr_read_data(denali,
641 (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);
642
643 dev_dbg(denali->dev,
644 "Return 1st ID for bank[%d]: %x\n", i, id[i]);
645
646 if (i == 0) {
647 if (!(id[i] & 0x0ff))
648 break; /* WTF? */
649 } else {
650 if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
651 denali->total_used_banks++;
652 else
653 break;
654 }
655 }
656
657 if (denali->platform == INTEL_CE4100) {
658 /* Platform limitations of the CE4100 device limit
659 * users to a single chip solution for NAND.
660 * Multichip support is not enabled.
661 */
662 if (denali->total_used_banks != 1) {
663 dev_err(denali->dev,
664 "Sorry, Intel CE4100 only supports "
665 "a single NAND device.\n");
666 BUG();
667 }
668 }
669 dev_dbg(denali->dev,
670 "denali->total_used_banks: %d\n", denali->total_used_banks);
671}
672
673/*
674 * Use the configuration feature register to determine the maximum number of
675 * banks that the hardware supports.
676 */
677/*static void detect_max_banks(struct denali_nand_info *denali)
678{
679 uint32_t features = ioread32(denali->flash_reg + FEATURES);
680
681 denali->max_banks = 2 << (features & FEATURES__N_BANKS);
682}*/
683
/* Compute denali->fwblks: the number of leading blocks reserved for
 * firmware that the MTD driver must not touch. */
static void detect_partition_feature(struct denali_nand_info *denali)
{
	/* For MRST platform, denali->fwblks represent the
	 * number of blocks firmware is taken,
	 * FW is in protect partition and MTD driver has no
	 * permission to access it. So let driver know how many
	 * blocks it can't touch.
	 * */
	if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
		if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
			PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
			/* firmware span = (min bank * blocks/chip) + min block */
			denali->fwblks =
			    ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
			      MIN_MAX_BANK__MIN_VALUE) *
			     denali->blksperchip)
			    +
			    (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
			     MIN_BLK_ADDR__VALUE);
		} else
			/* partition feature present but not Spectra-owned */
			denali->fwblks = SPECTRA_START_BLOCK;
	} else
		/* no hardware partition support: use the compiled-in default */
		denali->fwblks = SPECTRA_START_BLOCK;
}
707
/* Identify the attached NAND by issuing READ ID directly (the controller's
 * DEVICE_ID register is unreliable for some chips), dispatch to the
 * per-vendor parameter/timing setup, then detect banks and the firmware
 * partition.  Always returns PASS. */
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
	uint16_t status = PASS;
	uint32_t id_bytes[5], addr;
	uint8_t i, maf_id, device_id;

	dev_dbg(denali->dev,
			"%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	/* Use read id method to get device ID and other
	 * params. For some NAND chips, controller can't
	 * report the correct device ID by reading from
	 * DEVICE_ID register
	 * */
	addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, (uint32_t)addr | 0, 0x90);
	index_addr(denali, (uint32_t)addr | 1, 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
	maf_id = id_bytes[0];
	device_id = id_bytes[1];
	dev_info(denali->dev, "maf = %x,dev = %x\n", maf_id, device_id);
	if (maf_id == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali, device_id);
	} else if (maf_id == 0x98) { /* Toshiba NAND */
		/*
		 * NOTE(review): 0x98 is Toshiba's JEDEC maker code, yet this
		 * calls the Nanya helper and leaves get_toshiba_nand_para()
		 * unused.  Presumably deliberate for a rebadged part --
		 * confirm before changing.
		 */
		get_nanya_nand_para(denali, device_id);
	} else if (maf_id == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali, device_id);
	} else if (maf_id == 0x2C) { /* Micron NAND */
		get_micron_nand_para(denali, device_id);
	} else if (maf_id == 0x01) { /* JSC NAND */
		get_jsc_nand_para(denali, device_id);
	} else if (maf_id == 0xC8) { /* ESMT NAND */
		get_esmt_nand_para(denali, device_id);
	} else if (maf_id == 0xEF) { /* W71NW20GF3FW NAND */
		get_winbond_nand_para(denali, device_id);
	}

	dev_info(denali->dev,
			"Dump timing register values:"
			"acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
			"we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(denali->flash_reg + ACC_CLKS),
			ioread32(denali->flash_reg + RE_2_WE),
			ioread32(denali->flash_reg + RE_2_RE),
			ioread32(denali->flash_reg + WE_2_RE),
			ioread32(denali->flash_reg + ADDR_2_DATA),
			ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
			ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
			ioread32(denali->flash_reg + CS_SETUP_CNT));

	find_valid_banks(denali);

	detect_partition_feature(denali);

	/* If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	/*if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
		nand_onfi_timing_set(denali, onfi_timing_mode);*/

	return status;
}
773
774
775 void denali_set_intr_modes(struct denali_nand_info *denali,
776 uint16_t INT_ENABLE)
777{
778 dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
779 __FILE__, __LINE__, __func__);
780
781 if (INT_ENABLE)
782 iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
783 else
784 iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
785}
786
/* validation function to verify that the controlling software is making
 * a valid request: the controller exposes four chip selects (0..3).
 */
static inline bool is_flash_bank_valid(int flash_bank)
{
	if (flash_bank < 0)
		return false;
	return flash_bank < 4;
}
794
/* Bring interrupt state to a known baseline: gate off global interrupts,
 * clear every bank's pending status (write-1-to-clear), then program the
 * driver's interrupt mask on each bank. */
static void denali_irq_init(struct denali_nand_info *denali)
{
	uint32_t int_mask = 0;
	int i;

	/* Disable global interrupts */
	denali_set_intr_modes(denali, false);

	int_mask = DENALI_IRQ_ALL;

	/* Clear all status bits */
	for (i = 0; i < denali->max_banks; ++i)
		iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));

	denali_irq_enable(denali, int_mask);
}
811
/* Teardown counterpart of denali_irq_init: mask controller interrupts
 * globally and release the IRQ line. */
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
	denali_set_intr_modes(denali, false);
	free_irq(irqnum, denali);
}
817
/* Program the same interrupt-enable mask into every bank's INTR_EN
 * register. */
static void denali_irq_enable(struct denali_nand_info *denali,
							uint32_t int_mask)
{
	int i;

	for (i = 0; i < denali->max_banks; ++i)
		iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
}
826
/* Report which of the interrupts this driver cares about are currently
 * pending on the selected bank.  Non-blocking: the raw status word is
 * simply masked with DENALI_IRQ_ALL (reduces ISR overhead on the shared
 * interrupt line). */
static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
{
	return read_interrupt_status(denali) & DENALI_IRQ_ALL;
}
834
835/* Interrupts are cleared by writing a 1 to the appropriate status bit */
836static inline void clear_interrupt(struct denali_nand_info *denali,
837 uint32_t irq_mask)
838{
839 uint32_t intr_status_reg = 0;
840
841 intr_status_reg = INTR_STATUS(denali->flash_bank);
842
843 iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
844}
845
/*
 * Atomically (w.r.t. the ISR) snapshot the pending interrupt status,
 * acknowledge it in hardware, and reset the software status copy.
 * Must hold irq_lock so the ISR cannot interleave between the read,
 * the hardware ack, and the reset of denali->irq_status.
 */
static void clear_interrupts(struct denali_nand_info *denali)
{
	uint32_t status = 0x0;
	spin_lock_irq(&denali->irq_lock);

	status = read_interrupt_status(denali);
	clear_interrupt(denali, status);

	denali->irq_status = 0x0;
	spin_unlock_irq(&denali->irq_lock);
}
857
858static uint32_t read_interrupt_status(struct denali_nand_info *denali)
859{
860 uint32_t intr_status_reg = 0;
861
862 intr_status_reg = INTR_STATUS(denali->flash_bank);
863
864 return ioread32(denali->flash_reg + intr_status_reg);
865}
866
867/* This is the interrupt service routine. It handles all interrupts
868 * sent to this device. Note that on CE4100, this is a shared
869 * interrupt.
870 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	uint32_t irq_status = 0x0;
	irqreturn_t result = IRQ_NONE;

	/* Plain spin_lock is sufficient: we are already in hard-IRQ
	 * context; waiters use spin_lock_irq on the same lock. */
	spin_lock(&denali->irq_lock);

	/* check to see if a valid NAND chip has
	 * been selected.
	 */
	if (is_flash_bank_valid(denali->flash_bank)) {
		/* check to see if controller generated
		 * the interrupt, since this is a shared interrupt */
		irq_status = denali_irq_detected(denali);
		if (irq_status != 0) {
			/* handle interrupt */
			/* first acknowledge it */
			clear_interrupt(denali, irq_status);
			/* store the status in the device context for someone
			   to read */
			denali->irq_status |= irq_status;
			/* notify anyone who cares that it happened */
			complete(&denali->complete);
			/* tell the OS that we've handled this */
			result = IRQ_HANDLED;
		}
	}
	spin_unlock(&denali->irq_lock);
	return result;
}
902#define BANK(x) ((x) << 24)
903
904static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
905{
906 unsigned long comp_res = 0;
907 uint32_t intr_status = 0;
908 bool retry = false;
909 unsigned long timeout = msecs_to_jiffies(1000);
910
911 do {
912 comp_res =
913 wait_for_completion_timeout(&denali->complete, timeout);
914 spin_lock_irq(&denali->irq_lock);
915 intr_status = denali->irq_status;
916
917 if (intr_status & irq_mask) {
918 denali->irq_status &= ~irq_mask;
919 spin_unlock_irq(&denali->irq_lock);
920 /* our interrupt was detected */
921 break;
922 } else {
923 /* these are not the interrupts you are looking for -
924 * need to wait again */
925 spin_unlock_irq(&denali->irq_lock);
926 retry = true;
927 }
928 } while (comp_res != 0);
929
930 if (comp_res == 0) {
931 /* timeout */
932 printk(KERN_ERR "denali timeout occurred, status = 0x%x, mask = 0x%x\n",
933 intr_status, irq_mask);
934
935 intr_status = 0;
936 }
937
938 return intr_status;
939}
940
941/* This helper function setups the registers for ECC and whether or not
942 * the spare area will be transferred. */
943static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
944 bool transfer_spare)
945{
946 int ecc_en_flag = 0, transfer_spare_flag = 0;
947
948 /* set ECC, transfer spare bits if needed */
949 ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
950 transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
951
952 /* Enable spare area/ECC per user's request. */
953 iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
954 iowrite32(transfer_spare_flag,
955 denali->flash_reg + TRANSFER_SPARE_REG);
956}
957
958/* sends a pipeline command operation to the controller. See the Denali NAND
959 * controller's user guide for more information (section 4.2.3.6).
960 */
/*
 * Issue a pipelined read/write setup sequence for the current
 * denali->page on the current bank. Returns PASS, or FAIL if the
 * read-pipeline command was not accepted within the IRQ timeout.
 */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
							bool ecc_en,
							bool transfer_spare,
							int access_type,
							int op)
{
	int status = PASS;
	uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
		irq_mask = 0;

	/* Reads signal acceptance via LOAD_COMP; writes have no setup
	 * completion interrupt, the caller waits on PROGRAM_COMP later. */
	if (op == DENALI_READ)
		irq_mask = INTR_STATUS__LOAD_COMP;
	else if (op == DENALI_WRITE)
		irq_mask = 0;
	else
		BUG();

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

	/* clear interrupts */
	clear_interrupts(denali);

	/* Bank select (bits 25:24 via BANK()) plus the page number. */
	addr = BANK(denali->flash_bank) | denali->page;

	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
		/* Main-area write: direct MODE_01 data access, no setup. */
		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
		/* Spare-area write: select access type first (MODE_10),
		 * then open the data port (MODE_01). */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		cmd = MODE_01 | addr;
		iowrite32(cmd, denali->flash_mem);
	} else if (op == DENALI_READ) {
		/* setup page read request for access type */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		/* page 33 of the NAND controller spec indicates we should not
		   use the pipeline commands in Spare area only mode. So we
		   don't.
		 */
		if (access_type == SPARE_ACCESS)
		{
			cmd = MODE_01 | addr;
			iowrite32(cmd, denali->flash_mem);
		} else {
			/* 0x2000 = pipeline command; wait for it to be
			 * accepted before opening the data port. */
			index_addr(denali, (uint32_t)cmd,
					0x2000 | op | page_count);

			/* wait for command to be accepted
			 * can always use status0 bit as the
			 * mask is identical for each
			 * bank. */
			irq_status = wait_for_irq(denali, irq_mask);

			if (irq_status == 0) {
				dev_err(denali->dev,
						"cmd, page, addr on timeout "
						"(0x%x, 0x%x, 0x%x)\n",
						cmd, denali->page, addr);
				status = FAIL;
			} else {
				cmd = MODE_01 | addr;
				iowrite32(cmd, denali->flash_mem);
			}
		}
	}
	return status;
}
1032
1033/* helper function that simply writes a buffer to the flash */
1034static int write_data_to_flash_mem(struct denali_nand_info *denali,
1035 const uint8_t *buf,
1036 int len)
1037{
1038 uint32_t i = 0, *buf32;
1039
1040 /* verify that the len is a multiple of 4. see comment in
1041 * read_data_from_flash_mem() */
1042 BUG_ON((len % 4) != 0);
1043
1044 /* write the data to the flash memory */
1045 buf32 = (uint32_t *)buf;
1046 for (i = 0; i < len / 4; i++)
1047 iowrite32(*buf32++, denali->flash_mem + 0x10);
1048 return i*4; /* intent is to return the number of bytes read */
1049}
1050
1051/* helper function that simply reads a buffer from the flash */
1052static int read_data_from_flash_mem(struct denali_nand_info *denali,
1053 uint8_t *buf,
1054 int len)
1055{
1056 uint32_t i = 0, *buf32;
1057
1058 /* we assume that len will be a multiple of 4, if not
1059 * it would be nice to know about it ASAP rather than
1060 * have random failures...
1061 * This assumption is based on the fact that this
1062 * function is designed to be used to read flash pages,
1063 * which are typically multiples of 4...
1064 */
1065
1066 BUG_ON((len % 4) != 0);
1067
1068 /* transfer the data from the flash */
1069 buf32 = (uint32_t *)buf;
1070 for (i = 0; i < len / 4; i++)
1071 {
1072 *buf32++ = ioread32(denali->flash_mem + 0x10);
1073
1074 }
1075 return i*4; /* intent is to return the number of bytes read */
1076}
1077
1078/* writes OOB data to the device */
/* writes OOB data to the device
 *
 * Programs the spare area of 'page' with mtd->oobsize bytes from
 * 'buf'. Returns 0 on success, -EIO if the pipeline setup or the
 * program operation failed/timed out.
 */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
						INTR_STATUS__PROGRAM_FAIL;
	int status = 0, addr = 0x0, cmd = 0x0;
	denali->page = page;
	//printk("denali_write_oob %d page\n",denali->page);
	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
							DENALI_WRITE) == PASS) {
		write_data_to_flash_mem(denali, buf, mtd->oobsize);

		/* wait for operation to complete */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0) {
			dev_err(denali->dev, "OOB write failed\n");
			status = -EIO;
		}
	} else {
		dev_err(denali->dev, "unable to send pipeline command\n");
		status = -EIO;
	}

	/* We set the device back to MAIN_ACCESS here as I observed
	 * instability with the controller if you do a block erase
	 * and the last transaction was a SPARE_ACCESS. Block erase
	 * is reliable (according to the MTD test infrastructure)
	 * if you are in MAIN_ACCESS.
	 */
	addr = BANK(denali->flash_bank) | denali->page;
	cmd = MODE_10 | addr;
	index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);

	return status;
}
1116
1117/* reads OOB data from the device */
/* reads OOB data from the device
 *
 * Fills 'buf' with mtd->oobsize bytes from the spare area of 'page'.
 * Returns 0 on success, -ETIME if the load did not complete in time.
 * Note: the data is drained from the FIFO before waiting for the
 * completion interrupt, mirroring the original flow.
 */
static int read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS__LOAD_COMP|INTR_STATUS__TIME_OUT,
		 irq_status = 0, addr = 0x0, cmd = 0x0;
	int ret = 0;

	denali->page = page;
//	dev_err(denali->dev, "OOB read %d page\n",page);
	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
							DENALI_READ) == PASS) {
		read_data_from_flash_mem(denali, buf, mtd->oobsize);
//		dev_err(denali->dev, "OOB read OK\n");
		/* wait for command to be accepted
		 * can always use status0 bit as the mask is identical for each
		 * bank. */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0) {
			ret = -ETIME;
			dev_err(denali->dev, "page on OOB timeout %d\n",
					denali->page);
		}

		/* We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
	}

	return ret;
}
1155
1156/* this function examines buffers to see if they contain data that
1157 * indicate that the buffer is part of an erased region of flash.
1158 */
/* Return true when every byte of 'buf' is 0xFF, i.e. the buffer looks
 * like an erased region of NAND. */
bool is_erased(uint8_t *buf, int len)
{
	const uint8_t *p = buf;
	const uint8_t *end = buf + len;

	while (p < end) {
		if (*p != 0xFF)
			return false;
		p++;
	}
	return true;
}
1167#define ECC_SECTOR_SIZE 512
1168
1169#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
1170#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
1171#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
1172#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
1173#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
1174#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
1175
/*
 * Inspect the outcome of an ECC-enabled read. Returns 1 when the
 * controller flagged an uncorrectable ECC error — the caller must then
 * check whether the page is simply erased (all 0xFF) before counting a
 * real failure. Otherwise reads back the per-bank maximum-corrected-bit
 * count (currently unused beyond the commented-out diagnostic).
 * NOTE(review): 'buf' is unused here; correction data is not patched
 * into the buffer by software on this controller.
 */
static int handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
			uint32_t irq_status)
{
	int check_erased_page = 0;
	uint32_t err_correction_value = 0;
	uint32_t err_correction_info = 0;


	/*zx297510 use*/
	if (irq_status & INTR_STATUS__ECC_ERR)
	{
		check_erased_page = 1;

	}
	else
	{
		/* Each 32-bit info register packs two banks' max-error
		 * counts: B0/B2 in bits 7:0, B1/B3 in bits 15:8. */
		switch(denali->flash_bank)
		{
			case 0:
				err_correction_info = ioread32(denali->flash_reg +ERR_CORRECTION_INFO_B01);
				err_correction_value = err_correction_info & ERR_CORRECTION_INFO_B01__MAX_ERRORS_B0;
				break;

			case 1:
				err_correction_info = ioread32(denali->flash_reg +ERR_CORRECTION_INFO_B01);
				err_correction_value = (err_correction_info & ERR_CORRECTION_INFO_B01__MAX_ERRORS_B1)>>8;
				break;

			case 2:
				err_correction_info = ioread32(denali->flash_reg +ERR_CORRECTION_INFO_B23);
				err_correction_value = err_correction_info & ERR_CORRECTION_INFO_B01__MAX_ERRORS_B2;
				break;

			case 3:
				err_correction_info = ioread32(denali->flash_reg +ERR_CORRECTION_INFO_B23);
				err_correction_value = (err_correction_info & ERR_CORRECTION_INFO_B01__MAX_ERRORS_B3)>>8;
				break;

			default:
				break;

		}
		//if(err_correction_value)
			//printk("correct %d bit errors on page %x.\n",err_correction_value,denali->page);


	}
	return check_erased_page;

}
1227
1228/* programs the controller to either enable/disable DMA transfers */
1229static void denali_enable_dma(struct denali_nand_info *denali, bool en)
1230{
1231 uint32_t reg_val = 0x0;
1232
1233 if (en)
1234 reg_val = DMA_ENABLE__FLAG;
1235
1236 iowrite32(reg_val, denali->flash_reg + DMA_ENABLE);
1237 ioread32(denali->flash_reg + DMA_ENABLE);
1238}
1239
1240/* setups the HW to perform the data DMA */
/* setups the HW to perform the data DMA
 *
 * Kicks off a single-page DMA (read or write, per 'op') between the
 * driver's coherent buffer (denali->buf.dma_buf) and denali->page.
 * Completion is signalled via INTR_STATUS__DMA_CMD_COMP; the caller
 * waits on it.
 */
static void denali_setup_dma(struct denali_nand_info *denali, int op)
{
	uint32_t mode = 0x0;
	const int page_count = 1;
	dma_addr_t addr = denali->buf.dma_buf;

	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes*/
	index_addr(denali, mode | 0x14000, 0x2400);
}
1263
1264/* writes a page. user specifies type, and this function handles the
1265 * configuration details. */
1266static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1267 const uint8_t *buf, bool raw_xfer)
1268{
1269 struct denali_nand_info *denali = mtd_to_denali(mtd);
1270
1271 dma_addr_t addr = denali->buf.dma_buf;
1272 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1273
1274 uint32_t irq_status = 0;
1275 uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
1276 INTR_STATUS__PROGRAM_FAIL;
1277
1278 /* if it is a raw xfer, we want to disable ecc, and send
1279 * the spare area.
1280 * !raw_xfer - enable ecc
1281 * raw_xfer - transfer spare
1282 */
1283 setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
1284
1285 /* copy buffer into DMA buffer */
1286 memcpy(denali->buf.buf, buf, mtd->writesize);
1287
1288 if (raw_xfer) {
1289 /* transfer the data to the spare area */
1290 memcpy(denali->buf.buf + mtd->writesize,
1291 chip->oob_poi,
1292 mtd->oobsize);
1293 }
1294
1295 dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
1296
1297 clear_interrupts(denali);
1298 denali_enable_dma(denali, true);
1299
1300 denali_setup_dma(denali, DENALI_WRITE);
1301
1302 /* wait for operation to complete */
1303 irq_status = wait_for_irq(denali, irq_mask);
1304
1305 if (irq_status == 0) {
1306 dev_err(denali->dev,
1307 "timeout on write_page (type = %d)\n",
1308 raw_xfer);
1309 denali->status =
1310 (irq_status & INTR_STATUS__PROGRAM_FAIL) ?
1311 NAND_STATUS_FAIL : PASS;
1312 }
1313
1314 denali_enable_dma(denali, false);
1315 dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
1316}
1317
1318//add by zhouqi
//add by zhouqi
/*
 * Program a page together with caller-supplied OOB data, laying the
 * main data out around the in-line ECC fields.
 *
 * The controller interleaves ECC with data: each ecc.size-byte sector
 * is followed by ecc.bytes of ECC within the page, so the last partial
 * sector spills past writesize into the spare area. This function
 * rebuilds that physical layout in the DMA buffer: N-1 full sectors,
 * a short final chunk, the spilled remainder at writesize+2 (the two
 * bytes are SPARE_AREA_SKIP_BYTES, set in denali_hw_init), and finally
 * the caller's free OOB bytes from chip->oob_poi after the last ECC
 * field. Result is latched in denali->status on timeout.
 */
static void write_page_ops(struct mtd_info *mtd, struct nand_chip *chip,
			const uint8_t *buf)
{
	int N,len,sector_size,ecc_bytes,i;
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
						INTR_STATUS__PROGRAM_FAIL;

	/* ECC on, spare transferred: the controller writes the whole
	 * page+OOB image we assemble below. */
	setup_ecc_for_xfer(denali, true, true);

	/* Start from an all-0xFF (erased) image so untouched bytes stay
	 * erased. */

	memset((void *)(denali->buf.buf),0xff,denali->mtd.writesize+denali->mtd.oobsize);

	sector_size = denali->nand.ecc.size;
	ecc_bytes = denali->nand.ecc.bytes;
	/* Number of (data+ECC) slots that start within the main area. */
	N = denali->mtd.writesize/(sector_size+ecc_bytes) + 1;
	len = sector_size;

	/* Scatter the logical data into the per-sector slots, skipping
	 * the ECC gap after each sector; the final slot is short. */
	for(i=0;i < N;i++)
	{
		if(i==N-1)
		{
			len = denali->mtd.writesize - (sector_size+ecc_bytes)*i;
		}

		memcpy((void *)(denali->buf.buf+(sector_size+ecc_bytes)*i), (void *)(buf+sector_size*i), len); ;

	}


	/* Remainder of the last sector that spills into the spare area. */
	len = sector_size - len;

	memcpy((void *)(denali->buf.buf + denali->mtd.writesize+2), (void *)(buf + sector_size*i -len), len);

	/* Caller's free OOB bytes, placed after the last ECC field. */
	memcpy((void *)(denali->buf.buf+denali->mtd.writesize+2+len+ecc_bytes), (void *)(chip->oob_poi + 2+len+ecc_bytes), \
		denali->mtd.oobsize-2-len-ecc_bytes);

	//dma_sync_single_for_device(denali->dev, addr, denali->mtd.writesize, DMA_TO_DEVICE);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0) {
		dev_err(denali->dev,
				"timeout on write_page (type = %d)\n",
				1);
		denali->status =
			(irq_status & INTR_STATUS__PROGRAM_FAIL) ?
			NAND_STATUS_FAIL : PASS;
	}

	denali_enable_dma(denali, false);
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
	setup_ecc_for_xfer(denali,false, false); //zhouqi
}
1388/* NAND core entry points */
1389
1390/* this is the callback that the NAND core calls to write a page. Since
1391 * writing a page with ECC or without is similar, all the work is done
1392 * by write_page above.
1393 * */
/* this is the callback that the NAND core calls to write a page. Since
 * writing a page with ECC or without is similar, all the work is done
 * by write_page above.
 *
 * Routing: pages below 512 belong to the zloader region and are written
 * with the ECC correction strength temporarily dropped to 8 bits
 * (value restored afterwards); otherwise, if the caller supplied OOB
 * data the combined main+OOB path (write_page_ops) is used, else the
 * plain ECC write path.
 */
static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				const uint8_t *buf, struct mtd_oob_ops *ops)
{
	/* for regular page writes, we let HW handle all the ECC
	 * data written to the device. */
	//write_page(mtd, chip, buf, false);
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	if(denali->page < 512) /*zloader,no ecc*/
	{
		int ecc_bits = readl(denali->flash_reg + ECC_CORRECTION);
		writel(0x8, denali->flash_reg + ECC_CORRECTION);
		write_page(mtd, chip, buf, false);
		writel(ecc_bits, denali->flash_reg + ECC_CORRECTION);
	}
	else
	{
		if((ops->oobbuf != NULL) && (ops->ooblen != 0))
		{
			write_page_ops(mtd, chip, buf);
		}
		else
		{
			write_page(mtd, chip, buf, false);
		}
	}
}
1421
1422/* This is the callback that the NAND core calls to write a page without ECC.
1423 * raw access is similar to ECC page writes, so all the work is done in the
1424 * write_page() function above.
1425 */
/* This is the callback that the NAND core calls to write a page without ECC.
 * raw access is similar to ECC page writes, so all the work is done in the
 * write_page() function above.
 */
static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
					const uint8_t *buf)
{
	/* for raw page writes, we want to disable ECC and simply write
	   whatever data is in the buffer. */
	write_page(mtd, chip, buf, true);
}
1433
/* NAND-core callback: program the OOB area of 'page' from the
 * chip-level OOB staging buffer. Returns 0 or -EIO. */
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	return write_oob_data(mtd, chip->oob_poi, page);
}
1439
/* NAND-core callback: read the OOB area of 'page' into the chip-level
 * OOB staging buffer. 'sndcmd' is part of the callback signature but
 * unused here. Returns 0 or -ETIME. */
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page, int sndcmd)
{
	return read_oob_data(mtd, chip->oob_poi, page); /* notify NAND core to send command to
							   NAND device. */
}
1446
1447//add by zhouqi
//add by zhouqi
/*
 * Read a page together with its OOB data, undoing the in-line ECC
 * layout produced by write_page_ops(): gather the N sector-sized data
 * chunks (skipping the ECC gap after each), pick up the last sector's
 * spill from writesize+2, and copy the remaining free OOB bytes into
 * chip->oob_poi. On an uncorrectable-ECC indication the page is
 * re-checked for the all-0xFF erased pattern before counting a real
 * failure. Returns 0, or -ETIME if the DMA did not complete in time.
 */
static int read_page_ops(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	int N,len,sector_size,ecc_bytes,i;
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
//	uint32_t status_mask = INTR_STATUS__ECC_ERR |
//					INTR_STATUS__ECC_ERR;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
	uint32_t check_erased_page = false;
	int ret = 0;

	/* The NAND core must have issued READ0 for this page first. */
	if (page != denali->page) {
		dev_err(denali->dev, "IN %s: page %d is not"
				" equal to denali->page %d, investigate!!",
				__func__, page, denali->page);
		BUG();
	}


	setup_ecc_for_xfer(denali, true, true);
	denali_enable_dma(denali, true);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);
	if (!irq_status) {
		ret = -ETIME;
	}
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
	sector_size = denali->nand.ecc.size;
	ecc_bytes = denali->nand.ecc.bytes;
	/* Number of (data+ECC) slots starting within the main area. */
	N = denali->mtd.writesize/(sector_size+ecc_bytes) + 1;
	len = sector_size;

	/* Gather each sector's data, skipping its trailing ECC bytes;
	 * the final slot is short. */
	for(i=0;i < N;i++)
	{
		if(i==N-1)
		{
			len = denali->mtd.writesize - (sector_size+ecc_bytes)*i;
		}

		memcpy((void *)(buf+sector_size*i),(void *)( denali->buf.buf + (sector_size+ecc_bytes)*i),len);

	}


	/* Remainder of the last sector spilled into the spare area
	 * (after the 2 SPARE_AREA_SKIP_BYTES). */
	len = sector_size - len;
	memcpy((void *)(buf + sector_size*(N-1)+len), (void *)(denali->buf.buf + denali->mtd.writesize +2), len);
	/* Consumed spare bytes (skip+spill+ECC) read back as 0xFF. */
	memset((void *)(chip->oob_poi), 0xFF, len +ecc_bytes+2);
	memcpy((void *)(chip->oob_poi + len+ecc_bytes+2), (void *)(denali->buf.buf + denali->mtd.writesize+len+ecc_bytes+2),\
		denali->mtd.oobsize-len -ecc_bytes-2);
	check_erased_page = handle_ecc(denali, buf, irq_status);
	denali_enable_dma(denali, false);
	setup_ecc_for_xfer(denali,false, false);

	if (check_erased_page) {
		read_oob_data(&denali->mtd, chip->oob_poi, denali->page);

		/* check ECC failures that may have occurred on erased pages */
		if (check_erased_page) {
			if (!is_erased(buf, denali->mtd.writesize))
			{
				denali->mtd.ecc_stats.failed++;
			}
			if (!is_erased(chip->oob_poi, denali->mtd.oobsize))
			{
				//denali->mtd.ecc_stats.failed++;
			}
		}
	}
	return ret;
}
/*
 * DMA-read denali->page (main area only, hardware ECC enabled) into
 * 'buf'. On an uncorrectable-ECC indication the page is re-read as
 * OOB and checked for the all-0xFF erased pattern before a real
 * failure is counted in ecc_stats. Returns 0, or -ETIME on DMA
 * completion timeout.
 */
static int read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;
	uint32_t irq_status = 0;
	//uint32_t irq_mask = INTR_STATUS__ECC_ERR|INTR_STATUS__DMA_CMD_COMP;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
	bool check_erased_page = false;
	int ret = 0;

	/* The NAND core must have issued READ0 for this page first. */
	if (page != denali->page) {
		dev_err(denali->dev, "IN %s: page %d is not"
				" equal to denali->page %d, investigate!!",
				__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali,true, false);
	denali_enable_dma(denali, true);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);
	if (!irq_status) {
		ret = -ETIME;
	}

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
	memcpy(buf, denali->buf.buf, mtd->writesize);
	check_erased_page = handle_ecc(denali, buf, irq_status);
	denali_enable_dma(denali, false);
	setup_ecc_for_xfer(denali,false, false);

	if (check_erased_page) {
		read_oob_data(&denali->mtd, chip->oob_poi, denali->page);

		/* check ECC failures that may have occurred on erased pages */
		if (check_erased_page) {
			if (!is_erased(buf, denali->mtd.writesize))
			{
				denali->mtd.ecc_stats.failed++;
			}
			if (!is_erased(chip->oob_poi, denali->mtd.oobsize))
			{
				//denali->mtd.ecc_stats.failed++;
			}
		}
	}
	return ret;
}
1583
 /*
  * NAND-core raw-read callback: DMA the whole page + spare area into
  * 'buf'/'chip->oob_poi' with hardware ECC disabled.
  * NOTE(review): the wait_for_irq() result is not checked here, so a
  * DMA timeout still returns 0 and stale buffer contents — confirm
  * whether callers tolerate this before changing it.
  */
 int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;

	/* The NAND core must have issued READ0 for this page first. */
	if (page != denali->page) {
		dev_err(denali->dev, "IN %s: page %d is not"
				" equal to denali->page %d, investigate!!",
				__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, false, true);
	denali_enable_dma(denali, true);

	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	denali_enable_dma(denali, false);

	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
1621 //add by zhouqi
1622 static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1623 uint8_t *buf, int page, struct mtd_oob_ops *ops)//add by zhouqi
1624 {
1625
1626 struct denali_nand_info *denali = mtd_to_denali(mtd);
1627 if(page < 512) /*zloader,no ECC*/
1628 {
1629 int ecc_bits = readl(denali->flash_reg + ECC_CORRECTION);
1630 writel(0x8, denali->flash_reg + ECC_CORRECTION);
1631 read_page(mtd, chip, buf, page);
1632 writel(ecc_bits, denali->flash_reg + ECC_CORRECTION);
1633
1634 }
1635 else
1636 {
1637 if((ops->oobbuf != NULL) && ops->ooblen != 0)
1638 {
1639
1640 read_page_ops(mtd, chip, buf, page);
1641 }
1642 else
1643 {
1644
1645 read_page(mtd, chip, buf, page);
1646 }
1647 }
1648 return 0;
1649 }
1650
1651static uint8_t denali_read_byte(struct mtd_info *mtd)
1652{
1653 struct denali_nand_info *denali = mtd_to_denali(mtd);
1654 uint8_t result = 0xff;
1655
1656 if (denali->buf.head < denali->buf.tail)
1657 result = denali->buf.buf[denali->buf.head++];
1658
1659 return result;
1660}
1661
/* NAND-core callback: record the selected chip as the active flash
 * bank. Taken under irq_lock because the ISR reads flash_bank to pick
 * the right status register. */
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	spin_lock_irq(&denali->irq_lock);
	denali->flash_bank = chip;
	spin_unlock_irq(&denali->irq_lock);

}
1671
1672static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1673{
1674 struct denali_nand_info *denali = mtd_to_denali(mtd);
1675
1676 int status = denali->status;
1677 denali->status = 0;
1678
1679 return status;
1680}
1681
/*
 * Erase the block containing 'page'. The outcome is latched in
 * denali->status for denali_waitfunc(): NAND_STATUS_FAIL on an
 * ERASE_FAIL interrupt (or on timeout, since irq_status is then 0
 * and the FAIL bit is clear — reported as PASS; see write_page for
 * the analogous caveat).
 */
static void denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t cmd = 0x0, irq_status = 0;

	/* clear interrupts */
	clear_interrupts(denali);

	/* setup page read request for access type */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, (uint32_t)cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
					INTR_STATUS__ERASE_FAIL);

	denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
						NAND_STATUS_FAIL : PASS;


}
1704
/*
 * NAND-core command dispatcher. READID/PARAM issue a 0x90 READID
 * sequence directly to the die and stage 5 ID bytes for
 * denali_read_byte(); READ0/SEQIN only record the target page (the
 * actual transfer happens in the page read/write callbacks);
 * PAGEPROG and READOOB are no-ops here.
 */
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
				int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr, id;
	int i;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		break;
	case NAND_CMD_STATUS:
		read_status(denali);
		break;
	case NAND_CMD_READID:
	case NAND_CMD_PARAM:
		reset_buf(denali);
		/*sometimes ManufactureId read from register is not right
		 * e.g. some of Micron MT29F32G08QAA MLC NAND chips
		 * So here we send READID cmd to NAND insteand
		 * */
		addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, (uint32_t)addr | 0, 0x90);
		index_addr(denali, (uint32_t)addr | 1, 0);
		for (i = 0; i < 5; i++) {
			index_addr_read_data(denali,
						(uint32_t)addr | 2,
						&id);
			write_byte_to_buf(denali, id);
		}
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	default:
		printk(KERN_ERR ": unsupported command"
				" received 0x%x\n", cmd);
		break;
	}

}
1752
1753/* stubs for ECC functions not used by the NAND core */
/* Software-ECC stub: ECC is done entirely in hardware, so the NAND
 * core must never route ECC calculation here. Trap if it does. */
static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc_code)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dev_err(denali->dev,
			"denali_ecc_calculate called unexpectedly\n");
	BUG();

	return -EIO;
}
1765
/* Software-ECC stub: correction is done in hardware; the NAND core
 * must never call this. Trap if it does. */
static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
				uint8_t *read_ecc, uint8_t *calc_ecc)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dev_err(denali->dev,
			"denali_ecc_correct called unexpectedly\n");
	BUG();

	return -EIO;
}
1777
/* Software-ECC stub: hardware needs no per-operation ECC control; the
 * NAND core must never call this. Trap if it does. */
static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dev_err(denali->dev,
			"denali_ecc_hwctl called unexpectedly\n");
	BUG();

}
1787/* end NAND core entry points */
1788void denali_gpio_init(void)
1789{
1790 int ret;
1791
1792 ret = gpio_request(ZX29_GPIO_0, "nand_we");
1793 if (ret)
1794 {
1795 pr_info("GPIO0_NAND_WE gpio request error.\n");
1796 return ;
1797 }
1798 zx29_gpio_config(ZX29_GPIO_0, GPIO0_NAND_WE);
1799
1800 ret = gpio_request(ZX29_GPIO_1, "nand_csn");
1801 if (ret)
1802 {
1803 pr_info("GPIO1_NAND_CS0 gpio request error.\n");
1804 return ;
1805 }
1806 zx29_gpio_config(ZX29_GPIO_1, GPIO1_NAND_CS0);
1807
1808 ret = gpio_request(ZX29_GPIO_2, "nand_ready");
1809 if (ret)
1810 {
1811 pr_info("GPIO2_NAND_READY gpio request error.\n");
1812 return ;
1813 }
1814 zx29_gpio_config(ZX29_GPIO_2, GPIO2_NAND_READY);
1815
1816 ret = gpio_request(ZX29_GPIO_3, "nand_cle");
1817 if (ret)
1818 {
1819 pr_info("GPIO3_NAND_CLE gpio request error.\n");
1820 return ;
1821 }
1822 zx29_gpio_config(ZX29_GPIO_3, GPIO3_NAND_CLE);
1823
1824 ret = gpio_request(ZX29_GPIO_4, "nand_ale");
1825 if (ret)
1826 {
1827 pr_info("GPIO4_NAND_ALE gpio request error.\n");
1828 return ;
1829 }
1830 zx29_gpio_config(ZX29_GPIO_4, GPIO4_NAND_ALE);
1831
1832 ret = gpio_request(ZX29_GPIO_5, "nand_re");
1833 if (ret)
1834 {
1835 pr_info("GPIO5_NAND_RE gpio request error.\n");
1836 return ;
1837 }
1838 zx29_gpio_config(ZX29_GPIO_5, GPIO5_NAND_RE);
1839
1840 ret = gpio_request(ZX29_GPIO_6, "nand_wp");
1841 if (ret)
1842 {
1843 pr_info("GPIO6_NAND_WRITE_PROTECT gpio request error.\n");
1844 return ;
1845 }
1846 zx29_gpio_config(ZX29_GPIO_6, GPIO6_NAND_WRITE_PROTECT);
1847
1848 ret = gpio_request(ZX29_GPIO_7, "nand_data_0");
1849 if (ret)
1850 {
1851 pr_info("GPIO7_NAND_DATA0 gpio request error.\n");
1852 return ;
1853 }
1854 zx29_gpio_config(ZX29_GPIO_7, GPIO7_NAND_DATA0);
1855
1856 ret = gpio_request(ZX29_GPIO_8, "nand_data_1");
1857 if (ret)
1858 {
1859 pr_info("GPIO8_NAND_DATA1 gpio request error.\n");
1860 return ;
1861 }
1862 zx29_gpio_config(ZX29_GPIO_8, GPIO8_NAND_DATA1);
1863
1864 ret = gpio_request(ZX29_GPIO_9, "nand_data_2");
1865 if (ret)
1866 {
1867 pr_info("GPIO9_NAND_DATA2 gpio request error.\n");
1868 return ;
1869 }
1870 zx29_gpio_config(ZX29_GPIO_9, GPIO9_NAND_DATA2);
1871
1872 ret = gpio_request(ZX29_GPIO_10, "nand_data_3");
1873 if (ret)
1874 {
1875 pr_info("GPIO10_NAND_DATA3 gpio request error.\n");
1876 return ;
1877 }
1878 zx29_gpio_config(ZX29_GPIO_10, GPIO10_NAND_DATA3);
1879
1880 ret = gpio_request(ZX29_GPIO_11, "nand_data_4");
1881 if (ret)
1882 {
1883 pr_info("GPIO11_NAND_DATA4 gpio request error.\n");
1884 return ;
1885 }
1886 zx29_gpio_config(ZX29_GPIO_11, GPIO11_NAND_DATA4);
1887
1888 ret = gpio_request(ZX29_GPIO_12, "nand_data_5");
1889 if (ret)
1890 {
1891 pr_info("GPIO12_NAND_DATA5 gpio request error.\n");
1892 return ;
1893 }
1894 zx29_gpio_config(ZX29_GPIO_12, GPIO12_NAND_DATA5);
1895
1896 ret = gpio_request(ZX29_GPIO_13, "nand_data_6");
1897 if (ret)
1898 {
1899 pr_info("GPIO13_NAND_DATA6 gpio request error.\n");
1900 return ;
1901 }
1902 zx29_gpio_config(ZX29_GPIO_13, GPIO13_NAND_DATA6);
1903
1904 ret = gpio_request(ZX29_GPIO_14, "nand_data_7");
1905 if (ret)
1906 {
1907 pr_info("GPIO14_NAND_DATA7 gpio request error.\n");
1908 return ;
1909 }
1910 zx29_gpio_config(ZX29_GPIO_14, GPIO14_NAND_DATA7);
1911 return ;
1912}
/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/* tell driver how many bit controller will skip before
	 * writing ECC code in OOB, this register may be already
	 * set by firmware. So we read this value out.
	 * if this value is 0, just let it be.
	 * */

	//detect_max_banks(denali);zangxiaofeng mod
	denali_gpio_init();
	/* this board uses a single bank/chip-select only */
	denali->max_banks = 1;
	denali_nand_reset(denali);
	/* program 2 skip bytes, then read back the value actually in effect
	 * (firmware may already have set it) into bbtskipbytes */
	iowrite32(2, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
	denali->bbtskipbytes = ioread32(denali->flash_reg +
						SPARE_AREA_SKIP_BYTES);
	/* enable ready/busy pins for all banks, single device, CE matters */
	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	iowrite32(0,denali->flash_reg + CHIP_ENABLE_DONT_CARE);
	iowrite32(1,denali->flash_reg + DEVICES_CONNECTED);

	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	iowrite32(1, denali->flash_reg + ECC_ENABLE);
	denali_nand_timing_set(denali);
	denali_irq_init(denali);
}
1941
/*
 * Acquire exclusive access to the NAND controller before an MTD operation:
 * hold a wakelock, mark flash busy for cpuidle, take the inter-core soft
 * spinlock, then ensure the controller IRQ is enabled.  Paired with
 * denali_nand_unlock(), which releases everything in reverse order.
 */
void denali_nand_lock(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	wake_lock(&nand_wake_lock);
	zx_cpuidle_set_busy(IDLE_FLAG_FLASH);
	soft_spin_lock(NAND_SFLOCK);
	/* re-enable the IRQ if denali_nand_unlock() disabled it; the flag
	 * is only touched while the soft spinlock is held */
	if(!denali_int_en_flag)
	{
		enable_irq(denali->irq);
		denali_int_en_flag = 1;
	}

}
/*
 * Release the controller after an MTD operation: clear any pending
 * controller interrupts, disable the IRQ, then drop the soft spinlock,
 * the cpuidle busy flag and the wakelock taken in denali_nand_lock().
 */
void denali_nand_unlock(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	clear_interrupts(denali);
	/* disable the IRQ while idle; flag guarded by the soft spinlock
	 * still held at this point */
	if(denali_int_en_flag)
	{
		disable_irq(denali->irq);
		denali_int_en_flag = 0;
	}
	soft_spin_unlock(NAND_SFLOCK);
	zx_cpuidle_set_free(IDLE_FLAG_FLASH);
	wake_unlock(&nand_wake_lock);
}
1968
/* Although the controller spec says SLC ECC is forced to be 4-bit,
 * the Denali controller in MRST only supports 15-bit and 8-bit ECC
 * correction
 * */
1973
1974
/* OOB/ECC layout; filled in at probe time by denali_nand_ecc_init() */
static struct nand_ecclayout g_nand_oob;

/* On-flash signatures identifying the main and mirror bad-block tables */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/* Main BBT: stored in the last blocks of each chip, versioned, 2 bits
 * per block; pattern at OOB offset 8, version byte at offset 12. */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP|NAND_BBT_SAVECONTENT,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

/* Mirror BBT: identical layout, distinguished only by its pattern */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP|NAND_BBT_SAVECONTENT,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
1999
2000/* initialize driver data structures */
2001void denali_drv_init(struct denali_nand_info *denali)
2002{
2003 denali->idx = 0;
2004
2005 /* setup interrupt handler */
2006 /* the completion object will be used to notify
2007 * the callee that the interrupt is done */
2008 init_completion(&denali->complete);
2009
2010 /* the spinlock will be used to synchronize the ISR
2011 * with any element that might be access shared
2012 * data (interrupt status) */
2013 spin_lock_init(&denali->irq_lock);
2014
2015 /* indicate that MTD has not selected a valid bank yet */
2016 denali->flash_bank = CHIP_SELECT_INVALID;
2017
2018 /* initialize our irq_status variable to indicate no interrupts */
2019 denali->irq_status = 0;
2020}
2021
2022/* driver entry point */
2023struct denali_dt {
2024 struct denali_nand_info denali;
2025 struct clk *clk;
2026};
2027
2028static void __iomem *request_and_map(struct device *dev,
2029 const struct resource *res)
2030{
2031 void __iomem *ptr;
2032
2033 if (!devm_request_mem_region(dev, res->start, resource_size(res),
2034 "denali-dt")) {
2035 dev_err(dev, "unable to request %s\n", res->name);
2036 return NULL;
2037 }
2038
2039 ptr = devm_ioremap_nocache(dev, res->start, resource_size(res));
2040 if (!ptr)
2041 dev_err(dev, "ioremap_nocache of %s failed!", res->name);
2042
2043 return ptr;
2044}
/* Device-tree match table: binds this driver to "denali,denali-nand-dt" */
static const struct of_device_id denali_nand_dt_ids[] = {
		{ .compatible = "denali,denali-nand-dt" },
		{ /* sentinel */ }
	};
2049
2050unsigned char *flag_buf = NULL;
2051int fota_flag_store()
2052{
2053 size_t retlen = 0;
2054 struct cmdline_mtd_partition *part;
2055 struct erase_info ei;
2056 size_t fotalen = 0;
2057 int offset = 0;
2058 int size = 0;
2059 int addr = 0;
2060 int i =0;
2061 int ret = 0;
2062
2063 part = partitions;
2064 for(i=0;i<part->num_parts;i++)
2065 {
2066 if ( strcmp( (char *)part->parts[i].name, "fotaflag" ) == 0 )
2067 break;
2068 }
2069 offset = (int)part->parts[i].offset;
2070 size = (int)part->parts[i].size;
2071 flag_buf = kzalloc(mtd_fota->writesize, GFP_KERNEL);
2072 fotalen = mtd_fota->writesize;
2073
2074 /* read partition */
2075 addr = offset;
2076 while (addr < (offset+size))
2077 {
2078 if (mtd_fota->_block_isbad (mtd_fota, addr))
2079 {
2080 addr += mtd_fota->erasesize;
2081 continue;
2082 }
2083 ret = mtd_fota->_read(mtd_fota, addr, fotalen, &retlen, flag_buf);
2084 if(ret == 0)
2085 {
2086 break;
2087 }
2088 else if (ret)
2089 {
2090 return ret;
2091 }
2092 }
2093 printk(" fota flag: %s \n", flag_buf);
2094
2095 /* erase partition */
2096 memset(&ei, 0, sizeof(struct erase_info) );
2097
2098 ei.mtd = mtd_fota;
2099 ei.addr = (uint64_t)offset;
2100 ei.len = (uint64_t)(mtd_fota->erasesize);
2101 while((ei.addr < (offset+size)))
2102 {
2103 if(mtd_fota->_block_isbad (mtd_fota, ei.addr))
2104 {
2105 ei.addr += mtd_fota->erasesize;
2106 continue ;
2107 }
2108
2109 ei.state = 0;
2110 ret = mtd_fota->_erase(mtd_fota, &ei);
2111 if(ret == 0)
2112 {
2113 break;
2114 }
2115 else if(ret && (ei.state == MTD_ERASE_FAILED))
2116 {
2117 mtd_fota->_block_markbad(mtd_fota,ei.addr);
2118 }
2119 else
2120 {
2121 printk(" erase fota_flag error\n");
2122 return ret;
2123 }
2124
2125 ei.addr += mtd_fota->erasesize;
2126 }
2127
2128
2129 /* read partition */
2130 while (addr < (offset+size))
2131 {
2132 if (mtd_fota->_block_isbad (mtd_fota, addr))
2133 {
2134 addr += mtd_fota->erasesize;
2135 continue;
2136 }
2137 ret = mtd_fota->_read(mtd_fota, addr, fotalen, &retlen, flag_buf);
2138 if(ret == 0)
2139 {
2140 break;
2141 }
2142 else if (ret)
2143 {
2144 return ret;
2145 }
2146 }
2147 printk(" fota flag after erase: %s \n", flag_buf);
2148
2149
2150 /* write partition */
2151 memcpy(flag_buf,"FOTA-UPDATE",12);
2152 addr = offset;
2153 while ((addr < (offset+size)))
2154 {
2155 if (mtd_fota->_block_isbad (mtd_fota, addr))
2156 {
2157 addr += mtd_fota->erasesize ;
2158 continue;
2159 }
2160
2161 ret = mtd_fota->_write(mtd_fota, addr, fotalen, &retlen,
2162 (const u_char *)flag_buf);
2163 if(ret == 0)
2164 {
2165 break;
2166 }
2167 else if (ret != 0)
2168 {
2169 printk(" write fota_flag error\n");
2170 return ret;
2171 }
2172
2173 }
2174
2175 /* read partition */
2176 while (addr < (offset+size))
2177 {
2178 if (mtd_fota->_block_isbad (mtd_fota, addr))
2179 {
2180 addr += mtd_fota->erasesize;
2181 continue;
2182 }
2183 ret = mtd_fota->_read(mtd_fota, addr, fotalen, &retlen, flag_buf);
2184 if(ret == 0)
2185 {
2186 break;
2187 }
2188 else if (ret)
2189 {
2190 return ret;
2191 }
2192 }
2193 printk(" fota flag after write: %s \n", flag_buf);
2194
2195 kfree(flag_buf);
2196 return 0;
2197}
2198
2199
MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
/* 32-bit DMA mask installed on the device in denali_nand_probe() */
static u64 denali_dma_mask;
2202
2203static int denali_nand_ecc_init(struct denali_nand_info *denali)
2204{
2205 int i,eccpos_start;
2206 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
2207 denali->nand.ecc.steps = denali->mtd.writesize/denali->nand.ecc.size;
2208
2209 switch (denali->nand.ecc.size) {
2210 case 512:
2211 denali->nand.ecc.bytes =
2212 ( denali->nand.ecc.strength * 13 + 15) / 16 * 2;
2213 break;
2214 case 1024:
2215 denali->nand.ecc.bytes =
2216 ( denali->nand.ecc.strength * 14 + 15) / 16 * 2;
2217 break;
2218 default:
2219 printk("Unsupported ECC sector size\n");
2220 BUG();
2221 }
2222
2223 denali->nand.ecc.total = denali->nand.ecc.bytes* denali->nand.ecc.steps;
2224 if(denali->mtd.oobsize >= (denali->nand.ecc.total+denali->bbtskipbytes + 8))
2225 {
2226 g_nand_oob.eccbytes = denali->nand.ecc.total;
2227
2228 eccpos_start = denali->bbtskipbytes;
2229
2230 for (i = 0; i < g_nand_oob.eccbytes; i++)
2231 {
2232 g_nand_oob.eccpos[i] = eccpos_start + i;
2233 }
2234
2235 g_nand_oob.oobfree[0].offset = g_nand_oob.eccbytes+denali->bbtskipbytes;
2236 g_nand_oob.oobfree[0].length = denali->mtd.oobsize -(g_nand_oob.eccbytes+denali->bbtskipbytes);
2237 denali->nand.ecc.layout = &g_nand_oob;
2238 }
2239 else
2240 {
2241 printk("Unsupported ECC strength,please check the id table\n");
2242 BUG();
2243 }
2244
2245
2246 return 0;
2247}
2248
2249
2250static int denali_nand_probe(struct platform_device *ofdev)
2251{
2252 int ret = 0;
2253 struct resource *denali_reg, *nand_data;
2254 struct denali_dt *dt;
2255 struct denali_nand_info *denali;
2256 unsigned char *test_wbuf=NULL;
2257 unsigned char *test_rbuf=NULL;
2258
2259 soft_spin_lock(NAND_SFLOCK);
2260
2261 pr_err("denali_nand_probe------------\n");
2262
2263
2264 dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
2265 if (!dt)
2266 return -ENOMEM;
2267 denali = &dt->denali;
2268 denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
2269 nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
2270 if (!denali_reg || !nand_data)
2271 {
2272 dev_err(&ofdev->dev, "resources not completely defined\n");
2273 return -EINVAL;
2274 }
2275 denali->platform = ZX7510;
2276 denali->dev = &ofdev->dev;
2277 denali->irq = platform_get_irq(ofdev, 0);
2278 if (denali->irq < 0)
2279 {
2280 dev_err(&ofdev->dev, "no irq defined\n");
2281 return denali->irq;
2282 }
2283 denali->flash_reg = (void __iomem *)(denali_reg->start);//request_and_map(&ofdev->dev, denali_reg);
2284
2285 if (!denali->flash_reg)
2286 return -ENOMEM;
2287
2288 denali->flash_mem = (void __iomem *)(nand_data->start);//request_and_map(&ofdev->dev, nand_data);
2289
2290 if (!denali->flash_mem)
2291 return -ENOMEM;
2292
2293 //printk("reg=%x,data=%x\n",denali->flash_reg,denali->flash_mem);
2294
2295 denali->dev->dma_mask = &denali_dma_mask;
2296
2297
2298 /* Is 32-bit DMA supported? */
2299 ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
2300 if (ret) {
2301 pr_err("Spectra: no usable DMA configuration\n");
2302 return ret;
2303 }
2304 denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
2305 DENALI_BUF_SIZE,
2306 DMA_BIDIRECTIONAL);
2307
2308 if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
2309 dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
2310 return -EIO;
2311 }
2312 denali->mtd.dev.parent = denali->dev;
2313 denali_hw_init(denali);
2314 denali_drv_init(denali);
2315
2316 /* denali_isr register is done after all the hardware
2317 * initilization is finished*/
2318 if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
2319 DENALI_NAND_NAME, denali)) {
2320 pr_err("Spectra: Unable to allocate IRQ\n");
2321 return -ENODEV;
2322 }
2323 else
2324 {
2325 denali_int_en_flag = 1;
2326 }
2327
2328 /* now that our ISR is registered, we can enable interrupts */
2329 denali_set_intr_modes(denali, true);
2330 denali->mtd.name = "denali-nand";
2331 denali->mtd.owner = THIS_MODULE;
2332 denali->mtd.priv = &denali->nand;
2333
2334 /* register the driver with the NAND core subsystem */
2335 denali->nand.select_chip = denali_select_chip;
2336 denali->nand.cmdfunc = denali_cmdfunc;
2337 denali->nand.read_byte = denali_read_byte;
2338 denali->nand.waitfunc = denali_waitfunc;
2339
2340 wake_lock_init(&nand_wake_lock, WAKE_LOCK_SUSPEND, "nand");
2341 printk("deanli wakelock ok\n");
2342
2343 /* scan for NAND devices attached to the controller
2344 * this is the first stage in a two step process to register
2345 * with the nand subsystem */
2346 if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
2347 ret = -ENXIO;
2348 goto failed_req_irq;
2349 }
2350
2351 /* MTD supported page sizes vary by kernel. We validate our
2352 * kernel supports the device here.
2353 */
2354 if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
2355 ret = -ENODEV;
2356 printk(KERN_ERR "Spectra: device size not supported by this "
2357 "version of MTD.");
2358 goto failed_req_irq;
2359 }
2360
2361 /* support for multi nand
2362 * MTD known nothing about multi nand,
2363 * so we should tell it the real pagesize
2364 * and anything necessery
2365 */
2366 denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
2367 denali->nand.chipsize <<= (denali->devnum - 1);
2368 denali->nand.page_shift += (denali->devnum - 1);
2369 denali->nand.pagemask = (denali->nand.chipsize >>
2370 denali->nand.page_shift) - 1;
2371 denali->nand.bbt_erase_shift += (denali->devnum - 1);
2372 denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
2373 denali->nand.chip_shift += (denali->devnum - 1);
2374 denali->mtd.writesize <<= (denali->devnum - 1);
2375 denali->mtd.oobsize <<= (denali->devnum - 1);
2376 denali->mtd.erasesize <<= (denali->devnum - 1);
2377 denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
2378 denali->bbtskipbytes *= denali->devnum;
2379
2380 /* second stage of the NAND scan
2381 * this stage requires information regarding ECC and
2382 * bad block management. */
2383
2384 /* Bad block management */
2385 denali->nand.bbt_td = &bbt_main_descr;
2386 denali->nand.bbt_md = &bbt_mirror_descr;
2387
2388 /* skip the scan for now until we have OOB read and write support */
2389 denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
2390
2391 // init ecc
2392 denali_nand_ecc_init(denali);
2393
2394 /* Let driver know the total blocks number and
2395 * how many blocks contained by each nand chip.
2396 * blksperchip will help driver to know how many
2397 * blocks is taken by FW.
2398 * */
2399 denali->totalblks = denali->mtd.size >>
2400 denali->nand.phys_erase_shift;
2401 denali->blksperchip = denali->totalblks / denali->nand.numchips;
2402
2403 /* These functions are required by the NAND core framework, otherwise,
2404 * the NAND core will assert. However, we don't need them, so we'll stub
2405 * them out. */
2406 denali->nand.ecc.calculate = denali_ecc_calculate;
2407 denali->nand.ecc.correct = denali_ecc_correct;
2408 denali->nand.ecc.hwctl = denali_ecc_hwctl;
2409
2410 /* override the default read operations */
2411 denali->nand.ecc.read_page = denali_read_page;
2412 denali->nand.ecc.read_page_raw = denali_read_page_raw;
2413 denali->nand.ecc.write_page = denali_write_page;
2414 denali->nand.ecc.write_page_raw = denali_write_page_raw;
2415 denali->nand.ecc.read_oob = denali_read_oob;
2416 denali->nand.ecc.write_oob = denali_write_oob;
2417 denali->nand.erase_cmd = denali_erase;
2418
2419 if (nand_scan_tail(&denali->mtd)) {
2420 ret = -ENXIO;
2421 goto failed_req_irq;
2422 }
2423
2424 mtd_fota = &denali->mtd;
2425 ret = mtd_device_register(&denali->mtd, NULL, 0);
2426 if (ret) {
2427 dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
2428 ret);
2429 goto failed_req_irq;
2430 }
2431
2432 platform_set_drvdata(ofdev, dt);
2433
2434 soft_spin_unlock(NAND_SFLOCK);
2435 return 0;
2436
2437failed_req_irq:
2438 denali_irq_cleanup(denali->irq, denali);
2439 kfree(denali);
2440 denali_nand_unlock(&denali->mtd);
2441 wake_unlock(&nand_wake_lock);
2442 return ret;
2443}
2444
/* driver exit point */

/*
 * Tear down controller state: release the IRQ and unmap the DMA buffer
 * set up in denali_nand_probe().  MTD deregistration is not handled here.
 */
void denali_remove(struct denali_nand_info *denali)
{
	denali_irq_cleanup(denali->irq, denali);
	dma_unmap_single(denali->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
}
2453
2454
2455
/* Platform driver remove callback: undo probe-time setup. */
static int denali_nand_remove(struct platform_device *ofdev)
{
	struct denali_dt *dt = platform_get_drvdata(ofdev);

	denali_remove(&dt->denali);
	/* NOTE(review): probe() never assigns dt->clk, so these operate on a
	 * NULL clock — confirm whether clock handling was meant to be wired
	 * up in probe or removed here. */
	clk_disable(dt->clk);
	clk_put(dt->clk);
	wake_lock_destroy(&nand_wake_lock);

	return 0;
}
2467
2468
2469
/* Platform driver glue; devices are matched via denali_nand_dt_ids */
static struct platform_driver denali_dt_driver = {

	.probe = denali_nand_probe,

	.remove = denali_nand_remove,

	.driver = {

	.name = "denali-nand-dt",

	.owner = THIS_MODULE,

	.of_match_table = denali_nand_dt_ids,

	},

};

/* generates module init/exit that register/unregister the driver */
module_platform_driver(denali_dt_driver);