blob: a116298606414229208f8a799695ffcadc327802 [file] [log] [blame]
/*
* drivers/mtd/nand/pxa3xx_nand.c
*
* Copyright © 2009 Marvell International Ltd.
* Lei Wen <leiwen@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <nand.h>
#include <malloc.h>
#include <common.h>
#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <mtd/pxa3xx_bbm.h>
#include <asm/arch/pxa3xx_nand.h>
#include <asm/arch/nand_supported.h>
#include <asm/arch/cpu.h>
/* convert nano-seconds to nand flash controller clock cycles.
 * Fix: every macro argument is now fully parenthesized so compound
 * expressions (e.g. "a + b" as clk) expand correctly.
 */
#define ns2cycle(ns, clk)	(int)((ns) * ((clk) / 1000000) / 1000)
/* convert nand flash controller clock cycles to nano-seconds */
#define cycle2ns(cycle, clk)	((cycle) * 1000 / ((clk) / 1000000))
#define CHIP_DELAY_TIMEOUT	(500)
#define NAND_STOP_DELAY		(100)
#define BCH_THRESHOLD		(8)
#define PAGE_CHUNK_SIZE		(2048)
#define OOB_CHUNK_SIZE		(64)
#undef PXA3XX_NAND_DEBUG
#ifdef PXA3XX_NAND_DEBUG
#define DBG_NAND(x)	do { x; } while (0)
#else
#define DBG_NAND(x)
#endif
/* Return 1 when @n is a power of two, 0 otherwise.  A power of two has
 * exactly one bit set, so clearing the lowest set bit (n & (n - 1))
 * must leave zero; zero itself is not a power of two.
 */
static inline int is_power_of_2(unsigned long n)
{
	if (n == 0)
		return 0;
	return (n & (n - 1)) == 0;
}
/* MTD device names, indexed by chip select */
static char mtd_names[][6] = {"nand0", "nand1"};
/* ECC layout for small-page devices: 6 ECC bytes at offsets 8-13,
 * 6 free OOB bytes starting at offset 2
 */
static struct nand_ecclayout hw_smallpage_ecclayout = {
	.eccbytes = 6,
	.eccpos = {8, 9, 10, 11, 12, 13 },
	.oobfree = { {2, 6} }
};
/* ECC layout for large-page (2K) devices: 24 ECC bytes in the upper
 * half of the 64-byte OOB, 38 free bytes starting at offset 2
 */
static struct nand_ecclayout hw_largepage_ecclayout = {
	.eccbytes = 24,
	.eccpos = {
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 38} }
};
/* Return 1 when the first @len bytes of @buf are all 0xff (i.e. the
 * buffer looks like erased flash), 0 otherwise.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
/*
 * Dump the driver state, the queued command words and the controller
 * registers.  Called on fatal conditions (stop timeout, command failure)
 * to aid debugging; purely informational, changes no state.
 */
static void nand_error_dump(struct pxa3xx_nand *nand)
{
	struct mtd_info *mtd = nand->mtd[nand->chip_select];
	struct pxa3xx_nand_info *info = mtd->priv;
	int i;

	printf("NAND controller state wrong!!!\n");
	printf("state %x, current seqs %d, errcode %x, bad count %d\n",
	       nand->state, info->current_cmd_seqs,
	       nand->errcode, nand->bad_count);
	printf("Totally %d command for sending\n",
	       info->total_cmds);
	/* show every NDCB0 word queued for the current command sequence */
	for (i = 0; i < info->total_cmds; i++)
		printf("NDCB0:%d: %x\n", i, info->ndcb0[i]);
	printf("\nRegister DUMPing ##############\n");
	printf("NDCR %x\n"
	       "NDSR %x\n"
	       "NDCB0 %x\n"
	       "NDCB1 %x\n"
	       "NDCB2 %x\n"
	       "NDTR0CS0 %x\n"
	       "NDTR1CS0 %x\n"
	       "NDBBR0 %x\n"
	       "NDBBR1 %x\n"
	       "NDREDEL %x\n"
	       "NDECCCTRL %x\n"
	       "NDBZCNT %x\n\n",
	       nand_readl(nand, NDCR),
	       nand_readl(nand, NDSR),
	       nand_readl(nand, NDCB0),
	       nand_readl(nand, NDCB1),
	       nand_readl(nand, NDCB2),
	       nand_readl(nand, NDTR0CS0),
	       nand_readl(nand, NDTR1CS0),
	       nand_readl(nand, NDBBR0),
	       nand_readl(nand, NDBBR1),
	       nand_readl(nand, NDREDEL),
	       nand_readl(nand, NDECCCTRL),
	       nand_readl(nand, NDBZCNT));
}
/*
 * This function shows the real timing when the NAND controller sends
 * signals to the NAND chip: it decodes the NDTR0/NDTR1 register fields
 * back into effective clock cycles and prints them converted to ns.
 * Debug-output only; no state is modified.
 */
static void show_real_timing(uint32_t ndtr0, uint32_t ndtr1, unsigned long nand_clk)
{
	uint32_t rtADL, rtCH, rtCS, rtWH, rtWP, rtRH, rtRP;
	uint32_t rtR, rtRHW, rtWHR, rtAR, tmp;

	/* decode the NDTR0 bit fields; hardware adds one cycle to each */
	rtCH = ((ndtr0 >> 19) & 0x7) + 1;
	rtCS = ((ndtr0 >> 16) & 0x7) + 1;
	rtWH = ((ndtr0 >> 11) & 0x7) + 1;
	rtWP = ((ndtr0 >> 8) & 0x7) + 1;
	rtADL= (ndtr0 >> 27) & 0x1f;
	rtRH = ((ndtr0 >> 3) & 0x7) + 1;
	/* NDTR0_ETRP provides the 4th (MSB) bit of the tRP field */
	rtRP = (ndtr0 & NDTR0_ETRP) ? ((0x8 | (ndtr0 & 0x7)) + 1)
		: ((ndtr0 & 0x7) + 1);
	rtRHW = (ndtr1 >> 8) & 0x3;
	rtWHR = (ndtr1 >> 4) & 0xf;
	rtAR = ndtr1 & 0xf;
	/* undo the tADL adjustment done in pxa3xx_nand_set_timing() */
	if (rtADL != 0)
		rtADL -= 3 + rtWP;
	/* tR: 16-bit field, optionally prescaled by 16 */
	rtR = (ndtr1 >> 16) & 0xffff;
	if (ndtr1 & NDTR1_PRESCALE)
		rtR *= 16;
	rtR += rtCH + 2;
	/* tRHW is encoded as 0/16/32/48 cycles */
	switch(rtRHW) {
	case 0:
		rtRHW = 0;
		break;
	case 1:
		rtRHW = 16;
		break;
	case 2:
		rtRHW = 32;
		break;
	case 3:
		rtRHW = 48;
		break;
	}
	/*
	 * TWHR delay=max(tAR, max(0, tWHR-max(tWH, tCH)))
	 * TAR delay=max(tAR, max(0, tWHR-max(tWH, tCH))) + 2
	 */
	if (rtWH > rtCH)
		tmp = rtWH - 1;
	else
		tmp = rtCH - 1;
	if (rtWHR < tmp)
		rtWHR = rtAR;
	else {
		if (rtAR > (rtWHR - tmp))
			rtWHR = rtAR;
		else
			rtWHR = rtWHR - tmp;
	}
	rtAR = rtWHR + 2;
	debug("Shows real timing(ns):\n");
	if (ndtr0 & NDTR0_SELCNTR)
		debug("NDTR0 SELCNTR is set\n");
	else
		debug("NDTR0 SELCNTR is not set\n");
	if (ndtr0 & NDTR0_RD_CNT_DEL_MASK)
		debug("Read Strobe delay is %d\n",
		      (ndtr0 & NDTR0_RD_CNT_DEL_MASK) >> 22);
	else
		debug("No Read Stobe delay\n");
	if (ndtr0 & NDTR0_sel_NRE_EDGE)
		debug("Controller is using falling edge to detect RE\n");
	else
		debug("Controller is using rising edge to detect RE\n");
	if (ndtr1 & NDTR1_WAIT_MODE)
		debug("NDTR1 wait mode is set\n");
	else
		debug("NDTR1 wait mode is not set\n");
	debug("TADL is %ld TCH is %ld TCS is %ld TWH is %ld TWP is %ld"
	      " TRH is %ld TRP is %ld TR is %ld TRHW is %ld TWHR is %ld"
	      " TAR is %ld\n",
	      cycle2ns(rtADL, nand_clk), cycle2ns(rtCH, nand_clk),
	      cycle2ns(rtCS, nand_clk), cycle2ns(rtWH, nand_clk),
	      cycle2ns(rtWP, nand_clk), cycle2ns(rtRH, nand_clk),
	      cycle2ns(rtRP, nand_clk), cycle2ns(rtR, nand_clk),
	      cycle2ns(rtRHW, nand_clk), cycle2ns(rtWHR, nand_clk),
	      cycle2ns(rtAR, nand_clk));
}
/*
 * Program NDTR0CS0/NDTR1CS0 from the flash timing table @t.  The
 * register values are computed once and cached in info->timing0/1;
 * subsequent calls (e.g. when switching chip selects) just rewrite the
 * cached values into the controller.
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
		const struct pxa3xx_nand_timing *t, int show_timing)
{
	struct pxa3xx_nand *nand = info->nand_data;
	unsigned long nand_clk = NAND_DEF_CLOCK;
	uint32_t ndtr0, ndtr1;
	int tWH, tRP, tR, tRHW, tADL_X_ND;

	if (!info->timing0 && !info->timing1) {
		ndtr0 = ndtr1 = 0;
		/* tRP: 4-bit value whose MSB is carried in NDTR0_ETRP */
		tRP = ns2cycle(t->tRP, nand_clk);
		tRP = (tRP > 0xf) ? 0xf : tRP;
		if (tRP > 0x7) {
			ndtr0 |= NDTR0_ETRP;
			tRP -= 0x7;
		}
		/* tR: 16-bit field; use the /16 prescaler on overflow */
		tR = ns2cycle(t->tR, nand_clk);
		if (tR > 0xffff) {
			ndtr1 |= NDTR1_PRESCALE;
			tR /= 16;
		}
		/* tRHW: quantized to 0/16/32/48 cycles, encoded as 0..3 */
		tRHW = 0;
		if (t->tRHW > 0) {
			tRHW = ns2cycle(t->tRHW, nand_clk) + 1;
			if (tRHW <= 16)
				tRHW = 1;
			else if (tRHW <= 32)
				tRHW = 2;
			else
				tRHW = 3;
		}
		tWH = ns2cycle(t->tWH, nand_clk);
		/*
		 * tADL - Adress to Write Data delay
		 * 1. tADL/Tclk = (tWH+1) + (tWP+1) + tadl_x_mod_nd + 1
		 * 2. tadl_x_mod_nd = tADL_X_ND - (tWP+1) - 2
		 * 1 & 2 ==> tADL_X_ND = tADL/Tclk - tWH
		 */
		tADL_X_ND = 0;
		if (t->tADL > t->tWH + t->tWP)
			tADL_X_ND = ns2cycle(t->tADL, nand_clk) + 1 - tWH;
		/* optional board-specific read strobe counter delay */
		if (nand->RD_CNT_DEL > 0)
			ndtr0 |= NDTR0_SELCNTR
				| (NDTR0_RD_CNT_DEL(nand->RD_CNT_DEL - 1));
		ndtr0 |= NDTR0_tADL(tADL_X_ND)
			| NDTR0_tCH(ns2cycle(t->tCH, nand_clk))
			| NDTR0_tCS(ns2cycle(t->tCS, nand_clk))
			| NDTR0_tWH(tWH)
			| NDTR0_tWP(ns2cycle(t->tWP, nand_clk))
			| NDTR0_tRH(ns2cycle(t->tRH, nand_clk))
			| NDTR0_tRP(tRP);
		if (nand->wait_mode)
			ndtr1 |= NDTR1_WAIT_MODE;
		ndtr1 |= NDTR1_tR(tR)
			| NDTR1_tRHW(tRHW)
			| NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk))
			| NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
		/* cache the computed values for later re-programming */
		info->timing0 = ndtr0;
		info->timing1 = ndtr1;
		if (show_timing)
			show_real_timing(ndtr0, ndtr1, nand_clk);
	}
	nand_writel(nand, NDTR0CS0, info->timing0);
	nand_writel(nand, NDTR1CS0, info->timing1);
}
/*
 * Set info->data_size and info->oob_size for the current transfer
 * according to page size, ECC mode and whether the spare area is
 * enabled.  With OOB disabled, oob_size is forced to zero.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info, int oob_enable)
{
	const struct pxa3xx_nand_flash *f = info->flash_info;
	int large_page = (f->page_size >= PAGE_CHUNK_SIZE);

	/* the controller transfers at most one 2K chunk at a time */
	info->data_size = large_page ? 2048 : 512;
	if (!oob_enable) {
		info->oob_size = 0;
		return;
	}
	switch (info->use_ecc) {
	case ECC_HAMMIN:
		info->oob_size = large_page ? 40 : 8;
		break;
	case ECC_BCH:
		if (large_page)
			info->oob_size = 32;
		else
			printk("Don't support BCH on small"
			       " page device!!!\n");
		break;
	default:
		/* no ECC: the full spare area is available */
		info->oob_size = large_page ? 64 : 16;
		break;
	}
}
/* NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr, ndeccctrl;
	struct pxa3xx_nand *nand = info->nand_data;

	ndcr = info->reg_ndcr;
	ndeccctrl = 0;
	switch (info->use_ecc) {
	case ECC_BCH:
		/* enable the BCH engine and set the bit-flip threshold */
		ndeccctrl |= NDECCCTRL_BCH_EN;
		ndeccctrl |= NDECCCTRL_ECC_THRESH(BCH_THRESHOLD);
		/* fallthrough - BCH also needs the generic ECC enable */
	case ECC_HAMMIN:
		ndcr |= NDCR_ECC_EN;
		break;
	default:
		break;
	}
	ndcr |= (NDCR_ND_RUN | NDCR_STOP_ON_UNCOR);
	DBG_NAND(printk("@@@ndcr set: %x, ndeccctrl set %x\n",
			ndcr, ndeccctrl));
	nand_writel(nand, NDCR, 0);
	/* clear status bits and run */
	nand_writel(nand, NDECCCTRL, ndeccctrl);
	nand_writel(nand, NDSR, NDSR_MASK);
	nand_writel(nand, NDCR, ndcr);
}
/*
 * Stop the controller state machine: poll until ND_RUN clears (forcing
 * it off after NAND_STOP_DELAY iterations) and disable the BCH engine.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand* nand)
{
	uint32_t ndcr, ndeccctrl;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	do {
		/* clear status bits */
		nand_writel(nand, NDSR, NDSR_MASK);
		ndcr = nand_readl(nand, NDCR);
		udelay(1);
	} while ((ndcr & NDCR_ND_RUN) && (timeout -- > 0));
	/* NOTE(review): if ND_RUN clears exactly on the last poll the
	 * branch below still fires - a spurious (harmless) error dump.
	 */
	if (timeout <= 0) {
		printf("NAND controller unable to stop,"
				"please reconfigure your timing!!!\n");
		nand_error_dump(nand);
		ndcr &= ~(NDCR_ND_RUN);
		nand_writel(nand, NDCR, ndcr);
	}
	/* clear the ECC control register */
	ndeccctrl = nand_readl(nand, NDECCCTRL);
	ndeccctrl &= ~(NDECCCTRL_BCH_EN | NDECCCTRL_ECC_THR_MSK);
	nand_writel(nand, NDECCCTRL, ndeccctrl);
}
/*
 * PIO-transfer one chunk of data (and its OOB slice, when present)
 * between the driver buffers and the controller data FIFO (NDDB), in
 * the direction given by nand->is_write.  The chunk length comes from
 * the per-command ndcb3[] table; the column offsets are advanced so a
 * multi-chunk command continues where the previous chunk left off.
 *
 * Fix: the data-part transfer was duplicated in both arms of the
 * oob_size test; it is now issued unconditionally, the OOB transfer
 * alone depends on oob_size.
 */
static void handle_data_pio(struct pxa3xx_nand *nand, int cmd_seqs)
{
	unsigned int mmio_base = nand->mmio_base;
	struct mtd_info *mtd = nand->mtd[nand->chip_select];
	struct pxa3xx_nand_info *info = mtd->priv;

	DBG_NAND(printk("data col %x, size %x, oob col %x, size %x\n",
			info->data_column, info->ndcb3[cmd_seqs],
			info->oob_column, info->oob_size));
	if (nand->is_write) {
		/* write data part */
		__raw_writesl(mmio_base + NDDB,
			      info->data_buff + info->data_column,
			      info->ndcb3[cmd_seqs] >> 2);
		/* write oob part, if any */
		if (info->oob_size > 0)
			__raw_writesl(mmio_base + NDDB,
				      info->oob_buff + info->oob_column,
				      info->oob_size >> 2);
	} else {
		/* read data part */
		__raw_readsl(mmio_base + NDDB,
			     info->data_buff + info->data_column,
			     info->ndcb3[cmd_seqs] >> 2);
		/* read oob part, if any */
		if (info->oob_size > 0)
			__raw_readsl(mmio_base + NDDB,
				     info->oob_buff + info->oob_column,
				     info->oob_size >> 2);
	}
	/* advance the cursors for the next chunk of this command */
	info->data_column += info->ndcb3[cmd_seqs];
	info->oob_column += info->oob_size;
}
/*
 * Busy-wait until the controller reports command-done for the active
 * chip select (or any error condition), then stop the state machine.
 * NOTE(review): the loop has no timeout - a wedged controller that
 * raises neither CMDD nor an error bit would hang here; confirm this
 * is acceptable in the boot environment.
 */
static void pxa3xx_wait_cmd_done(struct pxa3xx_nand *nand)
{
	int cmd_done, bad_detect, status;

	/* status bits differ per chip select */
	cmd_done = (nand->chip_select) ? NDSR_CS1_CMDD : NDSR_CS0_CMDD;
	bad_detect = (nand->chip_select) ? NDSR_CS1_BBD : NDSR_CS0_BBD;
	do {
		/* wait cmd done */
		status = nand_readl(nand, NDSR);
		nand->bad_count = (status & NDSR_ERR_CNT_MASK) >> 16;
		if (status & NDSR_TRUSTVIO)
			nand->errcode |= ERR_TRUSTVIO;
		if (status & NDSR_CORERR)
			nand->errcode |= ERR_CORERR;
		if (status & NDSR_UNCERR)
			nand->errcode |= ERR_DBERR;
		if (status & bad_detect)
			nand->errcode |= ERR_BBERR;
		if (nand->errcode != ERR_NONE)
			break;
		if (status & cmd_done) {
			nand->state |= STATE_CMD_DONE;
			break;
		}
	} while (1);
	pxa3xx_nand_stop(nand);
	return;
}
/*
 * Drain pending read data from the controller FIFO into the driver
 * buffers, then wait for command completion.  No-op when the command
 * has already finished (STATE_CMD_DONE set).
 */
static void pxa3xx_buff_read_data(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;

	if (nand->state & STATE_CMD_DONE)
		return;
	nand->state |= STATE_DATA_PROCESSING;
	/* current_cmd_seqs was already advanced; use the previous slot */
	handle_data_pio(nand, info->current_cmd_seqs - 1);
	nand->state |= STATE_DATA_DONE;
	pxa3xx_wait_cmd_done(nand);
	return;
}
/*
 * Fast-path page read: copy a full page straight from the controller
 * FIFO into @buf (and the OOB into chip->oob_poi), bypassing the
 * intermediate driver buffer.  No-op when the command has already
 * completed.
 */
static void pxa3xx_upload_page_direct(struct mtd_info *mtd, uint8_t *buf)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;
	struct nand_chip *chip = mtd->priv;
	unsigned int mmio_base = nand->mmio_base;

	if (nand->state & STATE_CMD_DONE)
		return;
	/* FIFO is 32-bit wide, hence the >> 2 word counts */
	__raw_readsl(mmio_base + NDDB, buf, mtd->writesize >> 2);
	if (info->oob_size > 0)
		__raw_readsl(mmio_base + NDDB, chip->oob_poi,
			     info->oob_size >> 2);
	pxa3xx_wait_cmd_done(nand);
	return;
}
/*
 * Polled "interrupt" handler: read NDSR once, collect error flags,
 * feed/drain the data FIFO, and push the next queued NDCB word set
 * when the controller asks for it.  Returns non-zero when the whole
 * command sequence has finished (or failed), zero when more polling
 * is required.
 */
static int pxa3xx_nand_irq(void *devid)
{
	struct pxa3xx_nand *nand = devid;
	struct pxa3xx_nand_info *info;
	struct mtd_info *mtd;
	unsigned int status;
	int chip_select, cmd_done, ready, page_done, badblock_detect;
	int cmd_seqs, ndcb1, ndcb2, is_completed = 0;

	/* select the per-chip-select status bits */
	chip_select = nand->chip_select;
	ready = (chip_select) ? NDSR_RDY : NDSR_FLASH_RDY;
	cmd_done = (chip_select) ? NDSR_CS1_CMDD : NDSR_CS0_CMDD;
	page_done = (chip_select) ? NDSR_CS1_PAGED : NDSR_CS0_PAGED;
	badblock_detect = (chip_select) ? NDSR_CS1_BBD : NDSR_CS0_BBD;
	mtd = nand->mtd[chip_select];
	info = (struct pxa3xx_nand_info *)(mtd->priv);
	cmd_seqs = info->current_cmd_seqs;
	status = nand_readl(nand, NDSR);
	nand->bad_count = (status & NDSR_ERR_CNT_MASK) >> 16;
	DBG_NAND(if (status != 0)
			printk("\t\tcmd seqs %d, status %x\n", cmd_seqs, status));
	/* accumulate error conditions into nand->errcode */
	if (status & NDSR_TRUSTVIO)
		nand->errcode |= ERR_TRUSTVIO;
	if (status & NDSR_CORERR)
		nand->errcode |= ERR_CORERR;
	if (status & NDSR_UNCERR)
		nand->errcode |= ERR_DBERR;
	if (status & badblock_detect)
		nand->errcode |= ERR_BBERR;
	if ((status & NDSR_WRDREQ) || (status & NDSR_RDDREQ)) {
		/* READ0 data is drained later by the read-page path
		 * (pxa3xx_upload_page_direct / pxa3xx_buff_read_data);
		 * NOTE(review): the early return deliberately leaves NDSR
		 * uncleared so the data-request state persists - confirm.
		 */
		if (nand->command == NAND_CMD_READ0) {
			is_completed = 1;
			return is_completed;
		}
		nand->state |= STATE_DATA_PROCESSING;
		handle_data_pio(nand, cmd_seqs - 1);
		nand->state |= STATE_DATA_DONE;
	}
	if (status & cmd_done) {
		nand->state |= STATE_CMD_DONE;
		/* complete the command cycle when all command
		 * done, and don't wait for ready signal
		 */
		if ((cmd_seqs == info->total_cmds)
				&& !(cmd_seqs == info->need_wait_ready)) {
			is_completed = 1;
		}
	}
	if (status & ready) {
		nand->state |= STATE_READY;
		/*
		 * wait for the ready signal,
		 * then leavl the command cycle
		 */
		if ((cmd_seqs == info->total_cmds)
				&& (cmd_seqs == info->need_wait_ready)) {
			is_completed = 1;
		}
		nand->is_ready = 1;
	}
	if (status & page_done)
		nand->state |= STATE_PAGE_DONE;
	if (nand->errcode != ERR_NONE)
		goto ERR_IRQ_EXIT;
	if (status & NDSR_WRCMDREQ) {
		/* controller is ready for the next NDCB command words */
		nand_writel(nand, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		if (cmd_seqs < info->total_cmds) {
			info->current_cmd_seqs ++;
			/* address words go only with the first command */
			if (cmd_seqs == 0) {
				ndcb1 = info->ndcb1;
				ndcb2 = info->ndcb2;
			}
			else {
				ndcb1 = 0;
				ndcb2 = 0;
			}
			nand->state = STATE_CMD_WAIT_DONE;
			/* all NDCB words are written through the NDCB0 port */
			nand_writel(nand, NDCB0, info->ndcb0[cmd_seqs]);
			nand_writel(nand, NDCB0, ndcb1);
			nand_writel(nand, NDCB0, ndcb2);
			/* a 4th word for length override / long addressing */
			if (info->need_additional_addressing) {
				nand_writel(nand, NDCB0, info->ndcb3[cmd_seqs]);
				DBG_NAND(printk("\tndcb0 %x ndcb1 %x, "
						"ndcb2 %x, ndcb3 %x\n",
						info->ndcb0[cmd_seqs],
						ndcb1, ndcb2, info->ndcb3[cmd_seqs]));
			}
			else {
				DBG_NAND(printk("\tndcb0 %x ndcb1 %x ndcb2 %x\n",
						info->ndcb0[cmd_seqs],
						ndcb1, ndcb2));
			}
		}
		else
			is_completed = 1;
	}
ERR_IRQ_EXIT:
	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(nand, NDSR, status);
	return is_completed || nand->errcode != ERR_NONE;
}
/*
 * Drive the pseudo-interrupt handler until it reports completion or
 * @timeout timer ticks elapse.  Returns the handler's final result
 * (non-zero on completion/error, zero on timeout).
 */
static int pxa3xx_nand_polling(struct pxa3xx_nand *nand, unsigned long timeout)
{
	u32 start = get_timer(0);
	int done;

	for (;;) {
		done = pxa3xx_nand_irq(nand);
		if (done)
			return done;
		if (get_timer(start) > timeout) {
			printk("Wait timeout!\n");
			return done;
		}
	}
}
/*
 * MTD dev_ready hook: report whether the flash ready bit for the
 * active chip select is set in NDSR (1 = ready, 0 = busy).
 */
static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;
	int ready_mask;

	/* CS1 uses RDY, CS0 uses FLASH_RDY */
	if (nand->chip_select)
		ready_mask = NDSR_RDY;
	else
		ready_mask = NDSR_FLASH_RDY;
	return !!(nand_readl(nand, NDSR) & ready_mask);
}
/*
 * Build the NDCB command words for @command into info->ndcb0..ndcb3 and
 * reset the per-command driver state.  Returns 1 when the command must
 * actually be sent to the controller, 0 when nothing is to be issued
 * (SEQIN only latches the address, ERASE2 is implied by ERASE1, blank
 * pages are skipped on PAGEPROG).
 */
static int prepare_command_pool(struct pxa3xx_nand *nand, int command,
		uint16_t column, int page_addr)
{
	uint16_t cmd;
	int addr_cycle, exec_cmd, ndcb0, ndcb3 = 0, i, chunks = 0, ecc_strength;
	struct mtd_info *mtd = nand->mtd[nand->chip_select];
	struct pxa3xx_nand_info *info = mtd->priv;
	struct nand_chip *chip = mtd->priv;
	const struct pxa3xx_nand_flash *flash_info = info->flash_info;

	ndcb0 = (nand->chip_select) ? NDCB0_CSEL : 0;
	addr_cycle = 0;
	exec_cmd = 1;
	/* reset data and oob column point to handle data */
	info->data_column = 0;
	info->oob_column = 0;
	info->buf_start = 0;
	info->buf_count = 0;
	info->current_cmd_seqs = 0;
	info->need_wait_ready = -1;
	info->oob_size = 0;
	info->use_ecc = ECC_NONE;
	nand->state = 0;
	nand->is_write = 0;
	nand->is_ready = 0;
	nand->errcode = ERR_NONE;
	nand->bad_count = 0;
	nand->command = command;
	/* first pass: ECC selection and transfer sizing */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		if (chip->ecc.mode == NAND_ECC_HW)
			info->use_ecc = flash_info->ecc_type;
		/* fallthrough - READ0/PAGEPROG share the sizing below */
	case NAND_CMD_READOOB:
		/* OOB-only reads always use strength 1 */
		ecc_strength = (command == NAND_CMD_READOOB) ? 1
			: flash_info->ecc_strength;
		if (ecc_strength > 1) {
			/* high strength: length-override mode, spare off */
			info->reg_ndcr &= ~NDCR_SPARE_EN;
			ndcb0 |= NDCB0_LEN_OVRD;
		}
		else {
			/* NOTE(review): pages 0-63 on pxa1826 z3/a0 are
			 * accessed with the spare area disabled - presumably
			 * a boot-area/errata restriction; confirm against
			 * the platform documentation.
			 */
			if ((cpu_is_pxa1826_z3() || cpu_is_pxa1826_a0())
					&& (page_addr < 64 && page_addr >= 0))
				info->reg_ndcr &= ~NDCR_SPARE_EN;
			else
				info->reg_ndcr |= NDCR_SPARE_EN;
		}
		pxa3xx_set_datasize(info, info->reg_ndcr & NDCR_SPARE_EN);
		/* number of transfer chunks and per-chunk length override */
		chunks = flash_info->page_size / info->data_size;
		chunks = chunks * ecc_strength;
		ndcb3 = info->data_size / ecc_strength;
		break;
	case NAND_CMD_SEQIN:
		/* address is latched now; data goes out on PAGEPROG */
		exec_cmd = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}
	/* clear the command buffer */
	for (i = 0; i < CMD_POOL_SIZE; i ++)
		info->ndcb0[i] = ndcb0;
	addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles
			+ info->col_addr_cycles);
	/* a 4th NDCB word is needed for length override or >5 addr cycles */
	if ((ndcb0 & NDCB0_LEN_OVRD)
			|| ((info->row_addr_cycles + info->col_addr_cycles) > 5)) {
		info->need_additional_addressing = 1;
	}
	else
		info->need_additional_addressing = 0;
	/* second pass: build the actual per-command NDCB sequence */
	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		cmd = flash_info->cmdset->read1;
		if (command == NAND_CMD_READOOB) {
			if (!(info->reg_ndcr & NDCR_SPARE_EN))
				return 0;
			/* present the OOB right after the page data */
			info->buf_start = mtd->writesize + column;
			for (i = 1; i <= chunks; i ++)
				info->ndcb3[i] = info->data_size;
		}
		else {
			info->ndcb3[0] = info->data_size;
			for (i = 1; i <= chunks; i ++)
				info->ndcb3[i] = ndcb3;
			info->buf_start = column;
		}
		if (unlikely(flash_info->page_size < PAGE_CHUNK_SIZE)) {
			/* small page: single plain read command */
			info->total_cmds = 1;
			info->ndcb0[0] |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| cmd;
		}
		else {
			/* large page: dispatch + one naked read per chunk */
			info->total_cmds = chunks + 1;
			info->ndcb0[0] |= NDCB0_CMD_XTYPE(0x6)
				| NDCB0_CMD_TYPE(0)
				| NDCB0_DBC
				| NDCB0_NC
				| addr_cycle
				| cmd;
			info->ndcb0[1] |= NDCB0_CMD_XTYPE(0x5)
				| NDCB0_NC
				| addr_cycle;
			for (i = 2; i <= chunks; i ++)
				info->ndcb0[i] = info->ndcb0[1];
			/* last chunk terminates the naked-command chain */
			info->ndcb0[chunks] &= ~NDCB0_NC;
		}
		/* fallthrough - reads share the address setup with SEQIN */
	case NAND_CMD_SEQIN:
		/* small page addr setting */
		if (unlikely(flash_info->page_size < PAGE_CHUNK_SIZE)) {
			info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);
			info->ndcb2 = 0;
		}
		else {
			info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);
			if (page_addr & 0xFF0000)
				info->ndcb2 = (page_addr & 0xFF0000) >> 16;
			else
				info->ndcb2 = 0;
		}
		info->buf_count = mtd->writesize + mtd->oobsize;
		/* pre-fill with 0xFF so untouched bytes read as erased */
		memset(info->data_buff, 0xFF, info->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
		/* nothing was staged: skip programming a blank page */
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}
		cmd = flash_info->cmdset->program;
		nand->is_write = 1;
		/* programming completes on the ready signal, not on CMDD */
		info->need_wait_ready = chunks + 1;
		if (unlikely(flash_info->page_size < PAGE_CHUNK_SIZE)) {
			info->total_cmds = 1;
			info->ndcb0[0] |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_AUTO_RS
				| NDCB0_ST_ROW_EN
				| NDCB0_DBC
				| cmd
				| addr_cycle;
		} else {
			/* dispatch + naked writes + final confirm command */
			info->total_cmds = chunks + 1;
			info->ndcb0[0] |= NDCB0_CMD_XTYPE(0x4)
				| NDCB0_CMD_TYPE(0x1)
				| NDCB0_NC
				| NDCB0_AUTO_RS
				| (cmd & NDCB0_CMD1_MASK)
				| addr_cycle;
			for (i = 1; i < chunks; i ++)
				info->ndcb0[i] |= NDCB0_CMD_XTYPE(0x5)
					| NDCB0_NC
					| NDCB0_AUTO_RS
					| NDCB0_CMD_TYPE(0x1)
					| addr_cycle;
			info->ndcb0[chunks] |= NDCB0_CMD_XTYPE(0x3)
				| NDCB0_CMD_TYPE(0x1)
				| NDCB0_ST_ROW_EN
				| NDCB0_DBC
				| (cmd & NDCB0_CMD2_MASK)
				| NDCB0_CMD1_MASK
				| addr_cycle;
		}
		if (ndcb3) {
			for (i = 0; i < chunks; i ++)
				info->ndcb3[i] = ndcb3;
			/* trailing entry covers the final full-size chunk */
			info->ndcb3[i] = info->data_size;
		}
		break;
	case NAND_CMD_READID:
		info->total_cmds = 1;
		cmd = flash_info->cmdset->read_id;
		info->buf_count = info->read_id_bytes;
		info->ndcb0[0] |= NDCB0_CMD_TYPE(3)
			| NDCB0_ADDR_CYC(1)
			| cmd;
		info->ndcb3[0] = 8;
		break;
	case NAND_CMD_STATUS:
		info->total_cmds = 1;
		cmd = flash_info->cmdset->read_status;
		info->buf_count = 1;
		info->ndcb0[0] |= NDCB0_CMD_TYPE(4)
			| NDCB0_ADDR_CYC(1)
			| cmd;
		info->ndcb3[0] = 8;
		break;
	case NAND_CMD_ERASE1:
		info->total_cmds = 1;
		cmd = flash_info->cmdset->erase;
		/* DBC makes the controller issue the confirm (ERASE2) */
		info->ndcb0[0] |= NDCB0_CMD_TYPE(2)
			| NDCB0_AUTO_RS
			| NDCB0_ADDR_CYC(3)
			| NDCB0_DBC
			| cmd;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;
		break;
	case NAND_CMD_RESET:
		info->total_cmds = 1;
		cmd = flash_info->cmdset->reset;
		info->ndcb0[0] |= NDCB0_CMD_TYPE(5)
			| cmd;
		break;
	case NAND_CMD_ERASE2:
		/* confirm cycle already issued by ERASE1 (NDCB0_DBC) */
		exec_cmd = 0;
		break;
	default:
		exec_cmd = 0;
		printf("non-supported command.\n");
		break;
	}
	nand->use_ecc = info->use_ecc;
	return exec_cmd;
}
/*
 * MTD cmdfunc hook: translate an MTD command into controller command
 * words (prepare_command_pool), run the state machine to completion,
 * and route page addresses through the bad-block manager first.
 */
static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
		int column, int page_addr)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;
	const struct pxa3xx_nand_flash *flash_info = info->flash_info;
	struct pxa3xx_bbm *pxa3xx_bbm = mtd->bbm;
	int ret, exec_cmd;
	loff_t addr;

	/* reset timing */
	if (nand->chip_select != info->chip_select) {
		pxa3xx_nand_set_timing(info, flash_info->timing, 0);
		nand->chip_select = info->chip_select;
	}
	/* if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (flash_info->flash_width == 16)
		column /= 2;
	DBG_NAND(printk("command %x, page %x, ", command, page_addr););
	/* let the BBM relocate the page if its block is marked bad */
	if (pxa3xx_bbm && (command == NAND_CMD_READOOB
			|| command == NAND_CMD_READ0
			|| command == NAND_CMD_SEQIN
			|| command == NAND_CMD_ERASE1)) {
		/* NOTE(review): same pxa1826 pages-0..63 spare-area
		 * restriction as in prepare_command_pool() - confirm.
		 */
		if ((cpu_is_pxa1826_z3() || cpu_is_pxa1826_a0())
				&& (page_addr < 64 && page_addr >= 0))
			info->reg_ndcr &= ~NDCR_SPARE_EN;
		else
			info->reg_ndcr |= NDCR_SPARE_EN;
		addr = (loff_t)page_addr << mtd->writesize_shift;
		addr = pxa3xx_bbm->search(mtd, addr);
		page_addr = addr >> mtd->writesize_shift;
	}
	DBG_NAND(printk("post page %x\n", page_addr));
	exec_cmd = prepare_command_pool(nand, command, column, page_addr);
	if (exec_cmd) {
		pxa3xx_nand_start(info);
		ret = pxa3xx_nand_polling(nand, CHIP_DELAY_TIMEOUT);
		if (!ret) {
			/* polling timed out: dump state and force-stop */
			nand_error_dump(nand);
			nand->errcode |= ERR_SENDCMD;
			pxa3xx_nand_stop(nand);
		} else if (nand->command != NAND_CMD_READ0) {
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(nand);
		}
	}
}
/*
 * MTD read_byte hook: return the next byte from the driver buffer,
 * or 0xFF once the buffer is exhausted.  READ0 data is drained from
 * the controller lazily, on first access.
 *
 * Fix: retval was declared plain 'char', whose signedness is
 * implementation-defined; use uint8_t to match the return type.
 */
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;
	uint8_t retval = 0xFF;

	if (nand->command == NAND_CMD_READ0)
		pxa3xx_buff_read_data(mtd);
	if (info->buf_start < info->buf_count)
		/* Has just send a new command? */
		retval = info->data_buff[info->buf_start++];
	return retval;
}
/*
 * MTD read_word hook: return the next 16-bit word from the driver
 * buffer, or 0xFFFF when the read is unaligned or past the end.
 * READ0 data is drained from the controller lazily, on first access.
 */
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;
	u16 word = 0xFFFF;

	if (nand->command == NAND_CMD_READ0)
		pxa3xx_buff_read_data(mtd);
	/* serve only even offsets that stay inside the buffer */
	if (info->buf_start < info->buf_count
			&& !(info->buf_start & 0x01)) {
		word = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return word;
}
/*
 * MTD read_buf hook: copy up to @len bytes from the driver buffer into
 * @buf, clamped to what is left.  READ0 data is drained from the
 * controller lazily, on first access.
 */
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;
	int avail = min_t(size_t, len, info->buf_count - info->buf_start);

	if (nand->command == NAND_CMD_READ0)
		pxa3xx_buff_read_data(mtd);
	memcpy(buf, info->data_buff + info->buf_start, avail);
	info->buf_start += avail;
}
/*
 * MTD write_buf hook: stage up to @len bytes into the driver buffer,
 * clamped to the remaining space.  The data is flushed to the flash
 * later, when NAND_CMD_PAGEPROG is issued.
 */
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	int space = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, space);
	info->buf_start += space;
}
/* MTD verify_buf hook: verification is not implemented; always report
 * success (0).
 */
static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	return 0;
}
/* MTD select_chip hook: chip selection is handled per command via
 * info->chip_select in pxa3xx_nand_cmdfunc(); nothing to do here.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
/*
 * Error handling exposed to the MTD level: map the accumulated driver
 * error code onto the MTD status.  Trust violations additionally dump
 * the controller state; bad-block and send-command errors fail the
 * operation; everything else reports success.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;
	int err = nand->errcode;

	if (err & ERR_TRUSTVIO) {
		nand_error_dump(nand);
		return NAND_STATUS_FAIL;
	}
	return (err & (ERR_BBERR | ERR_SENDCMD)) ? NAND_STATUS_FAIL : 0;
}
/*
 * ecc.read_oob hook: read the spare area of @page into chip->oob_poi.
 * On pages larger than 2K the meta data must be re-ordered, because
 * each 2K chunk carries its own OOB slice (see the matching shuffle in
 * pxa3xx_nand_read_page_hwecc()).
 */
static int pxa3xx_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
		int page)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand_flash *flash_info = info->flash_info;
	uint8_t *tmp;
	int oob_size;

	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	oob_size = info->oob_size;
	/* It is a special case that meta data need to re-order
	 * due to the page allocation in the large page (>2K)
	 */
	if (flash_info->page_size > PAGE_CHUNK_SIZE) {
		tmp = chip->oob_poi;
		memcpy(tmp, info->oob_buff, oob_size - 2);
		/* last two bytes of the first slice actually live at the
		 * end of the second data chunk; blank them here and copy
		 * the real bytes from data_buff below
		 */
		tmp[oob_size - 1] = tmp[oob_size - 2] = 0xff;
		tmp += oob_size;
		memcpy(tmp, info->data_buff + PAGE_CHUNK_SIZE * 2 - 2, 2);
		tmp += 2;
		memcpy(tmp, info->oob_buff + oob_size, oob_size - 2);
	} else
		memcpy(chip->oob_poi, info->oob_buff, mtd->oobsize);
	return 0;
}
/*
 * ecc.read_page hook: deliver page data to @buf and OOB to
 * chip->oob_poi, re-ordering the per-chunk meta data for raw reads of
 * large (>2K) BCH pages, and fold the controller's error flags into
 * the MTD ECC statistics.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf,
		int oob_required, int page)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct pxa3xx_nand *nand = info->nand_data;
	struct pxa3xx_nand_flash *flash_info = info->flash_info;
	struct pxa3xx_bbm *pxa3xx_bbm = mtd->bbm;
	uint8_t *tmp;
	int oob_size;
	int corrected = 0;

	oob_size = info->oob_size;
	/* It is a special case that meta data need to re-order
	 * due to the page allocation in the large page (>2K)
	 * NOTE(review): 'mtd->size > PAGE_CHUNK_SIZE' compares the whole
	 * device size against 2K - looks like mtd->writesize was meant;
	 * confirm before relying on this branch for 2K-page devices.
	 */
	if (flash_info->ecc_type == ECC_BCH && nand->use_ecc == ECC_NONE
			&& mtd->size > PAGE_CHUNK_SIZE) {
		tmp = (uint8_t *)buf;
		memcpy(tmp, info->data_buff, PAGE_CHUNK_SIZE);
		tmp += PAGE_CHUNK_SIZE;
		/* the 2 bytes bridging the chunks live in the OOB buffer */
		memcpy(tmp, info->oob_buff + oob_size - 2, 2);
		tmp += 2;
		memcpy(tmp, info->data_buff + PAGE_CHUNK_SIZE, PAGE_CHUNK_SIZE);
		tmp = chip->oob_poi;
		memcpy(tmp, info->oob_buff, oob_size - 2);
		tmp += oob_size;
		memcpy(tmp, info->data_buff + PAGE_CHUNK_SIZE * 2 - 2, 2);
		tmp += 2;
		memcpy(tmp, info->oob_buff + oob_size, oob_size);
	}
	else {
		if (nand->command == NAND_CMD_READ0) {
			/* data still in the FIFO: copy it out directly */
			pxa3xx_upload_page_direct(mtd, buf);
		} else {
			memcpy(buf, info->data_buff, mtd->writesize);
			memcpy(chip->oob_poi, info->oob_buff, mtd->oobsize);
		}
	}
	if (nand->errcode & ERR_CORERR) {
		DBG_NAND(printk("###correctable error detected\n"););
		printf("###correctable error detected, bad_bits=%d\n",
		       nand->bad_count);
		/* NOTE(review): for BCH, bad counts above the threshold are
		 * added to ecc_stats.corrected here AND again via
		 * 'corrected' below - double counting? confirm intent.
		 */
		switch (nand->use_ecc) {
		case ECC_BCH:
			if (nand->bad_count > BCH_THRESHOLD)
				mtd->ecc_stats.corrected +=
					(nand->bad_count - BCH_THRESHOLD);
			corrected = nand->bad_count;
			break;
		case ECC_HAMMIN:
			corrected = 1;
			break;
		case ECC_NONE:
		default:
			break;
		}
		/* a corrected read may indicate read disturb: rewrite block */
		if (corrected && pxa3xx_bbm)
			pxa3xx_bbm->scrub_read_disturb(mtd,
					page << mtd->writesize_shift);
		mtd->ecc_stats.corrected += corrected;
	} else if (nand->errcode & ERR_DBERR) {
		int buf_blank;

		DBG_NAND(printk("###uncorrectable error!!!\n"));
		/* an all-0xff (erased) page is not a real ECC failure */
		buf_blank = is_buf_blank(buf, mtd->writesize);
		if (!buf_blank)
			mtd->ecc_stats.failed++;
	}
	return 0;
}
/*
 * ecc.write_page hook: stage the page data and the OOB into the driver
 * buffer; the actual program command is issued later, when the core
 * sends NAND_CMD_PAGEPROG.  Always returns 0.
 */
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
/*
 * Configure the driver and the NDCR register for the flash described
 * by @f: buffer layout, address cycles, bus widths, page/block sizes
 * and timings.  Always returns 0.
 */
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
		struct pxa3xx_nand_flash *f, int show_timing)
{
	/* enable all interrupts */
	uint32_t ndcr = 0;
	struct pxa3xx_nand *nand = info->nand_data;

	/* calculate flash information */
	/* the OOB buffer sits right behind the page data in data_buff */
	info->oob_buff = info->data_buff + f->page_size;
	info->read_id_bytes = (f->page_size >= 2048) ? 4 : 2;
	/* calculate addressing information */
	info->col_addr_cycles = (f->page_size >= 2048) ? 2 : 1;
	if (f->num_blocks * f->page_per_block > 65536)
		info->row_addr_cycles = 3;
	else
		info->row_addr_cycles = 2;
	ndcr |= (nand->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	/* encode pages-per-block into the NDCR field */
	switch (f->page_per_block) {
	case 32:
		ndcr |= NDCR_PG_PER_BLK(0x0);
		break;
	case 128:
		ndcr |= NDCR_PG_PER_BLK(0x1);
		break;
	case 256:
		ndcr |= NDCR_PG_PER_BLK(0x3);
		break;
	case 64:
	default:
		ndcr |= NDCR_PG_PER_BLK(0x2);
		break;
	}
	switch (f->page_size) {
	case 512:
		ndcr |= NDCR_PAGE_SZ(0x0);
		break;
	case 2048:
	default:
		ndcr |= NDCR_PAGE_SZ(0x1);
		break;
	}
	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
	info->reg_ndcr = ndcr;
	/* invalidate the timing cache so set_timing recomputes it */
	info->timing0 = info->timing1 = 0;
	pxa3xx_nand_set_timing(info, f->timing, show_timing);
	info->flash_info = f;
	return 0;
}
/* MTD erase_cmd hook: only ERASE1 is needed - the controller issues
 * the confirm cycle itself (NDCB0_DBC, see prepare_command_pool()).
 */
static void pxa3xx_erase_cmd(struct mtd_info *mtd, int page)
{
	struct nand_chip *chip = mtd->priv;

	/* Send commands to erase a block */
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
}
/*
 * Wire the driver's callbacks into the nand_chip structure and derive
 * the geometry-dependent fields (shifts, masks, bad-block position,
 * ECC layout) from the detected flash parameters.
 */
static void pxa3xx_nand_init_mtd(struct mtd_info *mtd)
{
	struct pxa3xx_nand_info *info = mtd->priv;
	struct nand_chip *this = &info->nand_chip;
	const struct pxa3xx_nand_flash *f = info->flash_info;

	this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16: 0;
	this->waitfunc = pxa3xx_nand_waitfunc;
	this->select_chip = pxa3xx_nand_select_chip;
	this->dev_ready = pxa3xx_nand_dev_ready;
	this->cmdfunc = pxa3xx_nand_cmdfunc;
	this->read_word = pxa3xx_nand_read_word;
	this->read_byte = pxa3xx_nand_read_byte;
	this->read_buf = pxa3xx_nand_read_buf;
	this->write_buf = pxa3xx_nand_write_buf;
	this->verify_buf = pxa3xx_nand_verify_buf;
	this->erase_cmd = pxa3xx_erase_cmd;
	this->errstat = NULL;
	this->write_page = NULL;
	this->ecc.mode = NAND_ECC_HW;
	this->ecc.size = f->page_size;
	this->ecc.read_page = pxa3xx_nand_read_page_hwecc;
	this->ecc.read_oob = pxa3xx_nand_read_oob;
	this->ecc.write_page = pxa3xx_nand_write_page_hwecc;
	this->chipsize = (uint64_t)f->num_blocks *
			f->page_per_block *
			f->page_size;
	/* NOTE(review): ffs() on a 64-bit chipsize only sees the low
	 * 32 bits - works for chips < 4GiB; confirm for larger parts.
	 */
	this->chip_shift = ffs(this->chipsize) - 1;
	mtd->size = this->chipsize;
	/* Calculate the address shift from the page size */
	this->page_shift = ffs(mtd->writesize) - 1;
	/* NOTE(review): do_div() conventionally divides in place and
	 * returns the REMAINDER, which would make pagemask wrong and
	 * clobber chipsize - mainline uses (chipsize >> page_shift) - 1;
	 * verify against this tree's do_div() definition.
	 */
	this->pagemask = do_div(this->chipsize, mtd->writesize) - 1;
	this->bbt_erase_shift = this->phys_erase_shift =
			ffs(mtd->erasesize) - 1;
	/* Set the bad block position */
	this->badblockpos = mtd->writesize > 512 ?
			NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
	/* Get chip options, preserve non chip based options */
	//this->options &= ~NAND_CHIPOPTIONS_MSK;
	this->controller = &this->hwcontrol;
	/*
	 * Set chip as a default. Board drivers can override it,
	 * if necessary
	 */
	this->options |= NAND_NO_AUTOINCR;
	//this->options |= NAND_NO_READRDY;
	this->options |= BBT_RELOCATION_IFBAD;
	if (f->page_size == 2048)
		this->ecc.layout = &hw_largepage_ecclayout;
	else
		this->ecc.layout = &hw_smallpage_ecclayout;
	this->numchips = 1;
	this->chip_delay = 25;
	this->scan_bbt = pxa3xx_scan_bbt;
	this->block_bad = pxa3xx_block_bad;
	this->block_markbad = pxa3xx_block_markbad;
}
/*
 * Probe chip select @cs for a chip: program conservative common
 * settings and issue a RESET; a present chip raises the ready signal.
 * Returns 1 when a chip answered, 0 otherwise.
 *
 * Fix: removed the stray second ';' after the nand_data initializer.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info, int cs)
{
	struct pxa3xx_nand *nand = info->nand_data;
	struct pxa3xx_nand_flash *f = &nand_common;
	struct mtd_info *mtd = nand->mtd[cs];

	pxa3xx_nand_config_flash(info, f, 0);
	pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	return nand->is_ready ? 1 : 0;
}
static int pxa3xx_nand_detect_flash(struct pxa3xx_nand *nand)
{
struct pxa3xx_nand_flash *f;
struct nand_chip *chip;
struct pxa3xx_nand_info *info;
struct mtd_info *mtd;
uint32_t id = -1;
int i, ret, chip_select;
f = builtin_flash_types[0];
chip_select = 0;
for (; chip_select < NUM_CHIP_SELECT; chip_select ++) {
mtd = nand->mtd[chip_select];
chip = mtd->priv;
info = mtd->priv;
ret = pxa3xx_nand_sensing(info, chip_select);
if (!ret) {
free (nand->mtd[chip_select]);
nand->mtd[chip_select] = NULL;
continue;
}
pxa3xx_nand_cmdfunc(mtd, NAND_CMD_READID, 0, 0);
id = *((uint32_t *)(info->data_buff));
if (id == 0) {
kfree(mtd);
nand->mtd[chip_select] = NULL;
continue;
}
for (i = 1; i < ARRAY_SIZE(builtin_flash_types); i++) {
f = builtin_flash_types[i];
/* find the chip in default list */
if (f->chip_id == (id & f->chip_id_mask)) {
printf("detect chip id %x on cs %d, %s\n",
f->chip_id, chip_select, f->name);
pxa3xx_nand_config_flash(info, f, 1);
chip->cellinfo = info->data_buff[2];
mtd->writesize = f->page_size;
mtd->oobsize = mtd->writesize / 32;
mtd->erasesize = f->page_size * f->page_per_block;
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
mtd->erasesize_shift = 0;
if (is_power_of_2(mtd->writesize))
mtd->writesize_shift = ffs(mtd->writesize) - 1;
else
mtd->writesize_shift = 0;
mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
break;
}
}
if (i == ARRAY_SIZE(builtin_flash_types)) {
kfree(mtd);
nand->mtd[chip_select] = NULL;
printf("ERROR!! flash on cs %d id %x not defined!!!\n",
chip_select, id);
continue;
}
}
return 0;
}
/* the max buff size should be large than
* the largest size of page of NAND flash
* that currently controller support
*/
#define MAX_BUFF_SIZE ((PAGE_CHUNK_SIZE + OOB_CHUNK_SIZE) * 2)
/*
 * Allocate the controller descriptor plus, per chip select, one
 * mtd_info + pxa3xx_nand_info pair and a data buffer.  On any failure
 * everything allocated so far is released and NULL is returned.
 *
 * Fixes: nand->mtd[] is now pre-cleared - the cleanup loop previously
 * dereferenced uninitialized slot pointers - and a failed mtd
 * allocation now runs the cleanup path instead of returning NULL and
 * leaking 'nand' and all earlier allocations.
 */
static struct pxa3xx_nand *alloc_nand_resource(struct pxa3xx_nand_platform_data *pdata)
{
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand *nand;
	struct mtd_info *mtd;
	int i, chip_select;

	nand = malloc(sizeof(struct pxa3xx_nand));
	if (!nand) {
		printf("failed to allocate memory\n");
		return NULL;
	}
	/* pre-clear the slot table so the error path below can tell
	 * allocated slots from untouched ones
	 */
	for (i = 0; i < NUM_CHIP_SELECT; i++)
		nand->mtd[i] = NULL;
	nand->mmio_base = pdata->mmio_base;
	for (chip_select = 0; chip_select < NUM_CHIP_SELECT; chip_select++) {
		/* the pxa3xx_nand_info lives right behind its mtd_info */
		mtd = malloc(sizeof(struct mtd_info)
				+ sizeof(struct pxa3xx_nand_info));
		if (!mtd) {
			printf("failed to allocate mtd memory\n");
			goto fail_free_buf;
		}
		info = (struct pxa3xx_nand_info *)(&mtd[1]);
		info->chip_select = chip_select;
		info->nand_data = nand;
		info->data_buff = NULL;
		mtd->priv = info;
		nand->mtd[chip_select] = mtd;
		info->data_buff = malloc(MAX_BUFF_SIZE);
		if (info->data_buff == NULL) {
			printf("failed to allocate data buff\n");
			goto fail_free_buf;
		}
	}
	return nand;
fail_free_buf:
	for (i = 0; i < NUM_CHIP_SELECT; i++) {
		mtd = nand->mtd[i];
		if (!mtd)
			continue;
		info = mtd->priv;
		free(info->data_buff);	/* free(NULL) is a no-op */
		free(mtd);
	}
	free(nand);
	return NULL;
}
/* Finish MTD setup: identification was already done by
 * pxa3xx_nand_detect_flash(), so only nand_scan_tail() remains.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	return nand_scan_tail(mtd);
}
/*
 * Top-level probe entry point: allocate resources, detect chips on all
 * chip selects, then initialize, scan and register an MTD device for
 * every chip found.  Returns the controller descriptor (possibly with
 * empty slots) or NULL when the basic allocations fail.
 */
struct pxa3xx_nand *pxa3xx_nand_probe(struct pxa3xx_nand_platform_data *pdata)
{
	struct pxa3xx_nand *nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int i;

	nand = alloc_nand_resource(pdata);
	if (!nand)
		return NULL;
	/* board-specific knobs from platform data */
	nand->enable_arbiter = pdata->enable_arbiter;
	nand->RD_CNT_DEL = pdata->RD_CNT_DEL;
	pxa3xx_nand_detect_flash(nand);
	for (i = 0; i < NUM_CHIP_SELECT; i ++) {
		mtd = nand->mtd[i];
		if (mtd) {
			pxa3xx_nand_init_mtd(mtd);
			if (pxa3xx_nand_scan(mtd)) {
				printf("failed to scan nand\n");
			}
			mtd->name = mtd_names[i];
			/* Scan for bad blocks and create bbt here */
			chip = mtd->priv;
			chip->scan_bbt(mtd);
			chip->options |= NAND_BBT_SCANNED;
#ifdef CONFIG_CMD_UBI
			add_mtd_device(mtd);
#endif
		}
	}
	return nand;
}