ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/uboot/drivers/mtd/pxa3xx_bbm.c b/marvell/uboot/drivers/mtd/pxa3xx_bbm.c
new file mode 100644
index 0000000..c136a92
--- /dev/null
+++ b/marvell/uboot/drivers/mtd/pxa3xx_bbm.c
@@ -0,0 +1,3028 @@
+/*
+ * Bad Block Management support for PXA3XX.
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Lei Wen <leiwen@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <asm/errno.h>
+#include <mtd/pxa3xx_bbm.h>
+#include <asm/arch/cpu.h>
+#include <asm/arch/config.h>
+#include <asm/arch-pxa182x/pxa182x.h>
+#include <asm/arch-pxa182x/cpu.h>
+#ifndef CONFIG_ASR1901
+#include <asm/arch-asr1802s/asr1802.h>
+#include <asm/arch-asr1802s/cpu.h>
+#endif
+#include <malloc.h>
+#include <common.h>
+
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+
+#define NEW_BBM_RELOC_PERCENTAGE (5)
+#define MAX_SUPPRTED_PARTNUM (3)
+#define MAX_OBM_BLOCK (3)
+static struct mtd_partition *pxa3xx_check_partition(struct mtd_info *mtd,
+ struct mtd_partition *part, int *num);
+
+static int erase_success;
+static int should_reloc = 1;
+static int rd_scrubbing = 0;
+static int disable_reloc = 0;
+static int rd_disturb_cnt = 0;
+
+static inline unsigned short from32to16(unsigned int x)
+{
+ /* add up 16-bit and 16-bit for 16+c bit */
+ x = (x & 0xffff) + (x >> 16);
+ /* add up carry.. */
+ x = (x & 0xffff) + (x >> 16);
+ return x;
+}
+
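+/*
+ * Checksum helper: accumulate a nibble-wise CRC-32 (reflected polynomial,
+ * 16-entry lookup table) over @buf_len bytes at @ptr into @crcu32 and fold
+ * the result down to 16 bits with from32to16().  Used below to protect the
+ * BBT and ABBT tables against corruption.
+ */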
+static unsigned int bbm_crc16(unsigned int crcu32,
+ const unsigned char *ptr, unsigned int buf_len)
+{
+ static const unsigned int s_crc32[16] = {
+ 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
+ 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
+ 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
+ 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
+ };
+ if (!ptr)
+ return 0;
+ crcu32 = ~crcu32;
+ while (buf_len--)
+ {
+ unsigned char b = *ptr++;
+ crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
+ crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
+ }
+
+ return from32to16(~crcu32);
+}
+
+static int is_empty(void *buf, int len)
+{
+ uint8_t *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (*p++ != 0xff)
+ return 0;
+ return 1;
+}
+
+static void pxa3xx_bbm_callback(struct erase_info *instr)
+{
+ if (instr->fail_addr == MTD_FAIL_ADDR_UNKNOWN)
+ erase_success = 1;
+ else
+ erase_success = 0;
+}
+
+static void dump_reloc_table(struct reloc_item *item, int entry_num)
+{
+ int i;
+
+ if (entry_num == 0) {
+ printk(KERN_INFO "The reloc table is empty now\n");
+ return;
+ }
+
+ printk(KERN_INFO "Total %d entry:\n", entry_num);
+ for (i = 0; i < entry_num; i++) {
+ if (item[i].from == BLK_BAD && item[i].to == BLK_BAD)
+ continue;
+
+ printk(KERN_INFO "%d: block %8d ---> %d\n",
+ i + 1, item[i].from, item[i].to);
+ }
+}
+
+static void dump_fact_bads(struct pxa3xx_bbt *fbbt)
+{
+ uint32_t *fact_bad = (uint32_t *)&fbbt->fact_bad;
+ int i;
+
+ if (fbbt->entry_num == 0) {
+ printk(KERN_INFO "There is no factory bad block!!\n");
+ return;
+ }
+
+ for (i = 0; i < fbbt->entry_num; i ++)
+ printk(KERN_INFO "block %d is bad.\n", fact_bad[i]);
+}
+
+static void dump_part_info(struct mtd_info *mtd)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_new_bbm *new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ struct pxa3xx_part *part = new_bbm->part;
+ struct pxa3xx_partinfo *partinfo;
+ struct pxa3xx_bbt *rbbt;
+ struct reloc_item *item;
+ char tmp[9];
+ int i;
+ uint32_t swap_temp;
+
+ printk(KERN_INFO "\nThere are totally %d parts", part->part_num);
+ for (i = 0; i < part->part_num; i ++) {
+ printk(KERN_INFO "\n===The part %d info:===\n", i);
+ partinfo = &new_bbm->partinfo[i];
+ if (partinfo->type == PART_LOGI)
+ printk(KERN_INFO "This part is Logi\n");
+ else
+ printk(KERN_INFO "This part is Phys\n");
+ if (partinfo->usage && partinfo->usage != 0xffffffff) {
+ memcpy(tmp, &partinfo->usage, 4);
+ tmp[4] = '\0';
+ printk(KERN_INFO "Part name %s\n", tmp);
+ }
+ if (partinfo->identifier && partinfo->identifier != 0xffffffff) {
+ memcpy(tmp, &partinfo->identifier, 4);
+ tmp[4] = '\0';
+ printk(KERN_INFO "identifier %s\n", tmp);
+ }
+ printk(KERN_INFO "Attr %16x\n", partinfo->attrs);
+ printk(KERN_INFO "This part start from %llx to %llx\n",
+ partinfo->start_addr, partinfo->end_addr);
+ printk(KERN_INFO "Reserved pool start from %llx, size %llx\n",
+ partinfo->rp_start, partinfo->rp_size);
+ if (partinfo->rp_algo == RP_UPWD)
+ printk(KERN_INFO "Reserved pool grow upwards\n");
+ else
+ printk(KERN_INFO "Reserved pool grow downwards\n");
+
+ swap_temp = partinfo->rbbt_type;
+ swab32s(&swap_temp);
+ memcpy(tmp, &swap_temp, 4);
+ tmp[4] = '\0';
+ printk(KERN_INFO "\nRBBT type %s\n", tmp);
+ printk(KERN_INFO "RBBT start at %llx, its back at %llx\n",
+ partinfo->rbbt_start, partinfo->rbbt_start_back);
+ rbbt = &new_bbm->rbbt[i];
+ printk(KERN_INFO "RBBT could max reloc %d blocks\n",
+ new_bbm->max_reloc_entry[i]);
+ printk(KERN_INFO "Current slot is at 0x%llx\n",
+ new_bbm->rbbt_offset[i] << mtd->writesize_shift);
+ item = (struct reloc_item *)&rbbt->reloc;
+		dump_reloc_table(item, rbbt->entry_num);
+ }
+}
+
+static void pxa3xx_uninit_reloc_tb(struct mtd_info *mtd)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_new_bbm *new_bbm;
+
+ if (bbm) {
+ switch (bbm->bbm_type) {
+ case BBM_LEGACY:
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ kfree(legacy_bbm->table);
+ break;
+
+ case BBM_NEW:
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ kfree(new_bbm->rbbt);
+ kfree(new_bbm->fbbt);
+ kfree(new_bbm->part);
+ default:
+ break;
+ }
+
+ if (bbm->data_buf)
+ kfree(bbm->data_buf);
+ kfree(bbm);
+ mtd->bbm = NULL;
+ }
+}
+
+/*
+ * Find which partition the block belongs to
+ */
+static int find_part(struct mtd_info *mtd, uint64_t offset)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_new_bbm *new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ struct pxa3xx_part *part = new_bbm->part;
+ struct pxa3xx_partinfo *partinfo;
+ int i, found_part = -EINVAL;
+
+ for (i = 0; i < part->part_num; i ++) {
+ partinfo = &(new_bbm->partinfo[i]);
+ if (offset < partinfo->start_addr)
+ break;
+
+ if (offset < partinfo->end_addr) {
+ found_part = i;
+ break;
+ }
+ }
+
+ return found_part;
+}
+
+/*
+ * start_page and end_page should lie within one block boundary.
+ * direction: ORDER_POSITIVE means slots grow in positive page order (so the
+ * search runs from end_page downwards); otherwise the order is reversed.
+ * indicator is the header value (masked by @mask) that marks a valid BBT slot.
+ */
+int page_search(struct mtd_info *mtd, int start_page, int end_page,
+ int direction, unsigned int indicator, void *buf, unsigned int mask)
+{
+ int found_page = -EINVAL, cur_page, ret;
+ unsigned int header;
+ size_t retlen;
+
+ cur_page = (direction == ORDER_POSITIVE) ? end_page : start_page;
+ while (start_page <= end_page) {
+ ret = mtd->_read(mtd, cur_page << mtd->writesize_shift,
+ mtd->writesize, &retlen, buf);
+ header = *(unsigned int *)buf & mask;
+ if (ret >= 0 && header == indicator) {
+ found_page = cur_page;
+ break;
+ }
+
+ if (direction == ORDER_POSITIVE) {
+ cur_page --;
+ if (cur_page < start_page)
+ break;
+ }
+ else {
+ cur_page ++;
+ if (cur_page > end_page)
+ break;
+ }
+ }
+
+ return found_page;
+}
+
+static int legacy_bbm_copy_peb(struct mtd_info *mtd, int from, int to,
+ int start_page, int end_page, int flag)
+{
+ int from_addr = from << mtd->erasesize_shift;
+ int to_addr = to << mtd->erasesize_shift;
+ int page_size = mtd->writesize;
+ int pages_per_block = mtd->erasesize >> mtd->writesize_shift;
+ int addr, end_addr, ret = 0;
+ size_t retlen;
+ void *buf, *rbuf;
+
+ buf = kzalloc(page_size * 2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rbuf = buf + page_size;
+
+ end_addr = min(end_page, pages_per_block - 1) << mtd->writesize_shift;
+ addr = start_page << mtd->writesize_shift;
+
+ if (flag & DEST_SKIP_ALL_FF_PAGE) {
+		/* skip trailing all-0xFF pages, since the FS may write them later */
+ while (1) {
+ ret = mtd->_read(mtd, from_addr + end_addr,
+ page_size, &retlen, buf);
+ if (ret < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!is_empty(buf, mtd->writesize)) {
+ pr_debug("will copy from page %d to %d\n",
+ start_page,
+ end_addr >> mtd->writesize_shift);
+ break;
+ }
+
+ end_addr -= mtd->writesize;
+ if (end_addr < addr)
+ break;
+ }
+ }
+
+ while (addr <= end_addr) {
+ ret = mtd->_read(mtd, from_addr + addr, page_size,
+ &retlen, buf);
+ if(ret < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (flag & DEST_NOT_USE_RELOC)
+ disable_reloc = 1;
+
+ ret = mtd->_write(mtd, to_addr + addr, page_size,
+ &retlen, buf);
+ if (ret) {
+ if (flag & DEST_NOT_USE_RELOC)
+ disable_reloc = 0;
+
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* Read back and compare */
+ memset(rbuf, 0xFF, page_size);
+ ret = mtd->_read(mtd, to_addr + addr, page_size,
+ &retlen, rbuf);
+
+ if (flag & DEST_NOT_USE_RELOC)
+ disable_reloc = 0;
+
+ if (ret < 0 || memcmp(buf, rbuf, page_size)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = 0;
+ addr += page_size;
+ }
+out:
+ kfree(buf);
+ return ret;
+}
+
+static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
+
+static int check_pattern(const void *buf, uint8_t patt, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (((const uint8_t *)buf)[i] != patt)
+ return 0;
+ return 1;
+}
+
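+/*
+ * Torture-test a block that is waiting to be recycled: for each pattern in
+ * patterns[] the block is erased, verified to read back as all 0xff, written
+ * with the pattern and read back again.  Relocation and read-disturb handling
+ * are disabled while the test runs.  A block that survives all patterns is
+ * considered good again; any failure (including a read disturb seen during
+ * the test) reports -EIO.
+ */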
+static int torture_block(struct mtd_info *mtd, int block)
+{
+ struct erase_info instr;
+ int patt_count = ARRAY_SIZE(patterns);
+ int addr = block << mtd->erasesize_shift;
+ size_t retlen;
+ int i, ret = 0;
+ void *buf;
+
+ buf = kzalloc(mtd->erasesize, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_INFO "Failed to malloc erasesize memory\n");
+ return -ENOMEM;
+ }
+
+ disable_reloc = 1;
+ rd_disturb_cnt = 0;
+
+ for (i = 0; i < patt_count; i++) {
+ memset(&instr, 0, sizeof(struct erase_info));
+ instr.mtd = mtd;
+ instr.addr = (uint64_t)block << mtd->erasesize_shift;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+ if (!erase_success) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = mtd->_read(mtd, addr, mtd->erasesize, &retlen, buf);
+ if (ret < 0 || rd_disturb_cnt) {
+ goto out;
+ }
+
+ ret = check_pattern(buf, 0xff, mtd->erasesize);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ memset(buf, patterns[i], mtd->erasesize);
+ ret = mtd->_write(mtd, addr, mtd->erasesize, &retlen, buf);
+ if (ret)
+ goto out;
+
+ memset(buf, ~patterns[i], mtd->erasesize);
+ ret = mtd->_read(mtd, addr, mtd->erasesize, &retlen, buf);
+ if (ret < 0 || rd_disturb_cnt)
+ goto out;
+
+ ret = check_pattern(buf, patterns[i], mtd->erasesize);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ memset(&instr, 0, sizeof(struct erase_info));
+ instr.mtd = mtd;
+ instr.addr = (uint64_t)block << mtd->erasesize_shift;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+ if (!erase_success) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (rd_disturb_cnt) {
+ rd_disturb_cnt = 0;
+ printk(KERN_INFO "Find read disturb during torture %d!\n",
+ block);
+ ret = -EIO;
+ }
+
+ disable_reloc = 0;
+ kfree(buf);
+ if (!ret)
+ printk(KERN_INFO "Success to recycle block %d\n", block);
+ return ret;
+}
+
+int pxa3xx_abbt_recycle_blk(struct mtd_info *mtd)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct pxa3xx_abbt *abbt;
+ struct reloc_item *item_abbt;
+ int ret, i, total_abbt;
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+ abbt = abbm->abbt;
+ item_abbt = abbt->reloc;
+ total_abbt = abbt->entry_num;
+
+ for (i = 0; i < total_abbt; i++) {
+ if (item_abbt[i].from == BLK_WAIT_RECYCLE) {
+ ret = torture_block(mtd, item_abbt[i].to);
+ if (ret == -ENOMEM) {
+ break;
+ } else if (ret) {
+ item_abbt[i].from = BLK_RECYCLE_FAIL;
+ } else {
+ item_abbt[i].from = BLK_RECYCLED;
+ abbt->recycled_num++;
+ }
+
+ abbt->wait_recycle_num--;
+ }
+ }
+
+ return 0;
+}
+
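+/*
+ * Relocate the block containing @ofs using the extended legacy (two-level)
+ * scheme: pick a free block from the reserved pool at the end of the flash
+ * (or a previously recycled block), erase it, optionally copy the old
+ * contents over (@scrub), and record the mapping in the ABBT.  The old block
+ * is queued as BLK_WAIT_RECYCLE for a later torture test.  The first-level
+ * BBT is only touched when the bad block is one of the ABBT blocks or one of
+ * the first OBM blocks, presumably because the bootrom only consults the
+ * first-level table.
+ */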
+static int ext_legacy_bbt_relocate(struct mtd_info *mtd, loff_t ofs, int scrub)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm = NULL;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct pxa3xx_abbt *abbt;
+ struct reloc_item *item, *item_abbt;
+ struct erase_info instr;
+ int block = (int)(ofs >> mtd->erasesize_shift);
+ int reloc_block, entry_num = -1;
+ int i, _rel, max_entry, bitflip_entry, reloc_boundary;
+ int total, total_abbt, blk_index, blk_recyc = -1;
+ char *rp_tbl;
+ int ret;
+ int bitflip_cnt_entry = -1;
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+ abbt = abbm->abbt;
+ item = legacy_bbm->reloc;
+ max_entry = legacy_bbm->reserved_blks;
+ reloc_boundary = mtd_div_by_eb(mtd->size, mtd) - max_entry;
+ total = legacy_bbm->table->total;
+ item_abbt = abbt->reloc;
+ total_abbt = abbt->entry_num;
+
+ if (block >= reloc_boundary &&
+ block < reloc_boundary + max_entry - ABBT_BLK_NUM)
+ return -EINVAL;
+
+ printk(KERN_INFO "ready to put %llx into the bbt\n", ofs);
+ /* clear cache reloc */
+ legacy_bbm->reloc_cache.from = 0xFFFF;
+
+ rp_tbl = bbm->rel_dist;
+ if (!rp_tbl) {
+ rp_tbl = kzalloc(max_entry, GFP_KERNEL);
+ /* need to save this */
+ bbm->rel_dist = rp_tbl;
+ } else {
+ memset(rp_tbl, 0, max_entry);
+ }
+
+ /* Scan and save reserved block pool usage by two-level bbt */
+ bitflip_entry = 0;
+ for (i = 0; i < total_abbt; i ++) {
+ _rel = item_abbt[i].to - reloc_boundary;
+ if (item_abbt[i].from == BLK_FLIP_COUNT)
+ bitflip_cnt_entry = i;
+ else if (_rel >= 0)
+ rp_tbl[_rel] = 1;
+ else if (item_abbt[i].from != BLK_BAD)
+ bitflip_entry++;
+ }
+
+ if (bitflip_cnt_entry == -1) {
+ item_abbt[total_abbt].from = BLK_FLIP_COUNT;
+ item_abbt[total_abbt].to = 0;
+ bitflip_cnt_entry = total_abbt;
+ total_abbt++;
+ }
+
+	/*
+	 * Some poor-quality SPI NAND chips may generate so many bit-flips
+	 * that they use up the BBT table; limit the number of bit-flip
+	 * blocks so that markbad can still be used.
+	 */
+ if (scrub == ABBT_SCRUB_ANY &&
+ ((bitflip_entry + (max_entry - ABBT_BLK_NUM) * 2) >= abbm->max_entry)) {
+ printk(KERN_ERR "bit-flip number reach threshold(%d, %d, %d), exit\n",
+ bitflip_entry, max_entry, abbm->max_entry);
+ return -EINVAL;
+ }
+
+ /* Identify whether the block has been relocated */
+ for(i = total_abbt - 1; i >= 0; i --) {
+ if(block == item_abbt[i].from)
+ entry_num = i;
+ }
+
+	/*
+	 * Find the available block with the largest number in the reserved area
+	 */
+ while (1) {
+ if (block == abbm->main_blk || block == abbm->mirror_blk ||
+ block <= MAX_OBM_BLOCK) {
+ int bbt_max;
+
+ bbt_max = (mtd->writesize - sizeof(struct reloc_table)) /
+ sizeof(struct reloc_item);
+ if (total + 4 >= bbt_max) {
+ printk("bbt reach max(%d, %d)\n", total, bbt_max);
+ return -EINVAL;
+ }
+ }
+
+ if (total_abbt >= (abbm->max_entry - 2)) {
+ printk(KERN_ERR "ABBT table full: %ditems\n", total_abbt);
+ return -EINVAL;
+ }
+
+ /* Make sure that reloc_block is pointing to a valid block */
+ if (scrub == ABBT_SCRUB_BACK) {
+ reloc_block = -1;
+ } else {
+ for (reloc_block = max_entry - ABBT_BLK_NUM - 1;
+ reloc_block >= 0; reloc_block --) {
+ if (rp_tbl[reloc_block] == 0) {
+ reloc_block = reloc_block + reloc_boundary;
+ printk(KERN_INFO
+ "get block %d from reserved area\n",
+ reloc_block);
+ break;
+ }
+ }
+ }
+
+ if (reloc_block < 0) {
+ pxa3xx_abbt_recycle_blk(mtd);
+ /*
+ * No block from reserved pool, need to check if any
+ * recycled blocks exist
+ */
+ for (i = 0; i < total_abbt; i++) {
+ if (item_abbt[i].from == BLK_RECYCLED &&
+ (scrub != ABBT_SCRUB_BACK ||
+ item_abbt[i].to == block)) {
+ reloc_block = item_abbt[i].to;
+ blk_recyc = i;
+ printk(KERN_INFO
+ "get block %d from recycle area\n",
+ reloc_block);
+ abbt->recycled_num--;
+ break;
+ }
+ }
+ }
+
+ if (reloc_block < 0) {
+ /* if block failed to recycle, not map back to itself */
+ if (scrub == ABBT_SCRUB_BACK)
+ return -ENOSPC;
+
+ if (entry_num >= 0 && !scrub)
+ item_abbt[entry_num].from = BLK_BAD;
+
+ item_abbt[total_abbt].from = BLK_BAD;
+ item_abbt[total_abbt].to = block;
+ total_abbt++;
+ printk(KERN_ERR "Reserved area has no left blocks\n");
+ return -ENOSPC;
+ }
+
+ memset(&instr, 0, sizeof(struct erase_info));
+ instr.mtd = mtd;
+ instr.addr = (uint64_t)reloc_block << mtd->erasesize_shift;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+
+ disable_reloc = 1;
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+ disable_reloc = 0;
+
+ ret = 0;
+ if (erase_success) {
+ if (scrub)
+ ret = legacy_bbm_copy_peb(
+ mtd, block, reloc_block, 0,
+ (mtd->erasesize >>
+ mtd->writesize_shift) - 1,
+ DEST_NOT_USE_RELOC | DEST_SKIP_ALL_FF_PAGE);
+ if (!ret)
+ break;
+ if (ret != -EAGAIN) {
+ printk(KERN_INFO "%s: fatal error, exit\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ /* reach here means erase or copy failure */
+ if (scrub == ABBT_SCRUB_BACK ||
+ (!ret && instr.fail_addr != instr.addr))
+ return -EINVAL;
+ /*
+ * skip it if the reloc_block is also a bad block(erase or
+ * write fail).
+ */
+ if (blk_recyc != -1) {
+ item_abbt[blk_recyc].from = BLK_WAIT_RECYCLE;
+ } else {
+ item_abbt[total_abbt].from = BLK_WAIT_RECYCLE;
+ item_abbt[total_abbt].to = reloc_block;
+ total_abbt++;
+ legacy_bbm->status |= ABBT_CHANGED;
+ }
+
+ blk_recyc = -1;
+ _rel = reloc_block - reloc_boundary;
+ if (_rel >= 0)
+ rp_tbl[_rel] = 1;
+ }
+
+ if (total_abbt >= (abbm->max_entry - 2)) {
+ printk(KERN_ERR "ABBT table full: %ditems\n", total_abbt);
+ return -EINVAL;
+ }
+
+	/*
+	 * Record the relocation in the table: if the block was relocated
+	 * before, modify the original entry to point to the new relocated
+	 * block and let the old relocated block point to 65535; otherwise
+	 * create a new entry.
+	 */
+ if (blk_recyc != -1) {
+ blk_index = blk_recyc;
+ } else {
+ blk_index = total_abbt;
+ total_abbt++;
+ }
+
+	/*
+	 * Move the block waiting for recycle to the bbt tail to avoid it
+	 * being reused repeatedly; a torture test later checks whether it
+	 * is a genuinely bad block.
+	 */
+ if (entry_num != -1) {
+ item_abbt[blk_index].from = item_abbt[total_abbt - 1].from;
+ item_abbt[blk_index].to = item_abbt[total_abbt - 1].to;
+ item_abbt[total_abbt - 1].from = BLK_WAIT_RECYCLE;
+ item_abbt[total_abbt - 1].to = item_abbt[entry_num].to;
+ item_abbt[entry_num].to = reloc_block;
+ } else {
+ item_abbt[blk_index].from = block;
+ item_abbt[blk_index].to = reloc_block;
+ item_abbt[total_abbt].from = BLK_WAIT_RECYCLE;
+ item_abbt[total_abbt].to = block;
+ total_abbt++;
+
+ entry_num = blk_index;
+ }
+
+	/* Update bit-flip count statistics */
+ if (scrub == ABBT_SCRUB_ANY)
+ item_abbt[bitflip_cnt_entry].to++;
+
+	/*
+	 * Update the first-level bbt for the blocks where the second-level
+	 * bbt is located.
+	 */
+ if (block == abbm->main_blk || block == abbm->mirror_blk ||
+ block <= MAX_OBM_BLOCK) {
+ unsigned int *pver, *pcsum;
+ unsigned short csum;
+
+ for (i = 0; i < total; i++) {
+ if (item[i].from == block) {
+ /*
+ * make a fake entry for legacy bbm, so that
+ * new bbt has more entries than old, then we
+ * can find out which one is the latest.
+ */
+ item[i].from = BLK_BAD;
+ item[i].to = BLK_BAD;
+ break;
+ }
+ }
+
+		/* The bootrom does not support A --> A entries in the BBT */
+ if (block == reloc_block) {
+ item[total].from = BLK_BAD;
+ item[total].to = BLK_BAD;
+ } else {
+ item[total].from = block;
+ item[total].to = reloc_block;
+ }
+ total++;
+ legacy_bbm->table->total = total;
+ legacy_bbm->status |= BBT_CHANGED;
+
+ /*
+ * update BBT crc:
+ * BBT layout (Append ABB version and crc at tail)
+ * | magic(2B) |
+ * | entry number(2B) |
+ * | entry(4B)... |
+ * | ABB version(4B) |
+ * | Owner(2bit) |
+ * | reserved(14bit) |
+ * | CRC(2B) |
+ */
+ pver = (unsigned int *)(item + total);
+ *pver = ABBT_VERSION_2001;
+ pcsum = pver + 1;
+ *pcsum = BBT_UBOOT; /* owner at lower 2bit */
+ mb();
+ csum = bbm_crc16(0, (unsigned char *)legacy_bbm->table,
+ (sizeof(struct reloc_table) +
+ total * sizeof(struct reloc_item) + 4 + 2));
+
+ *pcsum |= csum << 16;
+ }
+
+ /* Remove redundant entry such as A -> A */
+ if (block == reloc_block) {
+ for (i = entry_num; i < total_abbt - 1; i++) {
+ item_abbt[i].from = item_abbt[i+1].from;
+ item_abbt[i].to = item_abbt[i+1].to;
+ }
+
+ item_abbt[total_abbt - 1].from = BLK_BAD;
+ item_abbt[total_abbt - 1].to = BLK_BAD;
+
+ total_abbt--;
+ }
+
+ abbt->wait_recycle_num++;
+ abbt->refcnt++;
+ abbt->entry_num = total_abbt;
+ legacy_bbm->status |= ABBT_CHANGED;
+
+ /* update ABBT crc */
+ if (abbt->ver == ABBT_VERSION_2001) {
+ abbt->owner = BBT_UBOOT;
+ abbt->reserved = 0;
+ abbt->crc = 0;
+ mb();
+ abbt->crc = bbm_crc16(0, (unsigned char *)abbt,
+ (sizeof(struct pxa3xx_abbt) +
+ abbt->entry_num * sizeof(struct reloc_item)));
+ }
+
+ /* clear cache reloc */
+ legacy_bbm->reloc_cache.from = 0xFFFF;
+ printk(KERN_INFO "%s: block %d --> %d\n", __func__,
+ block, reloc_block);
+ return 0;
+}
+
+/*
+ * Add the relocation entry into the relocation table.
+ * It's valid on MOBM V3.
+ * If the relocated block is bad, a new entry will be added at the
+ * bottom of the relocation table.
+ */
+static int sync_pxa3xx_bbt(struct mtd_info *mtd, loff_t ofs)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm = NULL;
+ struct pxa3xx_new_bbm *new_bbm;
+ struct pxa3xx_partinfo *partinfo;
+ struct pxa3xx_bbt *bbt = NULL;
+ struct reloc_item *item;
+ struct erase_info instr;
+ int reloc_block, entry_num = -1;
+ char *rel_dist;
+ int i, block, _rel, max_reloc_entry, reloc_boundary, total, part;
+
+ printk(KERN_INFO "ready to put %llx into the bbt\n", ofs);
+ if (bbm->bbm_type == BBM_LEGACY) {
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ item = legacy_bbm->reloc;
+ reloc_boundary = mtd_div_by_eb(mtd->size, mtd)
+ - legacy_bbm->max_reloc_entry;
+ max_reloc_entry = legacy_bbm->max_reloc_entry;
+ total = legacy_bbm->table->total;
+ }
+ else {
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ part = find_part(mtd, ofs);
+ if (part < 0)
+ return -EINVAL;
+ new_bbm->update_indicator |= 1 << part;
+ max_reloc_entry = new_bbm->max_reloc_entry[part];
+ bbt = &new_bbm->rbbt[part];
+ partinfo = &new_bbm->partinfo[part];
+ item = (struct reloc_item *)&bbt->reloc;
+ reloc_boundary = mtd_div_by_eb(partinfo->rp_start, mtd);
+ total = bbt->entry_num;
+ }
+
+ block = (int)(ofs >> mtd->erasesize_shift);
+ if (total >= max_reloc_entry) {
+ printk(KERN_WARNING "Relocation table currently have %d\n"
+ "Exceed max num %d, cannot relocate block %d!!\n",
+ total, max_reloc_entry, block);
+ return -ENOSPC;
+ }
+
+ if (block >= reloc_boundary)
+ return -EINVAL;
+
+	/* Identify whether the block has been relocated */
+ for(i = total - 1; i >= 0; i --) {
+ if(block == item[i].from)
+ entry_num = i;
+ }
+
+ rel_dist = bbm->rel_dist;
+ if (!rel_dist) {
+ rel_dist = kzalloc(max_reloc_entry, GFP_KERNEL);
+ /* need to save this */
+ bbm->rel_dist = rel_dist;
+ }
+ else
+ memset(rel_dist, 0, max_reloc_entry);
+	/* Find the available block with the largest number in the reserved area */
+ for (i = 0; i < total; i ++) {
+ _rel = (item[i].to != 65535) ? item[i].to : item[i].from;
+ rel_dist[_rel - reloc_boundary] = 1;
+ }
+
+ while (1) {
+ /* Make sure that reloc_block is pointing to a valid block */
+ for (reloc_block = max_reloc_entry - 1;
+ reloc_block >= 0; reloc_block --) {
+ if (rel_dist[reloc_block] == 0) {
+ printk(KERN_INFO "get block %d from reserved area\n", reloc_block + reloc_boundary);
+ break;
+ }
+ }
+
+ if (reloc_block < 0) {
+ if (entry_num >= 0) {
+ item[entry_num].from = item[entry_num].to;
+ item[entry_num].to = 65535;
+ }
+ printk(KERN_ERR "Reserved ared has no left blocks\n");
+ return -ENOSPC;
+ }
+
+ reloc_block = reloc_block + reloc_boundary;
+ memset(&instr, 0, sizeof(struct erase_info));
+ instr.mtd = mtd;
+ instr.addr = (uint64_t)reloc_block << mtd->erasesize_shift;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+ if (erase_success) {
+ printk(KERN_INFO "The block is verified\n");
+ break;
+ }
+ else {
+ /* skip it if the reloc_block is also a
+ * bad block
+ */
+ if (instr.fail_addr == instr.addr) {
+ item[total].from = reloc_block;
+ item[total].to = 65535;
+ total ++;
+				rel_dist[reloc_block - reloc_boundary] = 1;
+ continue;
+ } else
+ return -EINVAL;
+ }
+ }
+
+	/*
+	 * Record the relocation in the table: if the block was relocated
+	 * before, modify the original entry to point to the new relocated
+	 * block and let the old relocated block point to 65535; otherwise
+	 * create a new entry.
+	 */
+ if (entry_num != -1) {
+ item[total].from = item[entry_num].to;
+ item[total].to = 65535;
+ total ++;
+ item[entry_num].to = reloc_block;
+ } else {
+ item[total].from = block;
+ item[total].to = reloc_block;
+ total ++;
+ }
+
+ if (bbm->bbm_type == BBM_LEGACY)
+ legacy_bbm->table->total = total;
+ else
+ bbt->entry_num = total;
+
+ return 0;
+}
+
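+/*
+ * Write the in-memory ABBT back to either the main or the mirror ABBT block
+ * (selected by @main_bbt).  When @erase is set the block is erased first and
+ * the slots from the current one to the end of the block are rewritten
+ * (direction depends on the slot order); each written page is read back and
+ * compared.  If the block turns out to be bad it is relocated via
+ * ext_legacy_bbt_relocate() and the write is retried; *bbt_changed tells the
+ * caller that the tables were modified in the process.
+ */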
+static int pxa3xx_update_ext_legacy_abbt(struct mtd_info *mtd, int main_bbt,
+ int erase, int *bbt_changed)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct erase_info instr;
+ int update_blk, backup_blk;
+ size_t retlen;
+ loff_t offset = 0;
+ int ret = 1, pages;
+ int slot, start_slot, end_slot;
+ void *buf, *rbuf;
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+
+ if (bbt_changed)
+ *bbt_changed = 0;
+
+ if(!(legacy_bbm->status & ABBT_CHANGED))
+ return 0;
+
+ rbuf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!rbuf)
+ return -ENOMEM;
+
+ if (main_bbt) {
+ update_blk = abbm->main_blk;
+ backup_blk = abbm->mirror_blk;
+ } else {
+ update_blk = abbm->mirror_blk;
+ backup_blk = abbm->main_blk;
+ }
+
+ while (erase) {
+ memset(&instr, 0, sizeof(struct erase_info));
+ instr.mtd = mtd;
+ instr.addr = (uint64_t)update_blk << mtd->erasesize_shift;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+ if (erase_success) {
+ printk(KERN_INFO "Success to erase block %d\n",
+ update_blk);
+ break;
+ }
+
+ ret = ext_legacy_bbt_relocate(mtd,
+ update_blk << mtd->erasesize_shift, ABBT_SCRUB_NONE);
+ if (ret) {
+ printk(KERN_INFO "%s: relocate failed, exit\n",
+ __func__);
+ goto exit;
+ }
+
+ if (bbt_changed)
+ *bbt_changed = 1;
+ }
+
+ buf = abbm->abbt;
+ pages = mtd->erasesize >> mtd->writesize_shift;
+ if (abbm->order == ORDER_REVERSE) {
+ start_slot = abbm->cur_slot;
+ end_slot = erase ? (pages - 1) : abbm->cur_slot;
+ } else {
+ start_slot = erase ? 0 : abbm->cur_slot;
+ end_slot = abbm->cur_slot;
+ }
+
+ while (1) {
+ /*
+ * If abbt block is erased, need to write abbt from
+ * current slot page to end slot page.
+ */
+ for (slot = start_slot; slot <= end_slot; slot++) {
+ offset = slot << mtd->writesize_shift;
+ offset += update_blk << mtd->erasesize_shift;
+ ret = mtd->_write(mtd, offset, mtd->writesize, &retlen,
+ buf);
+ if (ret)
+ break;
+
+ /* Read back and compare */
+ memset(rbuf, 0xFF, mtd->writesize);
+ ret = mtd->_read(mtd, offset, mtd->writesize,
+ &retlen, rbuf);
+ if (ret < 0 || memcmp(buf, rbuf, mtd->writesize)) {
+ ret = -EIO;
+ break;
+ }
+ ret = 0;
+ }
+
+		/* Return if the write succeeded */
+ if (!ret)
+ break;
+
+ while (1) {
+ ret = ext_legacy_bbt_relocate(mtd,
+ update_blk << mtd->erasesize_shift,
+ ABBT_SCRUB_NONE);
+ if (ret) {
+ printk(KERN_INFO "%s: relocate failed, exit\n",
+ __func__);
+ goto exit;
+ }
+
+ if (!erase) {
+ if (abbm->order == ORDER_REVERSE)
+ ret = legacy_bbm_copy_peb(mtd,
+ backup_blk, update_blk,
+ abbm->cur_slot + 1, pages - 1, 0);
+ else
+ ret = legacy_bbm_copy_peb(mtd,
+ backup_blk, update_blk,
+ 0, abbm->cur_slot - 1, 0);
+ }
+
+ if (!ret)
+ break;
+
+ if (ret != -EAGAIN) {
+ printk(KERN_INFO "%s: fatal error, exit\n",
+ __func__);
+ goto exit;
+ }
+ }
+
+ if (bbt_changed)
+ *bbt_changed = 1;
+ }
+
+exit:
+ kfree(rbuf);
+ return ret;
+}
+
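+/*
+ * Compact the first-level BBT by dropping the dummy BLK_BAD --> BLK_BAD
+ * placeholder entries: each dummy is overwritten with the last entry and the
+ * entry count is reduced accordingly.
+ */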
+static void pxa3xx_bbt_remove_dummy(struct pxa3xx_legacy_bbm *legacy_bbm)
+{
+ struct reloc_item *item;
+ int total, i;
+
+ item = legacy_bbm->reloc;
+ total = legacy_bbm->table->total;
+
+ for (i = 0; i < total; i++) {
+ if ((item[i].from == BLK_BAD) && (item[i].to == BLK_BAD)) {
+ item[i].from = item[total - 1].from;
+ item[i].to = item[total - 1].to;
+ item[total - 1].from = BLK_BAD;
+ item[total - 1].to = BLK_BAD;
+ total--;
+ i--;
+ }
+ }
+
+ legacy_bbm->table->total = total;
+}
+
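+/*
+ * Write the first-level relocation table into the next free slot (page) of
+ * @block.  When the slots wrap around, the first begin_slot pages of the
+ * block are preserved across a block erase before the table is written
+ * again, and dummy BBT entries are dropped.  Every write is verified by a
+ * read-back compare and retried (up to three times) in the next slot on
+ * failure; on success the new slot number is returned.
+ */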
+int __pxa3xx_update_legacy_bbt(struct mtd_info *mtd, int block, int cur_slot)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct erase_info instr;
+ size_t retlen;
+ loff_t offset = 0;
+ int pages, erase_bbt;
+ int ret;
+ void *buf, *rbuf;
+ int max_retries = 3;
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ pages = mtd->erasesize >> mtd->writesize_shift;
+ erase_bbt = 0;
+
+ rbuf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!rbuf)
+ return -ENOMEM;
+retry:
+ if (max_retries-- <= 0)
+ goto ERR_EXIT2;
+
+ /* should write to the next slot */
+ if (legacy_bbm->order == ORDER_REVERSE) {
+ cur_slot--;
+ if (cur_slot < bbm->begin_slot) {
+ erase_bbt = 1;
+ cur_slot = pages - 1;
+ }
+ } else {
+ cur_slot++;
+ if (cur_slot >= pages) {
+ erase_bbt = 1;
+ cur_slot = bbm->begin_slot;
+ }
+ }
+
+	if (erase_bbt) {
+		buf = kzalloc(mtd->writesize * bbm->begin_slot, GFP_KERNEL);
+		if (!buf) {
+			kfree(rbuf);
+			return -ENOMEM;
+		}
+
+ ret = mtd->_read(mtd, (block << mtd->erasesize_shift),
+ mtd->writesize * bbm->begin_slot,
+ &retlen, buf);
+ if (ret < 0)
+ goto ERR_EXIT;
+
+ memset(&instr, 0, sizeof(struct erase_info));
+ instr.mtd = mtd;
+ instr.addr = (uint64_t)block << mtd->erasesize_shift;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+		if (!erase_success) {
+			printk(KERN_ERR "Failed to erase block %d!\n", block);
+			goto ERR_EXIT;
+		}
+		printk(KERN_INFO "Successfully erased block %d!\n", block);
+
+ ret = mtd->_write(mtd, (block << mtd->erasesize_shift),
+ mtd->writesize * bbm->begin_slot,
+ &retlen, buf);
+ if (ret)
+ goto ERR_EXIT;
+
+ kfree(buf);
+
+ pxa3xx_bbt_remove_dummy(legacy_bbm);
+ }
+
+ buf = legacy_bbm->table;
+ offset = (block << mtd->erasesize_shift) +
+ (cur_slot << mtd->writesize_shift);
+ ret = mtd->_write(mtd, offset, mtd->writesize, &retlen, buf);
+ if (ret) {
+ erase_bbt = 1;
+ goto retry;
+ }
+
+ /* Read back and compare */
+ memset(rbuf, 0xFF, mtd->writesize);
+ ret = mtd->_read(mtd, offset, mtd->writesize, &retlen, rbuf);
+ if (ret < 0 || memcmp(buf, rbuf, mtd->writesize)) {
+ erase_bbt = 1;
+ goto retry;
+ }
+
+ legacy_bbm->status &= ~BBT_CHANGED;
+ kfree(rbuf);
+ return cur_slot;
+
+ERR_EXIT:
+ kfree(buf);
+ERR_EXIT2:
+ kfree(rbuf);
+ return -EINVAL;
+}
+
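+/*
+ * Update the first-level BBT in @block and, when writing it fails for a
+ * reason other than -ENOMEM, relocate the BBT block itself and erase it so
+ * that booting can fall back to the backup copy recorded in the ABBT.
+ */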
+int pxa3xx_update_legacy_bbt(struct mtd_info *mtd, int block, int cur_slot)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct pxa3xx_abbt *abbt;
+ struct erase_info instr;
+ int allow_reloc;
+ int ret;
+
+	legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+ abbt = abbm->abbt;
+
+ if ((abbt->ver == ABBT_VERSION_1102 ||
+ abbt->ver == ABBT_VERSION_2001) && abbt->backup_bbt_loc > 0)
+ allow_reloc = 1;
+ else
+ allow_reloc = 0;
+
+ ret = __pxa3xx_update_legacy_bbt(mtd, block, cur_slot);
+ if (allow_reloc && ret < 0 && ret != -ENOMEM) {
+ /*
+ * Relocate BBT block and erase it, so that booting from
+ * another backup is available.
+ */
+ while (1) {
+ ret = ext_legacy_bbt_relocate(mtd,
+ block << mtd->erasesize_shift, ABBT_SCRUB_NONE);
+ if (ret) {
+ printk(KERN_INFO "%s: relocate failed, exit\n",
+ __func__);
+ return ret;
+ }
+
+ memset(&instr, 0, sizeof(struct erase_info));
+ instr.mtd = mtd;
+ instr.addr = (uint64_t)block << mtd->erasesize_shift;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+			if (erase_success) {
+				printk(KERN_INFO "erase bbt block %d success\n",
+						block);
+				break;
+			}
+
+ printk(KERN_INFO "erase block %d failed\n", block);
+ }
+ }
+
+ return ret;
+}
+
+int pxa3xx_update_ext_legacy_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct pxa3xx_abbt *abbt;
+ struct erase_info instr;
+ struct mtd_ecc_stats stats;
+ size_t retlen;
+ loff_t offset = 0;
+ int pages, erase_main, erase_mirror;
+ void *buf;
+ int ret, renew = 0;
+ int bbt_slot;
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+ abbt = abbm->abbt;
+ pages = mtd->erasesize >> mtd->writesize_shift;
+
+ if(!(legacy_bbm->status & ABBT_CHANGED))
+ goto update_bbt;
+
+	/*
+	 * The abbt may be changed while updating the mirror block, so the
+	 * main block needs to be updated again in that case.
+	 */
+ do {
+ erase_main = erase_mirror = 0;
+ if (abbm->order == ORDER_REVERSE) {
+ abbm->cur_slot--;
+ if (abbm->cur_slot < 0) {
+ erase_main = erase_mirror = 1;
+ abbm->cur_slot = pages - 1;
+ }
+ } else {
+ abbm->cur_slot++;
+ if (abbm->cur_slot >= pages) {
+ erase_main = erase_mirror = 1;
+ abbm->cur_slot = 0;
+ }
+ }
+ if (erase_main == 0) {
+ /* Check if next page is all 0xff, and can be written */
+ buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ offset = abbm->cur_slot << mtd->writesize_shift;
+ offset += abbm->main_blk << mtd->erasesize_shift;
+ stats.failed = mtd->ecc_stats.failed;
+ ret = mtd->_read(mtd, offset, mtd->writesize, &retlen, buf);
+ if (ret < 0 || !check_pattern(buf, 0xff, mtd->writesize)) {
+ erase_main = 1;
+ printk(KERN_ERR "abbt main block slot %d not writtable, ret=%d\n",
+ abbm->cur_slot, ret);
+ }
+
+ offset = abbm->cur_slot << mtd->writesize_shift;
+ offset += abbm->mirror_blk << mtd->erasesize_shift;
+ ret = mtd->_read(mtd, offset, mtd->writesize, &retlen, buf);
+ if (ret < 0 || !check_pattern(buf, 0xff, mtd->writesize)) {
+ erase_mirror = 1;
+ printk(KERN_ERR "abbt mirror block slot %d not writtable, ret=%d\n",
+ abbm->cur_slot, ret);
+ }
+ mtd->ecc_stats.failed = stats.failed;
+
+ kfree(buf);
+ }
+
+ /* Update legacy abbt main and mirror block */
+ pxa3xx_update_ext_legacy_abbt(mtd, 1, erase_main, NULL);
+ pxa3xx_update_ext_legacy_abbt(mtd, 0, erase_mirror, &renew);
+ } while(renew);
+
+ legacy_bbm->status &= ~ABBT_CHANGED;
+
+update_bbt:
+ if(!(legacy_bbm->status & BBT_CHANGED))
+ return 0;
+
+ bbt_slot = legacy_bbm->current_slot;
+ ret = pxa3xx_update_legacy_bbt(mtd, legacy_bbm->bbt_blk, bbt_slot);
+ if (abbt->ver == ABBT_VERSION_1102 || abbt->ver == ABBT_VERSION_2001) {
+ int backup_blk = abbt->backup_bbt_loc >> mtd->erasesize_shift;
+ if (backup_blk != legacy_bbm->bbt_blk)
+ ret = pxa3xx_update_legacy_bbt(mtd, backup_blk,
+ bbt_slot);
+ }
+ if (ret >= 0) {
+ legacy_bbm->current_slot = ret;
+ ret = 0;
+ } else {
+ printk(KERN_INFO "Can't write relocation table to device any more.\n");
+ }
+
+ return ret;
+}
+
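+/*
+ * Handle a read-disturbed (bit-flipped) block reported at @ofs.  Block 0 and
+ * the BBT backup block are never relocated.  Otherwise the block is moved to
+ * a spare block, recycled via a torture test and, if it passes, mapped back
+ * to itself so that the extra ABBT entry can be dropped again; the
+ * step-by-step flow is described in the comments inside the function.
+ */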
+static int pxa3xx_scrub_read_disturb(struct mtd_info *mtd, loff_t ofs)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm = NULL;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct pxa3xx_abbt *abbt;
+ int block = (int)(ofs >> mtd->erasesize_shift);
+
+ /* Should not relocate block 0 since bootrom must use it */
+ if (!bbm || !block || rd_scrubbing || disable_reloc) {
+ if (disable_reloc)
+ rd_disturb_cnt++;
+
+		if (!block)
+			printk(KERN_INFO "warn: block 0 bit-flip, will not relocate\n");
+ return 0;
+ }
+
+ rd_scrubbing = 1;
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+ abbt = abbm->abbt;
+ if (abbt->ver == ABBT_VERSION_1102 || abbt->ver == ABBT_VERSION_2001) {
+ int backup_blk = abbt->backup_bbt_loc >> mtd->erasesize_shift;
+ if (block == backup_blk) {
+ printk(KERN_INFO "warn: bbt backup block%d bit-flip, not relocate\n",
+ block);
+ return 0;
+ }
+ }
+
+ if (bbm->is_init == BBT_NOINIT) {
+ if (block == abbm->main_blk)
+ legacy_bbm->status |= ABBT_MAIN_SCRUB;
+ else if (block == abbm->mirror_blk)
+ legacy_bbm->status |= ABBT_MIRROR_SCRUB;
+ goto out;
+ } else if (abbm->cur_slot < 0) {
+		/* If the ABBT is not supported, skip the scrubbing flow */
+ goto out;
+ }
+
+ if (abbt->entry_num >= (abbm->max_entry - 2)) {
+ printk(KERN_ERR "ABBT table full: %ditems\n", abbt->entry_num);
+ goto out;
+ }
+
+ /*
+ * First relocate block A to any valid block B, after this:
+ * 65522 --> A
+ * A --> B
+ */
+ ext_legacy_bbt_relocate(mtd, ofs, ABBT_SCRUB_ANY);
+ pxa3xx_update_ext_legacy_bbt(mtd, 0);
+
+ /*
+ * Try to recycle block A, after this:
+ * 65521 --> A
+ * A --> B
+ */
+ pxa3xx_abbt_recycle_blk(mtd);
+
+ /*
+ * Try to map block A back to itself to decrease abbt entry,
+ * after this:
+ * 65522 --> B
+ * A --> A (redundant entry to be removed)
+ */
+ ext_legacy_bbt_relocate(mtd, ofs, ABBT_SCRUB_BACK);
+ pxa3xx_update_ext_legacy_bbt(mtd, 0);
+
+ /*
+ * Try to recycle block B, after this:
+ * 65521 --> B
+ */
+ pxa3xx_abbt_recycle_blk(mtd);
+out:
+ rd_scrubbing = 0;
+ return 0;
+}
+
+/* Write the relocation table back to device, if there's room. */
+int pxa3xx_update_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_new_bbm *new_bbm;
+ size_t retlen;
+ loff_t offset = 0;
+ void *buf;
+ int ret = 1, part = 0, pages, is_continue = 1, backup_size;
+ struct erase_info instr = {
+ .callback = NULL,
+ };
+
+ while (is_continue) {
+ switch (bbm->bbm_type) {
+ case BBM_LEGACY:
+ if (!ret) {
+ printk(KERN_INFO "update legacy bbt"
+ " at %llx\n", offset);
+ return 0;
+ }
+
+ pages = mtd->erasesize >> mtd->writesize_shift;
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+			if (legacy_bbm->current_slot <= bbm->begin_slot
+					|| legacy_bbm->current_slot > pages) {
+				backup_size = mtd->writesize * bbm->begin_slot;
+				buf = kmalloc(backup_size, GFP_KERNEL);
+				if (!buf) {
+					printk(KERN_ERR "Failed to allocate backup memory!!\n");
+					goto ERR_EXIT;
+				}
+				ret = mtd->_read(mtd, 0, backup_size, &retlen, buf);
+				if (ret < 0) {
+					printk(KERN_ERR "Failed to read the backup pages!!\n");
+					kfree(buf);
+					goto ERR_EXIT;
+				}
+				instr.mtd = mtd;
+				instr.addr = 0;
+				instr.len = mtd->erasesize;
+				instr.callback = pxa3xx_bbm_callback;
+				printk(KERN_INFO "erasing..");
+
+				should_reloc = 0;
+				mtd->_erase(mtd, &instr);
+				should_reloc = 1;
+				if (!erase_success) {
+					printk(KERN_ERR "erase block 0 failed!!!\n");
+					kfree(buf);
+					goto ERR_EXIT;
+				}
+
+				ret = mtd->_write(mtd, 0, backup_size, &retlen, buf);
+				kfree(buf);
+				if (ret) {
+					printk(KERN_ERR "Failed to restore the backup pages!!\n");
+					goto ERR_EXIT;
+				}
+				legacy_bbm->current_slot =
+					(mtd->erasesize >> mtd->writesize_shift) - 1;
+			} else {
+				/* should write to the next slot */
+				legacy_bbm->current_slot--;
+			}
+
+ buf = legacy_bbm->table;
+ offset = legacy_bbm->current_slot
+ << mtd->writesize_shift;
+ break;
+
+ case BBM_NEW:
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ if (!ret) {
+ printk(KERN_INFO "update new bbm bbt"
+ " at %llx\n", offset);
+ new_bbm->update_indicator &= ~(1 << part);
+ }
+ for (; part < MAX_SUPPRTED_PARTNUM; part ++)
+ if (new_bbm->update_indicator & (1 << part))
+ break;
+
+ if (part >= MAX_SUPPRTED_PARTNUM)
+ return 0;
+
+ offset = (new_bbm->rbbt_offset[part] + 1)
+ << mtd->writesize_shift;
+ if (!(unsigned int)(offset & mtd->erasesize_mask))
+ goto ERR_EXIT;
+
+ new_bbm->rbbt_offset[part] ++;
+ buf = new_bbm->rbbt;
+ break;
+
+ default:
+ return 0;
+ }
+
+ ret = mtd->_write(mtd, offset, mtd->writesize, &retlen, buf);
+ }
+
+ return 0;
+
+ERR_EXIT:
+ printk(KERN_ERR "Can't write relocation table to device any more.\n");
+ return -EINVAL;
+}
+
+/*
+ * Find the relocated block for the bad one.
+ * If the block is good, the original offset is returned; otherwise the
+ * offset within its relocated block is returned.
+ * If the relocated block is bad, a new entry will be added at the
+ * bottom of the relocation table.
+ */
+static loff_t pxa3xx_ext_legacy_search_reloc(struct mtd_info *mtd, loff_t ofs)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct pxa3xx_abbt *abbt;
+ struct reloc_item *item;
+ int i, block, max_reloc, total;
+
+ if (!bbm || disable_reloc)
+ return ofs;
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ if (legacy_bbm->current_slot < 0)
+ return ofs;
+
+ block = ofs >> mtd->erasesize_shift;
+ if (block == legacy_bbm->reloc_cache.from) {
+ ofs -= block << mtd->erasesize_shift;
+ block = legacy_bbm->reloc_cache.to;
+ ofs += block << mtd->erasesize_shift;
+ return ofs;
+ }
+
+ abbm = &legacy_bbm->abbm;
+ abbt = abbm->abbt;
+ max_reloc = mtd_div_by_eb(mtd->size, mtd) - legacy_bbm->reserved_blks;
+
+ if (block == abbm->main_blk || block == abbm->mirror_blk) {
+ item = legacy_bbm->reloc;
+ total = legacy_bbm->table->total;
+ } else {
+ item = abbt->reloc;
+ total = abbt->entry_num;
+ }
+
+ if ((block >= max_reloc &&
+ (block < mtd_div_by_eb(mtd->size, mtd) - ABBT_BLK_NUM)) ||
+ total == 0)
+ return ofs;
+
+ legacy_bbm->reloc_cache.from = block;
+ ofs -= block << mtd->erasesize_shift;
+ for (i = 0; i < total; i++) {
+ if (block == item[i].from) {
+ block = item[i].to;
+ break;
+ }
+ }
+ ofs += block << mtd->erasesize_shift;
+ legacy_bbm->reloc_cache.to = block;
+ return ofs;
+}
+
+static loff_t pxa3xx_search_reloc_tb(struct mtd_info *mtd, loff_t ofs)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct pxa3xx_new_bbm *new_bbm;
+ struct reloc_item *item;
+ int i, block, max_allow_relocated, entry_num, part;
+
+ if (!bbm)
+ return ofs;
+
+ block = ofs >> mtd->erasesize_shift;
+ switch (bbm->bbm_type) {
+ case BBM_LEGACY:
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+ if (abbm->cur_slot >= 0)
+ return pxa3xx_ext_legacy_search_reloc(mtd, ofs);
+
+ if (legacy_bbm->current_slot < 0)
+ return ofs;
+ /*
+ * In case abbt blocks are bad, find these two block relocation
+ * from legacy bbm. This can happen during abbt table scan after
+ * legacy bbm scan finished.
+ */
+ max_allow_relocated = mtd_div_by_eb(mtd->size, mtd);
+ if (bbm->is_init == BBT_INITED)
+ max_allow_relocated -= legacy_bbm->max_reloc_entry;
+
+ item = legacy_bbm->reloc;
+ entry_num = legacy_bbm->table->total;
+ break;
+
+ case BBM_NEW:
+ if (bbm->is_init == BBT_NOINIT)
+ return ofs;
+
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ part = find_part(mtd, ofs);
+ if (part < 0)
+ return ofs;
+ item = (struct reloc_item *)&new_bbm->rbbt[part].reloc;
+ entry_num = new_bbm->rbbt[part].entry_num;
+ max_allow_relocated =
+ mtd_div_by_eb(new_bbm->partinfo[part].end_addr, mtd);
+ break;
+
+ default:
+ return ofs;
+ }
+
+ if (block >= max_allow_relocated || entry_num == 0)
+ return ofs;
+
+ ofs -= block * mtd->erasesize;
+ for (i = 0; i < entry_num; i ++)
+ if (block == item[i].from)
+			/* !!! Do NOT add a break here; repeated lookups are needed */
+ block = item[i].to;
+
+ ofs += block * mtd->erasesize;
+
+ return ofs;
+}
+
+static int pxa3xx_init_bbm(struct mtd_info *mtd, int bbm_type)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_new_bbm *new_bbm;
+ int size, ret, entrys, max_relcs;
+
+ if (bbm_type != BBM_NEW && bbm_type != BBM_LEGACY)
+ return -EFAULT;
+
+ bbm = kzalloc(sizeof(struct pxa3xx_bbm), GFP_KERNEL);
+ if (!bbm)
+ return -ENOMEM;
+
+ bbm->search = pxa3xx_search_reloc_tb;
+ bbm->scrub_read_disturb = pxa3xx_scrub_read_disturb;
+ bbm->uninit = pxa3xx_uninit_reloc_tb;
+ bbm->check_partition = pxa3xx_check_partition;
+ mtd->bbm = bbm;
+ size = (bbm_type == BBM_NEW) ? sizeof(struct pxa3xx_new_bbm) :
+ sizeof(struct pxa3xx_legacy_bbm);
+ bbm->is_init = BBT_NOINIT;
+ bbm->no_sync = 0;
+ bbm->data_buf = kzalloc(size, GFP_KERNEL);
+ if (!bbm->data_buf) {
+ ret = -ENOMEM;
+ goto ERR_EXIT;
+ }
+
+ if (bbm_type == BBM_NEW) {
+ bbm->bbm_type = BBM_NEW;
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ new_bbm->main_block = -1;
+ new_bbm->back_block = -1;
+ new_bbm->fbbt = kzalloc(mtd->writesize, GFP_KERNEL);
+ new_bbm->part = kzalloc(mtd->writesize, GFP_KERNEL);
+ new_bbm->rbbt =
+ kzalloc(mtd->writesize * MAX_SUPPRTED_PARTNUM, GFP_KERNEL);
+ new_bbm->rbbt_offset =
+ kzalloc(sizeof(loff_t) * MAX_SUPPRTED_PARTNUM, GFP_KERNEL);
+ new_bbm->max_reloc_entry =
+ kzalloc(sizeof(int) * MAX_SUPPRTED_PARTNUM, GFP_KERNEL);
+ if (!new_bbm->rbbt
+ || !new_bbm->rbbt_offset
+ || !new_bbm->max_reloc_entry
+ || !new_bbm->fbbt
+ || !new_bbm->part) {
+ kfree(bbm->data_buf);
+ ret = -ENOMEM;
+ goto ERR_EXIT;
+ }
+
+ new_bbm->partinfo =
+ (struct pxa3xx_partinfo *)&new_bbm->part[1];
+ memset(new_bbm->fbbt, 0xff, mtd->writesize);
+ memset(new_bbm->part, 0xff, mtd->writesize);
+ }
+ else {
+ bbm->bbm_type = BBM_LEGACY;
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ entrys = mtd_div_by_eb(mtd->size, mtd);
+ entrys = ABBT_BLK_NUM +
+ (entrys * LEGACY_BBM_RELOC_PERCENTAGE + 99) / 100;
+ max_relcs = (mtd->writesize - sizeof(struct reloc_table))
+ / sizeof(struct reloc_item);
+
+ legacy_bbm->reserved_blks = entrys;
+ legacy_bbm->max_reloc_entry = (entrys < max_relcs) ?
+ entrys : max_relcs;
+
+ /* max entry for legacy abbm */
+ max_relcs = (mtd->writesize - sizeof(struct pxa3xx_abbt))
+ / sizeof(struct reloc_item);
+ legacy_bbm->abbm.max_entry = max_relcs;
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ legacy_bbm->table = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!legacy_bbm->table) {
+ kfree(bbm->data_buf);
+ ret = -ENOMEM;
+ goto ERR_EXIT;
+ }
+
+ legacy_bbm->abbm.abbt = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!legacy_bbm->abbm.abbt) {
+ kfree(legacy_bbm->table);
+ kfree(bbm->data_buf);
+ ret = -ENOMEM;
+ goto ERR_EXIT;
+ }
+
+ memset(legacy_bbm->table, 0xff, mtd->writesize);
+ legacy_bbm->reloc = (struct reloc_item *)&legacy_bbm->table[1];
+ legacy_bbm->current_slot = -1;
+ legacy_bbm->table->total = 0;
+
+ memset(legacy_bbm->abbm.abbt, 0xff, mtd->writesize);
+ legacy_bbm->abbm.main_blk =
+ mtd_div_by_eb(mtd->size, mtd) - 1;
+ legacy_bbm->abbm.mirror_blk =
+ legacy_bbm->abbm.main_blk - 1;
+ legacy_bbm->abbm.cur_slot = -1;
+ legacy_bbm->abbm.abbt->entry_num = 0;
+ legacy_bbm->reloc_cache.from = 0xFFFF;
+ }
+
+ return 0;
+
+ERR_EXIT:
+ kfree(bbm);
+ mtd->bbm = NULL;
+ return ret;
+}
+
+/*
+ * BBT layout (Append ABB version and Checksum at tail)
+ * | magic(2B) |
+ * | entry number(2B) |
+ * | entry(4B)... |
+ * | ABB version(4B) |
+ * | Owner(2bit) |
+ * | reserved(14bit) |
+ * | CRC(2B) |
+ */
+static bool pxa3xx_check_bbt(struct pxa3xx_legacy_bbm *legacy_bbm)
+{
+ struct reloc_table *table = legacy_bbm->table;
+ struct reloc_item *item = legacy_bbm->reloc;
+ unsigned int *pver, *pcsum;
+ unsigned short csum;
+
+	pver = (unsigned int *)(item + table->total);
+ if (*pver == 0xFFFFFFFF)
+ return true;
+
+ csum = bbm_crc16(0, (unsigned char *)legacy_bbm->table,
+ (sizeof(struct reloc_table) +
+ table->total * sizeof(struct reloc_item) + 4 + 2));
+
+ /* BBT crc locate at the end of all entries */
+ pcsum = (unsigned int *)(item + table->total) + 1;
+ return ((unsigned short)(*pcsum >> 16) == csum);
+}
+
+static bool pxa3xx_check_abbt(struct pxa3xx_abbt *abbt)
+{
+ unsigned short csum;
+
+ if (abbt->ver == ABBT_VERSION || abbt->ver == ABBT_VERSION_1102)
+ return true;
+
+ csum = abbt->crc;
+ abbt->crc = 0;
+ mb();
+ abbt->crc = bbm_crc16(0, (unsigned char *)abbt,
+ (sizeof(struct pxa3xx_abbt) +
+ abbt->entry_num * sizeof(struct reloc_item)));
+
+ return (csum == abbt->crc);
+}
+
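+/*
+ * Make the first-level BBT entry for block @from agree with the ABBT: if the
+ * two tables map @from to different targets, the stale BBT entry is turned
+ * into a dummy and a new entry matching the ABBT (or a dummy, when the ABBT
+ * has none) is appended, after which the BBT CRC is recomputed.
+ */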
+static void __pxa3xx_fix_bbt_from_abbt(struct pxa3xx_legacy_bbm *legacy_bbm,
+ struct pxa3xx_abbt *abbt, int from)
+{
+ struct reloc_item *item, *item_abbt;
+ int total, total_abbt;
+ int bbt_to = -1, abbt_to = -1;
+ int index, i;
+
+ item = legacy_bbm->reloc;
+ total = legacy_bbm->table->total;
+ item_abbt = abbt->reloc;
+ total_abbt = abbt->entry_num;
+
+ for (i = 0; i < total; i++) {
+ if (item[i].from == from) {
+ bbt_to = item[i].to;
+ index = i;
+ break;
+ }
+ }
+
+ for (i = 0; i < total_abbt; i++) {
+ if (item_abbt[i].from == from) {
+ abbt_to = item_abbt[i].to;
+ break;
+ }
+ }
+
+	/* Check if any mismatch exists */
+ if (bbt_to != abbt_to) {
+ if (bbt_to != -1) {
+ item[index].from = BLK_BAD;
+ item[index].to = BLK_BAD;
+ }
+ if (abbt_to != -1) {
+ item[total].from = from;
+ item[total].to = abbt_to;
+ } else {
+ item[total].from = BLK_BAD;
+ item[total].to = BLK_BAD;
+ }
+ total++;
+ legacy_bbm->status |= BBT_CHANGED;
+ printk("!!! bbt fixup: from %d --> %d to %d --> %d\n",
+ from, bbt_to, from, abbt_to);
+ }
+
+ legacy_bbm->table->total = total;
+ if(legacy_bbm->status & BBT_CHANGED) {
+ unsigned int *pver, *pcsum;
+ unsigned short csum;
+
+ /*
+ * update BBT crc:
+ * BBT layout (Append ABB version and Checksum at tail)
+ * | magic(2B) |
+ * | entry number(2B) |
+ * | entry(4B)... |
+ * | ABB version(4B) |
+ * | Owner(2bit) |
+ * | reserved(14bit) |
+ * | CRC(2B) |
+ */
+ pver = (unsigned int *)(item + total);
+ *pver = ABBT_VERSION_2001;
+ pcsum = pver + 1;
+ *pcsum = BBT_UBOOT; /* owner at lower 2bit */
+ mb();
+ csum = bbm_crc16(0, (unsigned char *)legacy_bbm->table,
+ (sizeof(struct reloc_table) +
+ total * sizeof(struct reloc_item) + 4 + 2));
+
+ *pcsum |= csum << 16;
+ }
+}
+
+static void pxa3xx_fix_bbt_from_abbt(struct mtd_info *mtd)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ int i;
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+
+ for (i = 0; i < MAX_OBM_BLOCK; i++)
+ __pxa3xx_fix_bbt_from_abbt(legacy_bbm, abbm->abbt, i);
+
+ __pxa3xx_fix_bbt_from_abbt(legacy_bbm, abbm->abbt, abbm->main_blk);
+ __pxa3xx_fix_bbt_from_abbt(legacy_bbm, abbm->abbt, abbm->mirror_blk);
+
+ pxa3xx_update_ext_legacy_bbt(mtd, 0);
+}
+
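+/*
+ * Locate the current ABBT.  The main and (if needed) mirror blocks are
+ * probed at both ends: comparing the refcnt values found there decides
+ * whether slots were written in positive or reverse page order, and
+ * page_search() then finds the newest valid slot.  A CRC failure on the
+ * main copy falls back to the mirror, and a main ABBT restored from the
+ * mirror is copied back afterwards.
+ */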
+static int ext_legacy_abbm_scan(struct mtd_info *mtd)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm =
+ (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ struct pxa3xx_legacy_abbm *abbm = &legacy_bbm->abbm;
+ struct pxa3xx_abbt *abbt = abbm->abbt;
+ int start_page, end_page;
+ int slot, start_slot, end_slot;
+ int low_valid, high_valid;
+ int low_refcnt, high_refcnt;
+	int ret;
+	size_t retlen;
+ int order = ORDER_REVERSE;
+ int backup_abbt = 0;
+
+backup:
+ legacy_bbm->status &= ~(ABBT_MAIN_SCRUB | ABBT_MIRROR_SCRUB);
+ start_page = backup_abbt ? abbm->mirror_blk : abbm->main_blk;
+ start_page <<= (mtd->erasesize_shift - mtd->writesize_shift);
+ end_page = start_page + (mtd->erasesize >> mtd->writesize_shift) - 1;
+
+ start_slot = 0;
+ end_slot = (mtd->erasesize >> mtd->writesize_shift) - 1;
+ slot = start_slot;
+ do {
+ ret = mtd->_read(mtd, (start_page + slot) << mtd->writesize_shift,
+ mtd->writesize, &retlen, (void *)abbt);
+ if (ret >= 0)
+ break;
+ } while (++slot <= end_slot);
+ if (ret >= 0 && abbt->ident == BBT_TYPE_ASR) {
+ low_valid = 1;
+ low_refcnt = abbt->refcnt;
+ } else {
+ low_valid = 0;
+ }
+
+ slot = end_slot;
+ do {
+ ret = mtd->_read(mtd, (start_page + slot) << mtd->writesize_shift,
+ mtd->writesize, &retlen, (void *)abbt);
+ if (ret >= 0)
+ break;
+ } while (--slot >= start_slot);
+
+ if (ret >= 0 && abbt->ident == BBT_TYPE_ASR) {
+ high_valid = 1;
+ high_refcnt = abbt->refcnt;
+ } else {
+ high_valid = 0;
+ }
+
+ if (low_valid && !high_valid) {
+ order = ORDER_POSITIVE;
+ } else if (!low_valid && high_valid) {
+ order = ORDER_REVERSE;
+ } else if (low_valid && high_valid) {
+ if (low_refcnt < high_refcnt)
+ order = ORDER_POSITIVE;
+ else
+ order = ORDER_REVERSE;
+ } else {
+ pr_err("ERR: No valid ABBT\n");
+ }
+ abbm->order = order;
+ abbm->cur_slot = page_search(mtd, start_page, end_page,
+ order, BBT_TYPE_ASR,
+ abbt, BBM_FULL_MASK);
+ abbm->cur_slot -= start_page;
+ if (abbm->cur_slot >= 0) {
+ if (!pxa3xx_check_abbt(abbt)) {
+ if (!backup_abbt) {
+ printk(KERN_INFO "abbt use backup block\n");
+ backup_abbt = 1;
+ goto backup;
+ } else {
+ printk(KERN_INFO "abbt crc failed\n");
+ }
+ }
+
+ if (legacy_bbm->status & ABBT_MAIN_SCRUB)
+ ext_legacy_bbt_relocate(mtd,
+ abbm->main_blk << mtd->erasesize_shift, ABBT_SCRUB_ANY);
+ if (legacy_bbm->status & ABBT_MIRROR_SCRUB)
+ ext_legacy_bbt_relocate(mtd,
+ abbm->mirror_blk << mtd->erasesize_shift, ABBT_SCRUB_ANY);
+ legacy_bbm->status &= ~(ABBT_MAIN_SCRUB |
+ ABBT_MIRROR_SCRUB);
+
+ pxa3xx_update_ext_legacy_bbt(mtd, 0);
+ printk(KERN_INFO "[abbt] at page:%d, order:%s, max:%d\n",
+ abbm->cur_slot,
+ order == ORDER_POSITIVE ? "positive" : "reverse",
+ abbm->max_entry);
+ dump_reloc_table(abbt->reloc, abbt->entry_num);
+
+ /* recover main ABBT from backup */
+ bbm->is_init = BBT_INITED;
+ if (backup_abbt) {
+ struct erase_info instr;
+
+ memset(&instr, 0, sizeof(struct erase_info));
+ instr.mtd = mtd;
+ instr.addr = (uint64_t)abbm->main_blk << mtd->erasesize_shift;
+ instr.len = mtd->erasesize;
+ ret = mtd_erase(mtd, &instr);
+ if (!ret) {
+ ret = legacy_bbm_copy_peb(
+ mtd, abbm->mirror_blk, abbm->main_blk,
+ 0, (mtd->erasesize >> mtd->writesize_shift) - 1,
+ DEST_SKIP_ALL_FF_PAGE);
+ if (!ret)
+ printk(KERN_INFO "Main ABBT recoverd\n");
+ }
+ }
+ return 0;
+ }
+
+ if (!backup_abbt) {
+ printk(KERN_INFO "try abbt backup block...\n");
+ backup_abbt = 1;
+ goto backup;
+ }
+
+	/* There should be at least one valid relocation table slot. */
+ printk(KERN_ERR "abbt: NO VALID reloc table can be recognized\n");
+ return -EINVAL;
+}
+
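+/*
+ * Scan @block for the legacy first-level relocation table.  As above, the
+ * slot order is deduced from the entry counts found at the two ends of the
+ * block and the newest valid slot is located with page_search(); the ABBT is
+ * scanned next, then the BBT CRC is checked and any mismatch with the ABBT
+ * is repaired.
+ */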
+static int legacy_bbm_scan(struct mtd_info *mtd, int block)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct reloc_table *table;
+ int slot, start_slot, end_slot;
+ int low_valid, high_valid;
+ int low_entrys, high_entrys;
+	int ret;
+	size_t retlen;
+ int order = ORDER_REVERSE;
+ int start_page = block << (mtd->erasesize_shift - mtd->writesize_shift);
+
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ table = legacy_bbm->table;
+
+ start_slot = bbm->begin_slot;
+ end_slot = (mtd->erasesize >> mtd->writesize_shift) - 1;
+
+ slot = start_slot;
+ do {
+ ret = mtd->_read(mtd, (slot + start_page) << mtd->writesize_shift,
+ mtd->writesize, &retlen, (void *)table);
+ if (ret >= 0)
+ break;
+ } while (++slot <= end_slot);
+ if (ret >= 0 && table->header == PXA_RELOC_HEADER) {
+ low_valid = 1;
+ low_entrys = table->total;
+ } else {
+ low_valid = 0;
+ }
+
+ slot = end_slot;
+ do {
+ ret = mtd->_read(mtd, (slot + start_page) << mtd->writesize_shift,
+ mtd->writesize, &retlen, (void *)table);
+ if (ret >= 0)
+ break;
+ } while (--slot >= start_slot);
+
+ if (ret >= 0 && *(unsigned short *)table == PXA_RELOC_HEADER) {
+ high_valid = 1;
+ high_entrys = table->total;
+ } else {
+ high_valid = 0;
+ }
+
+ if (low_valid && !high_valid) {
+ order = ORDER_POSITIVE;
+ } else if (!low_valid && high_valid) {
+ order = ORDER_REVERSE;
+ } else if (low_valid && high_valid) {
+ if (low_entrys < high_entrys)
+ order = ORDER_POSITIVE;
+ else
+ order = ORDER_REVERSE;
+ } else {
+ pr_err("ERR: No valid BBT in block %d!!!\n", block);
+ }
+ legacy_bbm->order = order;
+ legacy_bbm->current_slot = page_search(mtd,
+ start_page + bbm->begin_slot,
+ start_page + (mtd->erasesize >> mtd->writesize_shift) - 1,
+ order, PXA_RELOC_HEADER, table, BBM_HALF_MASK);
+
+ if (legacy_bbm->current_slot >= 0) {
+ printk(KERN_INFO "Max capacity of BBM is %d blocks!!\n",
+ legacy_bbm->max_reloc_entry);
+ legacy_bbm->current_slot -= start_page;
+ legacy_bbm->bbt_blk = block;
+ ext_legacy_abbm_scan(mtd);
+
+ if (!pxa3xx_check_bbt(legacy_bbm)) {
+ printk(KERN_INFO "bbt crc fail in blk%d\n", block);
+ return -EINVAL;
+ }
+
+		/* Restore the bbt from the abbt if a mismatch exists */
+ pxa3xx_fix_bbt_from_abbt(mtd);
+
+ printk(KERN_INFO "[bbt] at block:%d page:%d, begin:%d, order:%s\n",
+ block, legacy_bbm->current_slot, bbm->begin_slot,
+ order == ORDER_POSITIVE ? "positive" : "reverse");
+ dump_reloc_table(legacy_bbm->reloc, table->total);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+#define FOUND_FBBT 0x1
+#define FOUND_PART 0x2
+#define BBM_NOCOPY 0x1
+static int scan_fbbt_part(struct mtd_info *mtd, int block, void *buf, int flag)
+{
+	/*
+	 * The NTIM header occupies at least one page, so search for the
+	 * FBBT or the partition table starting from the second page; the
+	 * search ends at the fifth page.
+	 */
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_new_bbm *new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ struct pxa3xx_part *part;
+ struct pxa3xx_partinfo *partinfo;
+ int page, ret, part_num, found = 0, i, max_reloc_entry, rp_num;
+ int start_page, end_page;
+ loff_t offset;
+ size_t retlen;
+
+ max_reloc_entry = (mtd->writesize - 40) / sizeof(struct reloc_item);
+ for (page = 1; page < 5; page ++) {
+ if (found == (FOUND_PART | FOUND_FBBT))
+ break;
+
+ offset = ((uint64_t)block << mtd->erasesize_shift)
+ + (page << mtd->writesize_shift);
+ ret = mtd->_read(mtd, offset, mtd->writesize, &retlen, buf);
+
+ /* found FBBT */
+ if (ret >= 0 && *(unsigned int *)buf == PXA_NEW_BBM_HEADER) {
+ if (flag == BBM_NOCOPY)
+ return 1;
+
+ found |= FOUND_FBBT;
+ memcpy(new_bbm->fbbt, buf, retlen);
+ }
+
+ /* found partition table */
+ if (ret >= 0 && *(unsigned int *)buf == PXA_PART_IDET_1) {
+ if (*((unsigned int *)buf + 1) != PXA_PART_IDET_2)
+ continue;
+
+ if (flag == BBM_NOCOPY)
+ return 1;
+
+ found |= FOUND_PART;
+ memcpy(new_bbm->part, buf, retlen);
+ part = new_bbm->part;
+ part_num = part->part_num;
+
+ for (i = 0; i < part_num; i ++) {
+ partinfo = &new_bbm->partinfo[i];
+ start_page =
+ do_div(partinfo->rbbt_start, mtd->writesize);
+ end_page = start_page - 1 +
+ (mtd->erasesize >> mtd->writesize_shift);
+ new_bbm->rbbt_offset[i] =
+ page_search(mtd, start_page, end_page,
+ ORDER_POSITIVE, PXA_NEW_BBM_HEADER,
+ &new_bbm->rbbt[i], BBM_FULL_MASK);
+ rp_num = mtd_div_by_eb(partinfo->rp_size, mtd);
+ new_bbm->max_reloc_entry[i] =
+ (max_reloc_entry < rp_num) ?
+ max_reloc_entry : rp_num;
+ }
+ }
+ }
+
+ return found == (FOUND_PART | FOUND_FBBT);
+}
+
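+/*
+ * Find the main and backup boot blocks that hold the new-style BBM
+ * structures.  Only the first hit is parsed and copied into new_bbm;
+ * the second hit just records the backup block number.
+ */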
+static int new_bbm_scan(struct mtd_info *mtd)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_new_bbm *new_bbm;
+ int block, ret, flag;
+ void *buf;
+
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ flag = 0;
+ for (block = 0; block < 10; block ++) {
+ ret = scan_fbbt_part(mtd, block, buf, flag);
+ if (ret) {
+ flag = BBM_NOCOPY;
+ if (new_bbm->main_block == -1)
+ new_bbm->main_block = block;
+ else if (new_bbm->back_block == -1) {
+ new_bbm->back_block = block;
+ break;
+ }
+ }
+ }
+ kfree(buf);
+
+ if (new_bbm->main_block == -1 && new_bbm->back_block == -1) {
+ printk(KERN_ERR "New BBM initilization failed!!!!!!\n");
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "Found main block at %d, back at %d\n",
+ new_bbm->main_block, new_bbm->back_block);
+ new_bbm->update_indicator = 0;
+ printk(KERN_INFO "Factory marked bad blocks:\n");
+ dump_fact_bads(new_bbm->fbbt);
+ dump_part_info(mtd);
+ return 0;
+}
+
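+/*
+ * Detect which BBM scheme the flash uses: look for a legacy relocation
+ * table header (PXA_RELOC_HEADER) in the slots of the first 10 blocks.
+ * If none is found, assume the new BBM scheme.
+ */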
+int pxa3xx_get_bbt_type(struct mtd_info *mtd, int begin_slot)
+{
+ size_t retlen;
+ int ret, bbm_type;
+ void *buf;
+ int slot, base_slot, end_slot;
+ int i;
+
+ buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ bbm_type = BBM_NEW;
+ end_slot = (mtd->erasesize >> mtd->writesize_shift) - 1;
+ for (i = 0; i < 10; i++) {
+ base_slot = i << (mtd->erasesize_shift - mtd->writesize_shift);
+ slot = end_slot;
+ /* Search downward from the last slot */
+ do {
+ ret = mtd->_read(mtd,
+ (slot + base_slot) << mtd->writesize_shift,
+ mtd->writesize, &retlen, buf);
+ if (ret >= 0)
+ break;
+ } while (--slot >= begin_slot);
+
+ /* Search upward from the first slot */
+ if (ret >= 0 && *(unsigned short *)buf != PXA_RELOC_HEADER) {
+ slot = begin_slot;
+ do {
+ ret = mtd->_read(mtd,
+ (slot + base_slot) << mtd->writesize_shift,
+ mtd->writesize, &retlen, buf);
+ if (ret >= 0)
+ break;
+ } while (++slot <= end_slot);
+ }
+
+ /* This flash chip is using legacy BBM */
+ if (ret >= 0 && *(unsigned short *)buf == PXA_RELOC_HEADER) {
+ bbm_type = BBM_LEGACY;
+ break;
+ }
+ }
+
+ kfree(buf);
+ return bbm_type;
+}
+
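+/*
+ * Entry point for BBT scanning.  begin_slot skips the pages reserved
+ * at the start of each candidate block (boot/NTIM area); the size of
+ * that area depends on the SoC.
+ */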
+int pxa3xx_scan_bbt(struct mtd_info *mtd)
+{
+ struct pxa3xx_bbm *bbm;
+ int ret, bbm_type;
+ int begin_slot;
+ int i;
+
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+
+ if (cpu_is_pxa1826() || cpu_is_asr1802s())
+ begin_slot = (4*1024) / mtd->writesize;
+ else if (cpu_is_asr1803() || cpu_is_asr1826s())
+ begin_slot = (8*1024) / mtd->writesize;
+ else
+ begin_slot = (16*1024)/ mtd->writesize;
+
+ if (!mtd->bbm) {
+ bbm_type = pxa3xx_get_bbt_type(mtd, begin_slot);
+ ret = pxa3xx_init_bbm(mtd, bbm_type);
+ if (ret)
+ return ret;
+ bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ } else {
+ bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ bbm_type = bbm->bbm_type;
+ }
+
+ bbm->begin_slot = begin_slot;
+ if (bbm->is_init != BBT_NOINIT)
+ return 0;
+
+ if (bbm_type == BBM_LEGACY) {
+ for (i = 0; i < 10; i++) {
+ /* Scan the first 10 blocks to find a valid BBT */
+ ret = legacy_bbm_scan(mtd, i);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ /* There should be at least one valid relocation table slot. */
+ printk(KERN_ERR "NO VALID reloc table can be recognized\n");
+ printk(KERN_ERR "CAUTION: It may cause unpredicated error\n");
+ printk(KERN_ERR "Please re-initialize the flash.\n");
+ kfree(bbm->data_buf);
+ }
+ } else {
+ ret = new_bbm_scan(mtd);
+ }
+
+ if (!ret)
+ bbm->is_init = BBT_INITED;
+ else {
+ printk(KERN_ERR "BBM NOT Initialized, "
+ "Please re-init the flash!!!\n\n");
+ bbm->is_init = BBT_NOINIT;
+ }
+
+ return ret;
+}
+
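+/*
+ * Read the bad-block marker bytes from the OOB area at @ofs; a first
+ * byte other than 0xFF marks the block as bad.
+ */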
+static int checkbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_oob_ops ops;
+ uint32_t bad_mark;
+
+ ops.ooboffs = 0;
+ ops.ooblen = 2;
+ ops.len = 2;
+ ops.datbuf = NULL;
+ ops.oobbuf = (uint8_t *)&bad_mark;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ mtd->_read_oob(mtd, ofs, &ops);
+ if ((bad_mark & 0xFF) != 0xFF)
+ return 1;
+ else
+ return 0;
+}
+
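+/* Check whether the block at @ofs is recorded in the factory BBT. */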
+static int boot_part_bad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_new_bbm *new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ struct pxa3xx_bbt *fbbt = new_bbm->fbbt;
+ int block = ofs >> mtd->erasesize_shift, i;
+ uint32_t *fact_bad = (uint32_t *)&fbbt->fact_bad;
+
+ for (i = 0; i < fbbt->entry_num; i ++)
+ if (fact_bad[i] == block)
+ return 1;
+
+ return 0;
+}
+
+int pxa3xx_block_bad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+{
+ struct pxa3xx_bbm *bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_new_bbm *new_bbm;
+ struct reloc_table *table;
+ int part;
+
+ bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ if (bbm && (bbm->is_init != BBT_NOINIT)) {
+ if (bbm->is_init == BBT_FORCE_NOINIT)
+ return 0;
+
+ bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ switch (bbm->bbm_type) {
+ case BBM_LEGACY:
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ table = legacy_bbm->table;
+ /*
+ * If the relocation table is not yet full, then any block
+ * in the flash should be good.
+ */
+ if (legacy_bbm->current_slot >= bbm->begin_slot
+ && table->total <= legacy_bbm->max_reloc_entry)
+ return 0;
+
+ return checkbad(mtd, ofs);
+ case BBM_NEW:
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ part = find_part(mtd, ofs);
+ if (part >= 0) {
+ if (new_bbm->rbbt[part].entry_num
+ < new_bbm->max_reloc_entry[part])
+ return 0;
+ else
+ return 1;
+ }
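+ /* fall through */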
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
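+/*
+ * Mark the block at @ofs as bad: relocate it through the extended
+ * legacy ABBT when one is active, otherwise update the in-memory BBT
+ * directly, and flush the table to flash unless no_sync is set.
+ */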
+int pxa3xx_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ int ret;
+
+ if (!should_reloc)
+ return 0;
+
+ if (bbm) {
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ abbm = &legacy_bbm->abbm;
+ if (bbm->bbm_type != BBM_LEGACY && bbm->bbm_type != BBM_NEW) {
+ printk(KERN_WARNING "There is no way"
+ " to mark bad at %llx", ofs);
+ return 0;
+ }
+
+ if (bbm->is_init == BBT_NOINIT) {
+ printk(KERN_WARNING "You should scan bbm first!!\n");
+ return 0;
+ }
+
+ if (abbm->cur_slot >= 0) {
+ ret = ext_legacy_bbt_relocate(mtd, ofs, ABBT_SCRUB_NONE);
+ if (!ret && !bbm->no_sync)
+ ret = pxa3xx_update_ext_legacy_bbt(mtd, 0);
+ } else {
+ ret = sync_pxa3xx_bbt(mtd, ofs);
+ if (!ret && !bbm->no_sync)
+ ret = pxa3xx_update_bbt(mtd, 0);
+ }
+ return ret;
+ }
+ else {
+ printk(KERN_ERR "Unable to mark bad block at %llx\n", ofs);
+ return -EFAULT;
+ }
+}
+
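+/*
+ * Rebuild the legacy BBT from scratch.  When @backup is set, the boot
+ * pages at the start of block 0 are read out before the block is
+ * erased and written back afterwards.  The rest of the flash is then
+ * scanned for bad-block marks to fill a fresh relocation table, which
+ * is written to flash unless no_sync is set.
+ */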
+static int recover_legacy_bbm(struct mtd_info *mtd, int backup)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_legacy_abbm *abbm;
+ struct reloc_table *table;
+ struct erase_info instr = {
+ .callback = NULL,
+ };
+ int backup_size, ret = 0;
+ loff_t ofs;
+ void *buf;
+ size_t retlen;
+
+ backup_size = mtd->writesize * bbm->begin_slot;
+ bbm->is_init = BBT_FORCE_NOINIT;
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ legacy_bbm->current_slot = mtd->erasesize >> mtd->writesize_shift;
+ abbm = &legacy_bbm->abbm;
+ table = legacy_bbm->table;
+ table->header = PXA_RELOC_HEADER;
+ table->total = 0;
+
+ if (backup) {
+ buf = kzalloc(backup_size, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "MEM alloc failed!!\n");
+ return -ENOMEM;
+ }
+ printk(KERN_INFO "Ready to read..");
+ mtd->_read(mtd, 0, backup_size, &retlen, buf);
+ }
+
+ instr.mtd = mtd;
+ instr.addr = 0;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+ printk(KERN_INFO "erasing..");
+
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+ if (!erase_success) {
+ printk(KERN_ERR "erase block 0 failed!!!\n");
+ return -EFAULT;
+ }
+
+ if (backup) {
+ printk(KERN_INFO "write back..");
+ mtd->_write(mtd, 0, backup_size, &retlen, buf);
+ kfree(buf);
+ }
+
+ printk(KERN_INFO "collect bad info..");
+ for (ofs = mtd->erasesize; ofs < mtd->size; ofs += mtd->erasesize)
+ if (checkbad(mtd, ofs)) {
+ printk(KERN_INFO "\nmark %llx as bad in bbt\n", ofs);
+ if (abbm->cur_slot >= 0)
+ ext_legacy_bbt_relocate(mtd, ofs, ABBT_SCRUB_NONE);
+ else
+ sync_pxa3xx_bbt(mtd, ofs);
+ }
+
+ if (!bbm->no_sync) {
+ printk(KERN_INFO "update bbt..");
+ if (abbm->cur_slot >= 0)
+ ret = pxa3xx_update_ext_legacy_bbt(mtd, 0);
+ else
+ ret = pxa3xx_update_bbt(mtd, 0);
+ }
+ printk(KERN_INFO "done\n");
+
+ return ret;
+}
+
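+/*
+ * Append @block to the factory BBT unless it is already recorded;
+ * returns the new entry count, or 0 if the block was already present.
+ */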
+static int update_fbbt(struct pxa3xx_bbt *fbbt, int block)
+{
+ uint32_t *fact_bad = (uint32_t *)&fbbt->fact_bad;
+ int i;
+
+ for (i = 0; i < fbbt->entry_num; i ++)
+ if (fact_bad[i] == block)
+ return 0;
+
+ fact_bad[i] = block;
+ fbbt->entry_num ++;
+
+ return fbbt->entry_num;
+}
+
+/*
+ * recover_new_bbm only tries to rebuild the FBBT and uses the
+ * default partition table to build the partition table (PT).
+ */
+static int recover_new_bbm(struct mtd_info *mtd, struct reloc_item * item,
+ int num, int reserve_last_page)
+{
+ struct pxa3xx_bbm *bbm = (struct pxa3xx_bbm *)mtd->bbm;
+ struct pxa3xx_new_bbm *new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ struct pxa3xx_bbt *fbbt = new_bbm->fbbt;
+ struct pxa3xx_part *part = new_bbm->part;
+ struct erase_info instr = {
+ .callback = NULL,
+ };
+ int boot_block, block, total_block, reserved_block, ret;
+ int rbbt, rbbt_back, max_reloc_entry, len, failed = 0;
+ loff_t ofs;
+ size_t retlen;
+ u_char *backup_buf = NULL;
+
+ /*
+ * This is the pristine state: try to find two good blocks
+ * without the FBBT's help, then build up a new FBBT.
+ */
+ backup_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
+ if (!backup_buf) {
+ printk(KERN_ERR "Fail to allocate recovery memory!!\n");
+ return -ENOMEM;
+ }
+ bbm->is_init = BBT_FORCE_NOINIT;
+ if (new_bbm->main_block == -1) {
+ memset(new_bbm->rbbt, 0xff, mtd->writesize);
+ new_bbm->rbbt->ident = PXA_NEW_BBM_HEADER;
+ new_bbm->rbbt->type = BBT_TYPE_RUNT;
+ if (item != NULL && num > 0) {
+ memcpy(&(new_bbm->rbbt->reloc), (void *)item,
+ sizeof(struct reloc_item) * num);
+ new_bbm->rbbt->entry_num = num;
+ }
+ else
+ new_bbm->rbbt->entry_num = 0;
+ max_reloc_entry = (mtd->writesize - sizeof(struct pxa3xx_bbt))
+ / sizeof(struct reloc_item) + 1;
+
+ fbbt->ident = PXA_NEW_BBM_HEADER;
+ fbbt->type = BBT_TYPE_FACT;
+ fbbt->entry_num = 0;
+ instr.mtd = mtd;
+ instr.len = mtd->erasesize;
+ instr.callback = pxa3xx_bbm_callback;
+ printk(KERN_INFO "Rebuild new bbm as init state..\n");
+ for (boot_block = 0; boot_block < BOOT_PRAT_MAX; boot_block ++) {
+ if (failed) {
+ ofs = (uint64_t)(boot_block - 1) << mtd->erasesize_shift;
+ new_bbm->main_block = -1;
+ update_fbbt(fbbt, boot_block - 1);
+ failed = 0;
+ }
+ instr.addr = (uint64_t)boot_block << mtd->erasesize_shift;
+ ret = mtd->_read(mtd, instr.addr, mtd->erasesize,
+ &retlen, backup_buf);
+ if (ret < 0) {
+ printk(KERN_ERR "Cannot backup block %d!!\n", boot_block);
+ failed = 1;
+ continue;
+ }
+ if (!reserve_last_page)
+ memset(backup_buf + mtd->erasesize - mtd->writesize, 0xff,
+ mtd->writesize);
+
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+ if (!erase_success) {
+ printk(KERN_ERR "erase %llx failed!!\n", instr.addr);
+ failed = 1;
+ continue;
+ }
+ else {
+ ret = mtd->_write(mtd, instr.addr,
+ mtd->writesize * bbm->begin_slot,
+ &retlen, backup_buf);
+ if (ret) {
+ printk(KERN_ERR "restore backup two page failed!!\n");
+ failed = 1;
+ continue;
+ }
+ new_bbm->main_block = boot_block;
+ }
+
+ printk(KERN_INFO "Get main block at %d\n", new_bbm->main_block);
+ part->identifier = (uint64_t)PXA_PART_IDET_2 << 32 | PXA_PART_IDET_1;
+
+ /*
+ * Calculate the partition range under the default setting of a
+ * single partition: the first BOOT_PRAT_MAX blocks are used as
+ * the boot partition, and the next two good blocks are reserved
+ * for the run-time BBT.
+ */
+ part->part_num = 1;
+ new_bbm->partinfo->type = PART_LOGI;
+ total_block = mtd_div_by_eb(mtd->size, mtd);
+ rbbt = rbbt_back = -1;
+ instr.mtd = mtd;
+ instr.callback = pxa3xx_bbm_callback;
+ instr.len = mtd->erasesize;
+ for (block = BOOT_PRAT_MAX; block < total_block; block ++) {
+ instr.addr = (uint64_t)block << mtd->erasesize_shift;
+ should_reloc = 0;
+ mtd->_erase(mtd, &instr);
+ should_reloc = 1;
+ if (!erase_success) {
+ printk(KERN_ERR "Erase %llx failed!!\n", instr.addr);
+ sync_pxa3xx_bbt(mtd, instr.addr);
+ update_fbbt(fbbt, block);
+ }
+ else {
+ ofs = (uint64_t)block << mtd->erasesize_shift;
+ if (rbbt == -1) {
+ ret = mtd->_write(mtd, ofs, mtd->writesize,
+ &retlen, (void *)new_bbm->rbbt);
+ if (ret)
+ continue;
+ rbbt = block;
+ }
+ else if (rbbt_back == -1) {
+ ret = mtd->_write(mtd, ofs, mtd->writesize,
+ &retlen, (void *)new_bbm->rbbt);
+ if (ret)
+ continue;
+ rbbt_back = block ++;
+ break;
+ }
+ }
+ }
+
+ printk(KERN_INFO "\nGet RBBT at block %d, its back at %d\n",
+ rbbt, rbbt_back);
+ reserved_block = ((total_block - block) / 100)
+ * NEW_BBM_RELOC_PERCENTAGE;
+ new_bbm->partinfo->start_addr = (uint64_t)block << mtd->erasesize_shift;
+ new_bbm->partinfo->end_addr = ((uint64_t)(total_block - reserved_block)
+ << mtd->erasesize_shift) - 1;
+ new_bbm->partinfo->rp_start = (uint64_t)(total_block - reserved_block)
+ << mtd->erasesize_shift;
+ new_bbm->partinfo->rp_size = (uint64_t)reserved_block << mtd->erasesize_shift;
+ new_bbm->partinfo->rp_algo = RP_UPWD;
+ new_bbm->partinfo->rbbt_type = PXA_NEW_BBM_HEADER;
+ new_bbm->partinfo->rbbt_start = (uint64_t)rbbt
+ << mtd->erasesize_shift;
+ new_bbm->partinfo->rbbt_start_back = (uint64_t)rbbt_back
+ << mtd->erasesize_shift;
+
+ new_bbm->rbbt_offset[0] = do_div(new_bbm->partinfo->rbbt_start, mtd->writesize);
+ new_bbm->max_reloc_entry[0] = (max_reloc_entry < reserved_block) ?
+ max_reloc_entry : reserved_block;
+
+ ofs = (bbm->begin_slot << mtd->writesize_shift)
+ + ((uint64_t)new_bbm->main_block << mtd->erasesize_shift);
+
+ printk(KERN_INFO "\nBegin to write main block..\n");
+ ret = mtd->_write(mtd, ofs, mtd->writesize, &retlen, (void *)fbbt);
+ if (ret) {
+ printk(KERN_ERR "Write fbbt failed at %llx\n", ofs);
+ failed = 1;
+ continue;
+ }
+
+ ofs = ((bbm->begin_slot + 1) << mtd->writesize_shift)
+ + ((uint64_t)new_bbm->main_block << mtd->erasesize_shift);
+ ret = mtd->_write(mtd, ofs, mtd->writesize, &retlen, (void *)part);
+ if (ret) {
+ printk(KERN_ERR "Write part failed at %llx\n", ofs);
+ failed = 1;
+ continue;
+ }
+
+ ofs = ((bbm->begin_slot + 2) << mtd->writesize_shift)
+ + ((uint64_t)new_bbm->main_block << mtd->erasesize_shift);
+ len = mtd->erasesize - (mtd->writesize * (bbm->begin_slot + 2));
+ ret = mtd->_write(mtd, ofs, len, &retlen, backup_buf
+ + (mtd->writesize * (bbm->begin_slot + 2)));
+ if (ret) {
+ printk(KERN_ERR "restore obm part failed!!\n");
+ failed = 1;
+ }
+ else
+ break;
+ }
+
+ if (boot_block >= BOOT_PRAT_MAX) {
+ new_bbm->main_block = -1;
+ printk(KERN_ERR "There is no good blocks in first %d"
+ " blocks!\n You should use another"
+ " flash now!!\n", BOOT_PRAT_MAX);
+ return -EFAULT;
+ }
+ }
+
+ /*
+ * Try to find a good block with the FBBT's help
+ * and back up the main block into the backup block.
+ */
+ if (new_bbm->back_block == -1) {
+ ofs = (uint64_t)new_bbm->main_block << mtd->erasesize_shift;
+ ret = mtd->_read(mtd, ofs, mtd->erasesize, &retlen, backup_buf);
+ if (ret < 0) {
+ printk(KERN_ERR "Cannot load main boot block!!\n");
+ return -EFAULT;
+ }
+
+ instr.mtd = mtd;
+ instr.callback = pxa3xx_bbm_callback;
+ instr.len = mtd->erasesize;
+ instr.addr = 0;
+ for (block = 0; block < BOOT_PRAT_MAX; block ++,
+ instr.addr += mtd->erasesize) {
+ if (block == new_bbm->main_block
+ || boot_part_bad(mtd, instr.addr))
+ continue;
+
+ ret = mtd->_erase(mtd, &instr);
+ if (!ret) {
+ printk(KERN_INFO "Got backup block at block %d\n", block);
+ printk(KERN_INFO "\nBegin to write backup block..\n");
+ ret = mtd->_write(mtd, instr.addr, mtd->erasesize,
+ &retlen, backup_buf);
+ if (ret) {
+ printk("Failed to backup to %llx\n", instr.addr);
+ continue;
+ }
+
+ new_bbm->back_block = block;
+ break;
+ }
+ }
+
+ if (new_bbm->back_block == -1)
+ printk(KERN_WARNING "Unable to recover backup boot block!!\n");
+ }
+
+ if (backup_buf)
+ kfree(backup_buf);
+
+ printk(KERN_INFO "done!!\n");
+ return 0;
+}
+
+/*
+ * bbm_type:
+ * BBM_NONE: recover the bbm according to original setting
+ * BBM_LEGACY: recover bbm as legacy bbm
+ * BBM_NEW: recover bbm as new bbm
+ */
+int pxa3xx_bbm_recovery(struct mtd_info *mtd, int bbm_type, struct reloc_item *item,
+ int num, int reserve_last_page)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ int ret;
+
+ if (bbm && bbm->bbm_type != bbm_type) {
+ pxa3xx_uninit_reloc_tb(mtd);
+ bbm = mtd->bbm;
+ }
+
+ if (!bbm) {
+ ret = pxa3xx_init_bbm(mtd, bbm_type);
+ if (ret) {
+ printk(KERN_ERR "Init failed!!!\n");
+ return -EFAULT;
+ }
+ }
+
+ if (bbm_type == BBM_NONE)
+ bbm_type = bbm->bbm_type;
+
+ switch (bbm_type) {
+ case BBM_LEGACY:
+ printk(KERN_INFO "Ready to recover bbm as legacy!\n");
+ ret = recover_legacy_bbm(mtd, 1);
+ break;
+
+ case BBM_NEW:
+ printk(KERN_INFO "Ready to recover bbm as new!\n");
+ ret = recover_new_bbm(mtd, item, num, reserve_last_page);
+ break;
+
+ case BBM_NONE:
+ default:
+ printk(KERN_ERR "Cannot fulfill recovery bbm task!!!\n");
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static char *bbm_name = "MRVL_BBM";
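+/*
+ * Adjust the caller-supplied partition layout so that the blocks
+ * reserved for relocation are exposed as a separate, read-only
+ * "MRVL_BBM" pseudo-partition and never overlap a user partition.
+ */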
+static int do_check_part(struct mtd_info *mtd, struct mtd_partition *part_orig,
+ struct mtd_partition *part, int *num)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ struct pxa3xx_new_bbm *new_bbm;
+ struct pxa3xx_legacy_bbm *legacy_bbm;
+ struct pxa3xx_partinfo *partinfo;
+ uint64_t boundary_offset, orig_size;
+ int reloc_boundary, i, j, err, last_add, last_add_orig;
+
+ if (bbm->bbm_type == BBM_LEGACY) {
+ legacy_bbm = (struct pxa3xx_legacy_bbm *)bbm->data_buf;
+ reloc_boundary = mtd_div_by_eb(mtd->size, mtd)
+ - legacy_bbm->max_reloc_entry;
+ boundary_offset = (uint64_t)reloc_boundary << mtd->erasesize_shift;
+
+ if (boundary_offset < part[*num - 1].offset) {
+ printk(KERN_ERR "The last part overlay with the reserved area!!\n");
+ return -EFAULT;
+ }
+
+ memcpy(part, part_orig, *num * sizeof(struct mtd_partition));
+ if (part[*num - 1].size == MTDPART_SIZ_FULL ||
+ (boundary_offset < part[*num - 1].size
+ + part[*num - 1].offset)) {
+
+ part[*num - 1].size = boundary_offset - part[*num - 1].offset;
+ part[*num].name = bbm_name;
+ part[*num].offset = boundary_offset;
+ part[*num].size = MTDPART_SIZ_FULL;
+ part[*num].mask_flags = MTD_WRITEABLE;
+ *num = *num + 1;
+ }
+ return 0;
+ }
+
+ /*
+ * The following is for the new BBM scheme: the reserved pool must
+ * be included in one of the defined partitions, otherwise the
+ * check fails.
+ */
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ last_add_orig = last_add = err = 0;
+ for (i = 0, j = 0; i < new_bbm->part->part_num && j < *num && !err; i ++) {
+ partinfo = &new_bbm->partinfo[i];
+ for (; j < *num; j ++) {
+ if (part_orig[j].size == MTDPART_SIZ_FULL)
+ orig_size = mtd->size - part_orig[j].offset;
+ else
+ orig_size = part_orig[j].size;
+
+ if ((orig_size + part_orig[j].offset)
+ < partinfo->rp_start)
+ continue;
+ if (part_orig[j].offset > partinfo->rp_start) {
+ err = 1;
+ break;
+ }
+ if ((orig_size + part_orig[j].offset)
+ != (partinfo->rp_start + partinfo->rp_size)) {
+ err = 1;
+ break;
+ }
+ else {
+ memcpy(&part[last_add], &part_orig[last_add_orig],
+ (j - last_add + 1) * sizeof(struct mtd_partition));
+ last_add += (j - last_add_orig) + 1;
+ last_add_orig = j;
+ part[last_add - 1].size = partinfo->rp_start
+ - part[last_add -1].offset;
+ part[last_add].name = bbm_name;
+ part[last_add].offset = partinfo->rp_start;
+ part[last_add].size = partinfo->rp_size;
+ part[last_add].mask_flags = MTD_WRITEABLE;
+ last_add += 1;
+ }
+ }
+ }
+
+ if (!err)
+ *num += (last_add - last_add_orig - 1);
+
+ return err;
+}
+
+static struct mtd_partition *pxa3xx_check_partition(struct mtd_info *mtd,
+ struct mtd_partition *part, int *num)
+{
+ struct pxa3xx_bbm *bbm = mtd->bbm;
+ struct pxa3xx_new_bbm *new_bbm;
+ struct mtd_partition *new_part;
+ int part_num, alloc_size;
+
+ if (bbm->bbm_type == BBM_LEGACY)
+ part_num = 1;
+ else {
+ new_bbm = (struct pxa3xx_new_bbm *)bbm->data_buf;
+ part_num = new_bbm->part->part_num;
+ }
+
+ alloc_size = (*num + part_num) * sizeof(struct mtd_partition);
+ new_part = kzalloc(alloc_size, GFP_KERNEL);
+ if (!new_part) {
+ printk(KERN_ERR "OUT of memory!!\n");
+ return NULL;
+ }
+
+ if (!do_check_part(mtd, part, new_part, num))
+ return new_part;
+ else
+ return NULL;
+}