// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Macronix
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>

#define SPINAND_MFR_MACRONIX		0xC2
#define MACRONIX_ECCSR_MASK		0x0F

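/*
 * The MX35LF entries in the device table below use read_cache_variants
 * (single, dual and quad output reads only), while the MX35UF entries use
 * read_cache_variants2, which also enables the dual and quad I/O opcodes.
 */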
static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(read_cache_variants2,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));

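/*
 * OOB layout for the MX35LF*GE4AB parts: the ECC parity bytes are not
 * exposed in the 64-byte spare area, so no ECC region is reported and
 * everything but the 2-byte bad block marker is advertised as free.
 */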
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int mx35lfxge4ab_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	region->offset = 2;
	region->length = mtd->oobsize - 2;

	return 0;
}

static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
	.ecc = mx35lfxge4ab_ooblayout_ecc,
	.free = mx35lfxge4ab_ooblayout_free,
};

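/*
 * Read the Macronix ECC status register (opcode 0x7c, one dummy byte): the
 * low nibble reports how many bitflips were corrected on the last read.
 */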
static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_DUMMY(1, 1),
					  SPI_MEM_OP_DATA_IN(1, eccsr, 1));
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op);
	if (ret)
		return ret;

	*eccsr &= MACRONIX_ECCSR_MASK;

	return 0;
}

static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
				       u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 eccsr;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * Let's try to retrieve the real maximum number of bitflips
		 * in order to avoid forcing the wear-leveling layer to move
		 * data around if it's not necessary.
		 */
		if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
			return nand->eccreq.strength;

		if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
			return nand->eccreq.strength;

		return eccsr;

	default:
		break;
	}

	return -EINVAL;
}

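/*
 * OOB layout for the MX35UF*GE4AC parts: the 64-byte spare area is split
 * into four 16-byte sections; bytes 8-15 of each section are reported as
 * ECC, bytes 2-7 as free, and the first two bytes of each section (which
 * include the bad block marker) are left out.
 */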
static int mx35ufxge4ax_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	region->offset = (16 * section) + 8;
	region->length = 8;

	return 0;
}

static int mx35ufxge4ax_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	region->offset = (16 * section) + 2;
	region->length = 6;

	return 0;
}

static const struct mtd_ooblayout_ops mx35ufxge4ax_ooblayout = {
	.ecc = mx35ufxge4ax_ooblayout_ecc,
	.free = mx35ufxge4ax_ooblayout_free,
};

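/*
 * Program the bit flip threshold (BFT): the upper nibble of the vendor
 * feature register at 0x10 holds the current threshold, which is only
 * rewritten when it differs from the requested value.
 */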
static int mx35ufxge4ad_set_bft(struct spinand_device *spinand, u8 threshold)
{
	u8 val, bft;
	struct spi_mem_op op_rd = SPINAND_GET_FEATURE_OP(0x10, &val);
	int ret;

	ret = spi_mem_exec_op(spinand->spimem, &op_rd);
	if (ret)
		return ret;

	bft = (val & 0xf0) >> 4;
	pr_debug("%s: read BFT=0x%x threshold=%d\n", __func__, val, threshold);

	if (bft != threshold) {
		struct spi_mem_op op_wr = SPINAND_SET_FEATURE_OP(0x10, &val);

		/* Only update the BFT field, keep the other bits untouched. */
		val = (val & 0x0f) | (threshold << 4);
		ret = spi_mem_exec_op(spinand->spimem, &op_wr);
		if (ret)
			return ret;

		pr_debug("%s: update BFT=0x%x\n", __func__, val);
	}

	return 0;
}

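/*
 * ECC status for the MX35UF parts: any status other than "no bitflips" or
 * "uncorrectable error" means bitflips were corrected, so query the ECC
 * status register for the actual count instead of assuming the worst case.
 */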
static int mx35ufxge4ax_ecc_get_status(struct spinand_device *spinand,
				       u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 eccsr;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		/*
		 * Let's try to retrieve the real maximum number of bitflips
		 * in order to avoid forcing the wear-leveling layer to move
		 * data around if it's not necessary.
		 */
		if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
			return nand->eccreq.strength;

		if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
			return nand->eccreq.strength;

		return eccsr;
	}

	return -EINVAL;
}

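/*
 * OOB layout for the MX35UF*GE4AD parts (128-byte spare area): the ECC
 * bytes are reported in the upper 64 bytes (16 per section), while the
 * lower 64 bytes are free except for the first two bytes of each 16-byte
 * section (which include the bad block marker).
 */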
static int mx35ufxge4ad_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	region->offset = (16 * section) + 64;
	region->length = 16;

	return 0;
}

static int mx35ufxge4ad_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	region->offset = (16 * section) + 2;
	region->length = 14;

	return 0;
}

static const struct mtd_ooblayout_ops mx35ufxge4ad_ooblayout = {
	.ecc = mx35ufxge4ad_ooblayout_ecc,
	.free = mx35ufxge4ad_ooblayout_free,
};

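/*
 * All MX35UF entries below carry SPINAND_NEED_SET_BFT so that the bit flip
 * threshold is programmed at init time, and use read_cache_variants2 to
 * enable the dual/quad I/O read opcodes.
 */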
static const struct spinand_info macronix_spinand_table[] = {
	SPINAND_INFO("MX35LF1GE4AB", 0x12,
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
				     mx35lf1ge4ab_ecc_get_status)),
	SPINAND_INFO("MX35LF2GE4AB", 0x22,
		     NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT,
		     SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
	SPINAND_INFO("MX35UF1GE4AC", 0x92,
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants2,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT | SPINAND_NEED_SET_BFT,
		     SPINAND_ECCINFO(&mx35ufxge4ax_ooblayout,
				     mx35ufxge4ax_ecc_get_status)),
	SPINAND_INFO("MX35UF2GE4AC", 0xA2,
		     NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants2,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT | SPINAND_NEED_SET_BFT,
		     SPINAND_ECCINFO(&mx35ufxge4ax_ooblayout,
				     mx35ufxge4ax_ecc_get_status)),
	SPINAND_INFO("MX35UF1GE4AD", 0x96,
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants2,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT | SPINAND_NEED_SET_BFT,
		     SPINAND_ECCINFO(&mx35ufxge4ad_ooblayout,
				     mx35ufxge4ax_ecc_get_status)),
	SPINAND_INFO("MX35UF2GE4AD", 0xA6,
		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants2,
					      &write_cache_variants,
					      &update_cache_variants),
		     SPINAND_HAS_QE_BIT | SPINAND_NEED_SET_BFT,
		     SPINAND_ECCINFO(&mx35ufxge4ad_ooblayout,
				     mx35ufxge4ax_ecc_get_status)),
};

static int macronix_spinand_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	/*
	 * Macronix SPI NAND read ID needs a dummy byte, so the first byte in
	 * raw_id is garbage.
	 */
	if (id[1] != SPINAND_MFR_MACRONIX)
		return 0;

	ret = spinand_match_and_init(spinand, macronix_spinand_table,
				     ARRAY_SIZE(macronix_spinand_table),
				     id[2]);
	if (ret)
		return ret;

	return 1;
}

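/*
 * For parts flagged with SPINAND_NEED_SET_BFT, program the bit flip
 * threshold to 3/4 of the required ECC strength (rounded up, minimum 1).
 */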
static int macronix_spinand_init(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 threshold;
	int ret = 0;

	if (spinand->flags & SPINAND_NEED_SET_BFT) {
		threshold = DIV_ROUND_UP(nand->eccreq.strength * 3, 4);
		threshold = threshold ? threshold : 1;
		ret = mx35ufxge4ad_set_bft(spinand, threshold);
	}

	return ret;
}

static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
	.detect = macronix_spinand_detect,
	.init = macronix_spinand_init,
};

const struct spinand_manufacturer macronix_spinand_manufacturer = {
	.id = SPINAND_MFR_MACRONIX,
	.name = "Macronix",
	.ops = &macronix_spinand_manuf_ops,
};