 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 * 4_by_16 work by Carolyn J. Smith
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
28 #include <asm/byteorder.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
36 #include <linux/of_platform.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/cfi.h>
40 #include <linux/mtd/xip.h>
42 #define AMD_BOOTLOC_BUG
43 #define FORCE_WORD_WRITE 0
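/* Maximum number of times a failed word program is retried before giving up */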
45 #define MAX_WORD_RETRIES 3
47 #define SST49LF004B 0x0060
48 #define SST49LF040B 0x0050
49 #define SST49LF008A 0x005a
50 #define AT49BV6416 0x00d6
52 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
53 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
55 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
56 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
57 static void cfi_amdstd_sync (struct mtd_info *);
58 static int cfi_amdstd_suspend (struct mtd_info *);
59 static void cfi_amdstd_resume (struct mtd_info *);
60 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
61 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
64 size_t *retlen, const u_char *buf);
66 static void cfi_amdstd_destroy(struct mtd_info *);
68 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
69 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
71 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
72 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
75 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
76 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
78 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
79 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
80 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
82 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
83 .probe = NULL, /* Not usable directly */
84 .destroy = cfi_amdstd_destroy,
85 .name = "cfi_cmdset_0002",
90 /* #define DEBUG_CFI_FEATURES */
93 #ifdef DEBUG_CFI_FEATURES
94 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
96 const char* erase_suspend[3] = {
97 "Not supported", "Read only", "Read/write"
99 const char* top_bottom[6] = {
100 "No WP", "8x8KiB sectors at top & bottom, no WP",
101 "Bottom boot", "Top boot",
102 "Uniform, Bottom WP", "Uniform, Top WP"
105 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
106 printk(" Address sensitive unlock: %s\n",
107 (extp->SiliconRevision & 1) ? "Not required" : "Required");
109 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
110 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
112 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
114 if (extp->BlkProt == 0)
115 printk(" Block protection: Not supported\n");
117 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
120 printk(" Temporary block unprotect: %s\n",
121 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
122 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
123 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
124 printk(" Burst mode: %s\n",
125 extp->BurstMode ? "Supported" : "Not supported");
126 if (extp->PageMode == 0)
127 printk(" Page mode: Not supported\n");
129 printk(" Page mode: %d word page\n", extp->PageMode << 2);
131 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
132 extp->VppMin >> 4, extp->VppMin & 0xf);
133 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
134 extp->VppMax >> 4, extp->VppMax & 0xf);
136 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
137 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
139 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
143 #ifdef AMD_BOOTLOC_BUG
144 /* Wheee. Bring me the head of someone at AMD. */
145 static void fixup_amd_bootblock(struct mtd_info *mtd)
147 struct map_info *map = mtd->priv;
148 struct cfi_private *cfi = map->fldrv_priv;
149 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
150 __u8 major = extp->MajorVersion;
151 __u8 minor = extp->MinorVersion;
153 if (((major << 8) | minor) < 0x3131) {
154 /* CFI version 1.0 => don't trust bootloc */
156 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
157 map->name, cfi->mfr, cfi->id);
159 /* AFAICS all 29LV400 with a bottom boot block have a device ID
160 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
161 * These were badly detected as they have the 0x80 bit set
162 * so treat them as a special case.
164 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
166 /* Macronix added CFI to their 2nd generation
167 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
168 * Fujitsu, Spansion, EON, ESI and older Macronix)
171 * Therefore also check the manufacturer.
172 * This reduces the risk of false detection due to
173 * the 8-bit device ID.
175 (cfi->mfr == CFI_MFR_MACRONIX)) {
176 pr_debug("%s: Macronix MX29LV400C with bottom boot block"
177 " detected\n", map->name);
178 extp->TopBottom = 2; /* bottom boot */
180 if (cfi->id & 0x80) {
181 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
182 extp->TopBottom = 3; /* top boot */
184 extp->TopBottom = 2; /* bottom boot */
187 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
188 " deduced %s from Device ID\n", map->name, major, minor,
189 extp->TopBottom == 2 ? "bottom" : "top");
194 static void fixup_use_write_buffers(struct mtd_info *mtd)
196 struct map_info *map = mtd->priv;
197 struct cfi_private *cfi = map->fldrv_priv;
198 if (cfi->cfiq->BufWriteTimeoutTyp) {
199 pr_debug("Using buffer write method\n" );
200 mtd->_write = cfi_amdstd_write_buffers;
204 /* Atmel chips don't use the same PRI format as AMD chips */
205 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
207 struct map_info *map = mtd->priv;
208 struct cfi_private *cfi = map->fldrv_priv;
209 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
210 struct cfi_pri_atmel atmel_pri;
212 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
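/* Keep the first 5 bytes (the "PRI" signature plus the major/minor version)
 * and clear the rest of the AMD extended table; the relevant fields are then
 * filled back in from the Atmel PRI below. */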
213 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
215 if (atmel_pri.Features & 0x02)
216 extp->EraseSuspend = 2;
218 /* Some chips got it backwards... */
219 if (cfi->id == AT49BV6416) {
220 if (atmel_pri.BottomBoot)
225 if (atmel_pri.BottomBoot)
231 /* burst write mode not supported */
232 cfi->cfiq->BufWriteTimeoutTyp = 0;
233 cfi->cfiq->BufWriteTimeoutMax = 0;
236 static void fixup_use_secsi(struct mtd_info *mtd)
238 /* Setup for chips with a secsi area */
239 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
240 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
243 static void fixup_use_erase_chip(struct mtd_info *mtd)
245 struct map_info *map = mtd->priv;
246 struct cfi_private *cfi = map->fldrv_priv;
247 if ((cfi->cfiq->NumEraseRegions == 1) &&
248 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
249 mtd->_erase = cfi_amdstd_erase_chip;
255 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
258 static void fixup_use_atmel_lock(struct mtd_info *mtd)
260 mtd->_lock = cfi_atmel_lock;
261 mtd->_unlock = cfi_atmel_unlock;
262 mtd->flags |= MTD_POWERUP_LOCK;
265 static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
267 struct map_info *map = mtd->priv;
268 struct cfi_private *cfi = map->fldrv_priv;
271 * These flashes report two separate eraseblock regions based on the
272 * sector_erase-size and block_erase-size, although they both operate on the
273 * same memory. This is not allowed according to CFI, so we just pick the
276 cfi->cfiq->NumEraseRegions = 1;
279 static void fixup_sst39vf(struct mtd_info *mtd)
281 struct map_info *map = mtd->priv;
282 struct cfi_private *cfi = map->fldrv_priv;
284 fixup_old_sst_eraseregion(mtd);
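/* These older SST parts expect the full 0x5555/0x2AAA unlock addresses
 * instead of the 0x555/0x2AA defaults used by most AMD-style chips. */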
286 cfi->addr_unlock1 = 0x5555;
287 cfi->addr_unlock2 = 0x2AAA;
290 static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
292 struct map_info *map = mtd->priv;
293 struct cfi_private *cfi = map->fldrv_priv;
295 fixup_old_sst_eraseregion(mtd);
297 cfi->addr_unlock1 = 0x555;
298 cfi->addr_unlock2 = 0x2AA;
300 cfi->sector_erase_cmd = CMD(0x50);
303 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
305 struct map_info *map = mtd->priv;
306 struct cfi_private *cfi = map->fldrv_priv;
308 fixup_sst39vf_rev_b(mtd);
311 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
312 * it should report a size of 8KBytes (0x0020*256).
314 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
315 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
318 static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
320 struct map_info *map = mtd->priv;
321 struct cfi_private *cfi = map->fldrv_priv;
323 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
324 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
325 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
329 static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
331 struct map_info *map = mtd->priv;
332 struct cfi_private *cfi = map->fldrv_priv;
334 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
335 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
336 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
340 static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
342 struct map_info *map = mtd->priv;
343 struct cfi_private *cfi = map->fldrv_priv;
* S29NS512P flash uses more than 8 bits to report the number of sectors,
* which is not permitted by CFI.
349 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
350 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
353 /* Used to fix CFI-Tables of chips without Extended Query Tables */
354 static struct cfi_fixup cfi_nopri_fixup_table[] = {
355 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
356 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
357 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
358 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
359 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
360 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
361 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
362 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
366 static struct cfi_fixup cfi_fixup_table[] = {
367 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
368 #ifdef AMD_BOOTLOC_BUG
369 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
370 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
371 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
373 { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
374 { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
375 { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
376 { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
377 { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
378 { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
379 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
380 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
381 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
382 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
383 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
384 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
385 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
386 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
387 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
388 #if !FORCE_WORD_WRITE
389 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
393 static struct cfi_fixup jedec_fixup_table[] = {
394 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
395 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
396 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
400 static struct cfi_fixup fixup_table[] = {
/* The CFI vendor IDs and the JEDEC vendor IDs appear
* to be common.  It is likely that the device IDs are
* as well.  This table is to pick all cases where
* we know that is the case.
406 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
407 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
412 static void cfi_fixup_major_minor(struct cfi_private *cfi,
413 struct cfi_pri_amdstd *extp)
415 if (cfi->mfr == CFI_MFR_SAMSUNG) {
416 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
417 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
419 * Samsung K8P2815UQB and K8D6x16UxM chips
420 * report major=0 / minor=0.
421 * K8D3x16UxC chips report major=3 / minor=3.
423 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
424 " Extended Query version to 1.%c\n",
426 extp->MajorVersion = '1';
431 * SST 38VF640x chips report major=0xFF / minor=0xFF.
433 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
434 extp->MajorVersion = '1';
435 extp->MinorVersion = '0';
439 static int is_m29ew(struct cfi_private *cfi)
441 if (cfi->mfr == CFI_MFR_INTEL &&
442 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
443 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
449 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
450 * Some revisions of the M29EW suffer from erase suspend hang ups. In
451 * particular, it can occur when the sequence
452 * Erase Confirm -> Suspend -> Program -> Resume
453 * causes a lockup due to internal timing issues. The consequence is that the
454 * erase cannot be resumed without inserting a dummy command after programming
455 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
456 * that writes an F0 command code before the RESUME command.
458 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
461 struct cfi_private *cfi = map->fldrv_priv;
462 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
464 map_write(map, CMD(0xF0), adr);
468 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
470 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
471 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
472 * command is issued after an ERASE RESUME operation without waiting for a
473 * minimum delay. The result is that once the ERASE seems to be completed
474 * (no bits are toggling), the contents of the Flash memory block on which
475 * the erase was ongoing could be inconsistent with the expected values
476 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
477 * values), causing a consequent failure of the ERASE operation.
478 * The occurrence of this issue could be high, especially when file system
479 * operations on the Flash are intensive. As a result, it is recommended
480 * that a patch be applied. Intensive file system operations can cause many
481 * calls to the garbage routine to free Flash space (also by erasing physical
482 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
483 * commands can occur. The problem disappears when a delay is inserted after
484 * the RESUME command by using the udelay() function available in Linux.
485 * The DELAY value must be tuned based on the customer's platform.
486 * The maximum value that fixes the problem in all cases is 500us.
487 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
489 * We have chosen 500µs because this latency is acceptable.
491 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
494 * Resolving the Delay After Resume Issue see Micron TN-13-07
495 * Worst case delay must be 500µs but 30-50µs should be ok as well
501 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
503 struct cfi_private *cfi = map->fldrv_priv;
504 struct device_node __maybe_unused *np = map->device_node;
505 struct mtd_info *mtd;
508 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
510 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
514 mtd->type = MTD_NORFLASH;
516 /* Fill in the default mtd operations */
517 mtd->_erase = cfi_amdstd_erase_varsize;
518 mtd->_write = cfi_amdstd_write_words;
519 mtd->_read = cfi_amdstd_read;
520 mtd->_sync = cfi_amdstd_sync;
521 mtd->_suspend = cfi_amdstd_suspend;
522 mtd->_resume = cfi_amdstd_resume;
523 mtd->flags = MTD_CAP_NORFLASH;
524 mtd->name = map->name;
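/* MaxBufWriteSize is log2 of the per-chip write buffer size in bytes;
 * scale it by the interleave to get the buffer size seen by the map. */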
526 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
528 pr_debug("MTD %s(): write buffer size %d\n", __func__,
531 mtd->_panic_write = cfi_amdstd_panic_write;
532 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
534 if (cfi->cfi_mode==CFI_MODE_CFI){
535 unsigned char bootloc;
536 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
537 struct cfi_pri_amdstd *extp;
539 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
542 * It's a real CFI chip, not one for which the probe
543 * routine faked a CFI structure.
545 cfi_fixup_major_minor(cfi, extp);
548 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
549 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
550 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
551 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
552 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
554 if (extp->MajorVersion != '1' ||
555 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
556 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
557 "version %c.%c (%#02x/%#02x).\n",
558 extp->MajorVersion, extp->MinorVersion,
559 extp->MajorVersion, extp->MinorVersion);
565 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
566 extp->MajorVersion, extp->MinorVersion);
568 /* Install our own private info structure */
569 cfi->cmdset_priv = extp;
571 /* Apply cfi device specific fixups */
572 cfi_fixup(mtd, cfi_fixup_table);
574 #ifdef DEBUG_CFI_FEATURES
575 /* Tell the user about it in lots of lovely detail */
576 cfi_tell_features(extp);
580 if (np && of_property_read_bool(
581 np, "use-advanced-sector-protection")
582 && extp->BlkProtUnprot == 8) {
583 printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
584 mtd->_lock = cfi_ppb_lock;
585 mtd->_unlock = cfi_ppb_unlock;
586 mtd->_is_locked = cfi_ppb_is_locked;
590 bootloc = extp->TopBottom;
591 if ((bootloc < 2) || (bootloc > 5)) {
592 printk(KERN_WARNING "%s: CFI contains unrecognised boot "
593 "bank location (%d). Assuming bottom.\n",
598 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
599 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
601 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
602 int j = (cfi->cfiq->NumEraseRegions-1)-i;
605 swap = cfi->cfiq->EraseRegionInfo[i];
606 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
607 cfi->cfiq->EraseRegionInfo[j] = swap;
610 /* Set the default CFI lock/unlock addresses */
611 cfi->addr_unlock1 = 0x555;
612 cfi->addr_unlock2 = 0x2aa;
614 cfi_fixup(mtd, cfi_nopri_fixup_table);
616 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
622 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
623 /* Apply jedec specific fixups */
624 cfi_fixup(mtd, jedec_fixup_table);
626 /* Apply generic fixups */
627 cfi_fixup(mtd, fixup_table);
629 for (i=0; i< cfi->numchips; i++) {
630 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
631 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
632 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
633 cfi->chips[i].ref_point_counter = 0;
634 init_waitqueue_head(&(cfi->chips[i].wq));
637 map->fldrv = &cfi_amdstd_chipdrv;
639 return cfi_amdstd_setup(mtd);
641 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
642 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
643 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
644 EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
645 EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
647 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
649 struct map_info *map = mtd->priv;
650 struct cfi_private *cfi = map->fldrv_priv;
651 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
652 unsigned long offset = 0;
655 printk(KERN_NOTICE "number of %s chips: %d\n",
656 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
657 /* Select the correct geometry setup */
658 mtd->size = devsize * cfi->numchips;
660 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
661 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
662 * mtd->numeraseregions, GFP_KERNEL);
663 if (!mtd->eraseregions) {
664 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
668 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
669 unsigned long ernum, ersize;
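/* EraseRegionInfo packs the region geometry: bits 0-15 hold the number
 * of blocks minus one, bits 16-31 hold the block size divided by 256.
 * E.g. 0x0100003f describes 64 blocks of 0x0100 * 256 = 64KiB each. */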
670 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
671 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
673 if (mtd->erasesize < ersize) {
674 mtd->erasesize = ersize;
676 for (j=0; j<cfi->numchips; j++) {
677 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
678 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
679 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
681 offset += (ersize * ernum);
683 if (offset != devsize) {
685 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
689 __module_get(THIS_MODULE);
690 register_reboot_notifier(&mtd->reboot_notifier);
694 kfree(mtd->eraseregions);
696 kfree(cfi->cmdset_priv);
702 * Return true if the chip is ready.
704 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
705 * non-suspended sector) and is indicated by no toggle bits toggling.
707 * Note that anything more complicated than checking if no bits are toggling
708 * (including checking DQ5 for an error status) is tricky to get working
709 * correctly and is therefore not done (particularly with interleaved chips
710 * as each chip must be checked independently of the others).
712 static int __xipram chip_ready(struct map_info *map, unsigned long addr)
716 d = map_read(map, addr);
717 t = map_read(map, addr);
719 return map_word_equal(map, d, t);
723 * Return true if the chip is ready and has the correct value.
725 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
726 * non-suspended sector) and it is indicated by no bits toggling.
* Errors are indicated by toggling bits, or by bits held at the wrong value.
731 * Note that anything more complicated than checking if no bits are toggling
732 * (including checking DQ5 for an error status) is tricky to get working
733 * correctly and is therefore not done (particularly with interleaved chips
734 * as each chip must be checked independently of the others).
737 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
741 oldd = map_read(map, addr);
742 curd = map_read(map, addr);
744 return map_word_equal(map, oldd, curd) &&
745 map_word_equal(map, curd, expected);
748 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
750 DECLARE_WAITQUEUE(wait, current);
751 struct cfi_private *cfi = map->fldrv_priv;
753 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
756 timeo = jiffies + HZ;
758 switch (chip->state) {
762 if (chip_ready(map, adr))
765 if (time_after(jiffies, timeo)) {
766 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
769 mutex_unlock(&chip->mutex);
771 mutex_lock(&chip->mutex);
772 /* Someone else might have been playing with it. */
782 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
783 !(mode == FL_READY || mode == FL_POINT ||
784 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
787 /* We could check to see if we're trying to access the sector
788 * that is currently being erased. However, no user will try
789 * anything like that so we just wait for the timeout. */
792 /* It's harmless to issue the Erase-Suspend and Erase-Resume
793 * commands when the erase algorithm isn't in progress. */
794 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
795 chip->oldstate = FL_ERASING;
796 chip->state = FL_ERASE_SUSPENDING;
797 chip->erase_suspended = 1;
799 if (chip_ready(map, adr))
802 if (time_after(jiffies, timeo)) {
803 /* Should have suspended the erase by now.
804 * Send an Erase-Resume command as either
805 * there was an error (so leave the erase
* routine to recover from it) or we are trying to
807 * use the erase-in-progress sector. */
808 put_chip(map, chip, adr);
809 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
813 mutex_unlock(&chip->mutex);
815 mutex_lock(&chip->mutex);
816 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
817 So we can just loop here. */
819 chip->state = FL_READY;
822 case FL_XIP_WHILE_ERASING:
823 if (mode != FL_READY && mode != FL_POINT &&
824 (!cfip || !(cfip->EraseSuspend&2)))
826 chip->oldstate = chip->state;
827 chip->state = FL_READY;
831 /* The machine is rebooting */
835 /* Only if there's no operation suspended... */
836 if (mode == FL_READY && chip->oldstate == FL_READY)
841 set_current_state(TASK_UNINTERRUPTIBLE);
842 add_wait_queue(&chip->wq, &wait);
843 mutex_unlock(&chip->mutex);
845 remove_wait_queue(&chip->wq, &wait);
846 mutex_lock(&chip->mutex);
852 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
854 struct cfi_private *cfi = map->fldrv_priv;
856 switch(chip->oldstate) {
858 cfi_fixup_m29ew_erase_suspend(map,
859 chip->in_progress_block_addr);
860 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
861 cfi_fixup_m29ew_delay_after_resume(cfi);
862 chip->oldstate = FL_READY;
863 chip->state = FL_ERASING;
866 case FL_XIP_WHILE_ERASING:
867 chip->state = chip->oldstate;
868 chip->oldstate = FL_READY;
875 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
880 #ifdef CONFIG_MTD_XIP
* No interrupt whatsoever can be serviced while the flash isn't in array
* mode.  This is ensured by the xip_disable() and xip_enable() functions
* enclosing any code path where the flash is known not to be in array mode.
* And within an XIP-disabled code path, only functions marked with __xipram
* may be called and nothing else (it's a good thing to inspect generated
* assembly to make sure inline functions were actually inlined and that gcc
* didn't emit calls to its own support functions).  Configuring MTD CFI
* support for a single buswidth and a single interleave is also recommended.
893 static void xip_disable(struct map_info *map, struct flchip *chip,
896 /* TODO: chips with no XIP use should ignore and return */
897 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
901 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
904 struct cfi_private *cfi = map->fldrv_priv;
906 if (chip->state != FL_POINT && chip->state != FL_READY) {
907 map_write(map, CMD(0xf0), adr);
908 chip->state = FL_READY;
910 (void) map_read(map, adr);
916 * When a delay is required for the flash operation to complete, the
917 * xip_udelay() function is polling for both the given timeout and pending
918 * (but still masked) hardware interrupts. Whenever there is an interrupt
919 * pending then the flash erase operation is suspended, array mode restored
920 * and interrupts unmasked. Task scheduling might also happen at that
921 * point. The CPU eventually returns from the interrupt or the call to
* schedule() and the suspended flash operation is resumed for the remainder
* of the delay period.
925 * Warning: this function _will_ fool interrupt latency tracing tools.
928 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
929 unsigned long adr, int usec)
931 struct cfi_private *cfi = map->fldrv_priv;
932 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
933 map_word status, OK = CMD(0x80);
934 unsigned long suspended, start = xip_currtime();
939 if (xip_irqpending() && extp &&
940 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
941 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
943 * Let's suspend the erase operation when supported.
944 * Note that we currently don't try to suspend
945 * interleaved chips if there is already another
946 * operation suspended (imagine what happens
947 * when one chip was already done with the current
948 * operation while another chip suspended it, then
949 * we resume the whole thing at once). Yes, it
952 map_write(map, CMD(0xb0), adr);
953 usec -= xip_elapsed_since(start);
954 suspended = xip_currtime();
956 if (xip_elapsed_since(suspended) > 100000) {
958 * The chip doesn't want to suspend
959 * after waiting for 100 msecs.
960 * This is a critical error but there
961 * is not much we can do here.
965 status = map_read(map, adr);
966 } while (!map_word_andequal(map, status, OK, OK));
968 /* Suspend succeeded */
969 oldstate = chip->state;
970 if (!map_word_bitsset(map, status, CMD(0x40)))
972 chip->state = FL_XIP_WHILE_ERASING;
973 chip->erase_suspended = 1;
974 map_write(map, CMD(0xf0), adr);
975 (void) map_read(map, adr);
978 mutex_unlock(&chip->mutex);
983 * We're back. However someone else might have
984 * decided to go write to the chip if we are in
985 * a suspended erase state. If so let's wait
988 mutex_lock(&chip->mutex);
989 while (chip->state != FL_XIP_WHILE_ERASING) {
990 DECLARE_WAITQUEUE(wait, current);
991 set_current_state(TASK_UNINTERRUPTIBLE);
992 add_wait_queue(&chip->wq, &wait);
993 mutex_unlock(&chip->mutex);
995 remove_wait_queue(&chip->wq, &wait);
996 mutex_lock(&chip->mutex);
998 /* Disallow XIP again */
1001 /* Correct Erase Suspend Hangups for M29EW */
1002 cfi_fixup_m29ew_erase_suspend(map, adr);
1003 /* Resume the write or erase operation */
1004 map_write(map, cfi->sector_erase_cmd, adr);
1005 chip->state = oldstate;
1006 start = xip_currtime();
1007 } else if (usec >= 1000000/HZ) {
1009 * Try to save on CPU power when waiting delay
1010 * is at least a system timer tick period.
1011 * No need to be extremely accurate here.
1015 status = map_read(map, adr);
1016 } while (!map_word_andequal(map, status, OK, OK)
1017 && xip_elapsed_since(start) < usec);
1020 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
1023 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1024 * the flash is actively programming or erasing since we have to poll for
1025 * the operation to complete anyway. We can't do that in a generic way with
1026 * a XIP setup so do it before the actual flash operation in this case
1027 * and stub it out from INVALIDATE_CACHE_UDELAY.
1029 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1030 INVALIDATE_CACHED_RANGE(map, from, size)
1032 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1033 UDELAY(map, chip, adr, usec)
1038 * Activating this XIP support changes the way the code works a bit. For
1039 * example the code to suspend the current process when concurrent access
* happens is never executed because xip_udelay() will always return with the
* same chip state as it was entered with.  This is why there is no need to
* worry about add_wait_queue() or schedule() calls from within a couple of
* xip_disable()'d areas of code, like in do_erase_oneblock for example.
* The queueing and scheduling always happen within xip_udelay().
*
* Similarly, get_chip() and put_chip() just happen to always be executed
* with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the flash
* is in array mode, so many of the cases therein are never executed and XIP
* is not disturbed.
1054 #define xip_disable(map, chip, adr)
1055 #define xip_enable(map, chip, adr)
1056 #define XIP_INVAL_CACHED_RANGE(x...)
1058 #define UDELAY(map, chip, adr, usec) \
1060 mutex_unlock(&chip->mutex); \
1062 mutex_lock(&chip->mutex); \
1065 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1067 mutex_unlock(&chip->mutex); \
1068 INVALIDATE_CACHED_RANGE(map, adr, len); \
1070 mutex_lock(&chip->mutex); \
1075 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1077 unsigned long cmd_addr;
1078 struct cfi_private *cfi = map->fldrv_priv;
1083 /* Ensure cmd read/writes are aligned. */
1084 cmd_addr = adr & ~(map_bankwidth(map)-1);
1086 mutex_lock(&chip->mutex);
1087 ret = get_chip(map, chip, cmd_addr, FL_READY);
1089 mutex_unlock(&chip->mutex);
1093 if (chip->state != FL_POINT && chip->state != FL_READY) {
1094 map_write(map, CMD(0xf0), cmd_addr);
1095 chip->state = FL_READY;
1098 map_copy_from(map, buf, adr, len);
1100 put_chip(map, chip, cmd_addr);
1102 mutex_unlock(&chip->mutex);
1107 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1109 struct map_info *map = mtd->priv;
1110 struct cfi_private *cfi = map->fldrv_priv;
1115 /* ofs: offset within the first chip that the first read should start */
1116 chipnum = (from >> cfi->chipshift);
1117 ofs = from - (chipnum << cfi->chipshift);
1120 unsigned long thislen;
1122 if (chipnum >= cfi->numchips)
1125 if ((len + ofs -1) >> cfi->chipshift)
1126 thislen = (1<<cfi->chipshift) - ofs;
1130 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1145 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1147 DECLARE_WAITQUEUE(wait, current);
1148 unsigned long timeo = jiffies + HZ;
1149 struct cfi_private *cfi = map->fldrv_priv;
1152 mutex_lock(&chip->mutex);
1154 if (chip->state != FL_READY){
1155 set_current_state(TASK_UNINTERRUPTIBLE);
1156 add_wait_queue(&chip->wq, &wait);
1158 mutex_unlock(&chip->mutex);
1161 remove_wait_queue(&chip->wq, &wait);
1162 timeo = jiffies + HZ;
1169 chip->state = FL_READY;
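/* Enter the SecSi (Secured Silicon) sector: two unlock cycles followed
 * by the 0x88 entry command.  The 0x90/0x00 sequence below exits it. */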
1171 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1172 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1173 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1175 map_copy_from(map, buf, adr, len);
1177 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1178 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1179 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1180 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1183 mutex_unlock(&chip->mutex);
1188 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1190 struct map_info *map = mtd->priv;
1191 struct cfi_private *cfi = map->fldrv_priv;
1196 /* ofs: offset within the first chip that the first read should start */
1197 /* 8 secsi bytes per chip */
1202 unsigned long thislen;
1204 if (chipnum >= cfi->numchips)
1207 if ((len + ofs -1) >> 3)
1208 thislen = (1<<3) - ofs;
1212 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1227 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
1229 struct cfi_private *cfi = map->fldrv_priv;
1230 unsigned long timeo = jiffies + HZ;
* We use a 1ms + 1 jiffies generic timeout for writes (most devices
* have a max write time of a few hundred usecs).  However, we should
* use the maximum timeout value given by the chip at probe time
* instead.  Unfortunately, struct flchip does not have a field for the
* maximum timeout, only for the typical one, which can be far too short
* depending on the conditions.  The ' + 1' is to avoid having a
* timeout of 0 jiffies if HZ is smaller than 1000.
1240 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1247 mutex_lock(&chip->mutex);
1248 ret = get_chip(map, chip, adr, FL_WRITING);
1250 mutex_unlock(&chip->mutex);
1254 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1255 __func__, adr, datum.x[0] );
1258 * Check for a NOP for the case when the datum to write is already
1259 * present - it saves time and works around buggy chips that corrupt
1260 * data at other locations when 0xff is written to a location that
1261 * already contains 0xff.
1263 oldd = map_read(map, adr);
1264 if (map_word_equal(map, oldd, datum)) {
1265 pr_debug("MTD %s(): NOP\n",
1270 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1272 xip_disable(map, chip, adr);
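/* Standard AMD/Fujitsu word program: two unlock cycles, the 0xA0
 * Program command, then the data word written to its address. */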
1274 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1275 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1276 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1277 map_write(map, datum, adr);
1278 chip->state = FL_WRITING;
1280 INVALIDATE_CACHE_UDELAY(map, chip,
1281 adr, map_bankwidth(map),
1282 chip->word_write_time);
1284 /* See comment above for timeout value. */
1285 timeo = jiffies + uWriteTimeout;
1287 if (chip->state != FL_WRITING) {
1288 /* Someone's suspended the write. Sleep */
1289 DECLARE_WAITQUEUE(wait, current);
1291 set_current_state(TASK_UNINTERRUPTIBLE);
1292 add_wait_queue(&chip->wq, &wait);
1293 mutex_unlock(&chip->mutex);
1295 remove_wait_queue(&chip->wq, &wait);
1296 timeo = jiffies + (HZ / 2); /* FIXME */
1297 mutex_lock(&chip->mutex);
1301 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1302 xip_enable(map, chip, adr);
1303 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1304 xip_disable(map, chip, adr);
1308 if (chip_ready(map, adr))
1311 /* Latency issues. Drop the lock, wait a while and retry */
1312 UDELAY(map, chip, adr, 1);
1314 /* Did we succeed? */
1315 if (!chip_good(map, adr, datum)) {
1316 /* reset on all failures. */
1317 map_write( map, CMD(0xF0), chip->start );
1318 /* FIXME - should have reset delay before continuing */
1320 if (++retry_cnt <= MAX_WORD_RETRIES)
1325 xip_enable(map, chip, adr);
1327 chip->state = FL_READY;
1329 put_chip(map, chip, adr);
1330 mutex_unlock(&chip->mutex);
1336 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1337 size_t *retlen, const u_char *buf)
1339 struct map_info *map = mtd->priv;
1340 struct cfi_private *cfi = map->fldrv_priv;
1343 unsigned long ofs, chipstart;
1344 DECLARE_WAITQUEUE(wait, current);
1346 chipnum = to >> cfi->chipshift;
1347 ofs = to - (chipnum << cfi->chipshift);
1348 chipstart = cfi->chips[chipnum].start;
1350 /* If it's not bus-aligned, do the first byte write */
1351 if (ofs & (map_bankwidth(map)-1)) {
1352 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1353 int i = ofs - bus_ofs;
1358 mutex_lock(&cfi->chips[chipnum].mutex);
1360 if (cfi->chips[chipnum].state != FL_READY) {
1361 set_current_state(TASK_UNINTERRUPTIBLE);
1362 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1364 mutex_unlock(&cfi->chips[chipnum].mutex);
1367 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1371 /* Load 'tmp_buf' with old contents of flash */
1372 tmp_buf = map_read(map, bus_ofs+chipstart);
1374 mutex_unlock(&cfi->chips[chipnum].mutex);
1376 /* Number of bytes to copy from buffer */
1377 n = min_t(int, len, map_bankwidth(map)-i);
1379 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1381 ret = do_write_oneword(map, &cfi->chips[chipnum],
1391 if (ofs >> cfi->chipshift) {
1394 if (chipnum == cfi->numchips)
1399 /* We are now aligned, write as much as possible */
1400 while(len >= map_bankwidth(map)) {
1403 datum = map_word_load(map, buf);
1405 ret = do_write_oneword(map, &cfi->chips[chipnum],
1410 ofs += map_bankwidth(map);
1411 buf += map_bankwidth(map);
1412 (*retlen) += map_bankwidth(map);
1413 len -= map_bankwidth(map);
1415 if (ofs >> cfi->chipshift) {
1418 if (chipnum == cfi->numchips)
1420 chipstart = cfi->chips[chipnum].start;
1424 /* Write the trailing bytes if any */
1425 if (len & (map_bankwidth(map)-1)) {
1429 mutex_lock(&cfi->chips[chipnum].mutex);
1431 if (cfi->chips[chipnum].state != FL_READY) {
1432 set_current_state(TASK_UNINTERRUPTIBLE);
1433 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1435 mutex_unlock(&cfi->chips[chipnum].mutex);
1438 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1442 tmp_buf = map_read(map, ofs + chipstart);
1444 mutex_unlock(&cfi->chips[chipnum].mutex);
1446 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1448 ret = do_write_oneword(map, &cfi->chips[chipnum],
1461 * FIXME: interleaved mode not tested, and probably not supported!
1463 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1464 unsigned long adr, const u_char *buf,
1467 struct cfi_private *cfi = map->fldrv_priv;
1468 unsigned long timeo = jiffies + HZ;
1469 /* see comments in do_write_oneword() regarding uWriteTimeo. */
1470 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1472 unsigned long cmd_adr;
1479 mutex_lock(&chip->mutex);
1480 ret = get_chip(map, chip, adr, FL_WRITING);
1482 mutex_unlock(&chip->mutex);
1486 datum = map_word_load(map, buf);
1488 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1489 __func__, adr, datum.x[0] );
1491 XIP_INVAL_CACHED_RANGE(map, adr, len);
1493 xip_disable(map, chip, cmd_adr);
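/* Buffered program sequence: unlock cycles, 0x25 (Write to Buffer)
 * at cmd_adr, the word count minus one, the data words themselves,
 * then 0x29 (Program Buffer to Flash confirm) to start programming. */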
1495 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1496 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1498 /* Write Buffer Load */
1499 map_write(map, CMD(0x25), cmd_adr);
1501 chip->state = FL_WRITING_TO_BUFFER;
1503 /* Write length of data to come */
1504 words = len / map_bankwidth(map);
1505 map_write(map, CMD(words - 1), cmd_adr);
1508 while(z < words * map_bankwidth(map)) {
1509 datum = map_word_load(map, buf);
1510 map_write(map, datum, adr + z);
1512 z += map_bankwidth(map);
1513 buf += map_bankwidth(map);
1515 z -= map_bankwidth(map);
1519 /* Write Buffer Program Confirm: GO GO GO */
1520 map_write(map, CMD(0x29), cmd_adr);
1521 chip->state = FL_WRITING;
1523 INVALIDATE_CACHE_UDELAY(map, chip,
1524 adr, map_bankwidth(map),
1525 chip->word_write_time);
1527 timeo = jiffies + uWriteTimeout;
1530 if (chip->state != FL_WRITING) {
1531 /* Someone's suspended the write. Sleep */
1532 DECLARE_WAITQUEUE(wait, current);
1534 set_current_state(TASK_UNINTERRUPTIBLE);
1535 add_wait_queue(&chip->wq, &wait);
1536 mutex_unlock(&chip->mutex);
1538 remove_wait_queue(&chip->wq, &wait);
1539 timeo = jiffies + (HZ / 2); /* FIXME */
1540 mutex_lock(&chip->mutex);
1544 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1547 if (chip_ready(map, adr)) {
1548 xip_enable(map, chip, adr);
1552 /* Latency issues. Drop the lock, wait a while and retry */
1553 UDELAY(map, chip, adr, 1);
1557 * Recovery from write-buffer programming failures requires
1558 * the write-to-buffer-reset sequence. Since the last part
1559 * of the sequence also works as a normal reset, we can run
1560 * the same commands regardless of why we are here.
1562 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1564 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1565 cfi->device_type, NULL);
1566 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1567 cfi->device_type, NULL);
1568 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
1569 cfi->device_type, NULL);
1570 xip_enable(map, chip, adr);
1571 /* FIXME - should have reset delay before continuing */
1573 printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
1578 chip->state = FL_READY;
1580 put_chip(map, chip, adr);
1581 mutex_unlock(&chip->mutex);
1587 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1588 size_t *retlen, const u_char *buf)
1590 struct map_info *map = mtd->priv;
1591 struct cfi_private *cfi = map->fldrv_priv;
1592 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1597 chipnum = to >> cfi->chipshift;
1598 ofs = to - (chipnum << cfi->chipshift);
1600 /* If it's not bus-aligned, do the first word write */
1601 if (ofs & (map_bankwidth(map)-1)) {
1602 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1603 if (local_len > len)
1605 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1606 local_len, retlen, buf);
1613 if (ofs >> cfi->chipshift) {
1616 if (chipnum == cfi->numchips)
1621 /* Write buffer is worth it only if more than one word to write... */
1622 while (len >= map_bankwidth(map) * 2) {
1623 /* We must not cross write block boundaries */
1624 int size = wbufsize - (ofs & (wbufsize-1));
1628 if (size % map_bankwidth(map))
1629 size -= size % map_bankwidth(map);
1631 ret = do_write_buffer(map, &cfi->chips[chipnum],
1641 if (ofs >> cfi->chipshift) {
1644 if (chipnum == cfi->numchips)
1650 size_t retlen_dregs = 0;
1652 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1653 len, &retlen_dregs, buf);
1655 *retlen += retlen_dregs;
1663 * Wait for the flash chip to become ready to write data
1665 * This is only called during the panic_write() path. When panic_write()
1666 * is called, the kernel is in the process of a panic, and will soon be
1667 * dead. Therefore we don't take any locks, and attempt to get access
1668 * to the chip as soon as possible.
1670 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1673 struct cfi_private *cfi = map->fldrv_priv;
1678 * If the driver thinks the chip is idle, and no toggle bits
1679 * are changing, then the chip is actually idle for sure.
1681 if (chip->state == FL_READY && chip_ready(map, adr))
1685 * Try several times to reset the chip and then wait for it
1686 * to become idle. The upper limit of a few milliseconds of
1687 * delay isn't a big problem: the kernel is dying anyway. It
1688 * is more important to save the messages.
1690 while (retries > 0) {
1691 const unsigned long timeo = (HZ / 1000) + 1;
1693 /* send the reset command */
1694 map_write(map, CMD(0xF0), chip->start);
1696 /* wait for the chip to become ready */
1697 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1698 if (chip_ready(map, adr))
1705 /* the chip never became ready */
1710 * Write out one word of data to a single flash chip during a kernel panic
1712 * This is only called during the panic_write() path. When panic_write()
1713 * is called, the kernel is in the process of a panic, and will soon be
1714 * dead. Therefore we don't take any locks, and attempt to get access
1715 * to the chip as soon as possible.
1717 * The implementation of this routine is intentionally similar to
1718 * do_write_oneword(), in order to ease code maintenance.
1720 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1721 unsigned long adr, map_word datum)
1723 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1724 struct cfi_private *cfi = map->fldrv_priv;
1732 ret = cfi_amdstd_panic_wait(map, chip, adr);
1736 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1737 __func__, adr, datum.x[0]);
1740 * Check for a NOP for the case when the datum to write is already
1741 * present - it saves time and works around buggy chips that corrupt
1742 * data at other locations when 0xff is written to a location that
1743 * already contains 0xff.
1745 oldd = map_read(map, adr);
1746 if (map_word_equal(map, oldd, datum)) {
1747 pr_debug("MTD %s(): NOP\n", __func__);
1754 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1755 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1756 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1757 map_write(map, datum, adr);
1759 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1760 if (chip_ready(map, adr))
1766 if (!chip_good(map, adr, datum)) {
1767 /* reset on all failures. */
1768 map_write(map, CMD(0xF0), chip->start);
1769 /* FIXME - should have reset delay before continuing */
1771 if (++retry_cnt <= MAX_WORD_RETRIES)
1783 * Write out some data during a kernel panic
1785 * This is used by the mtdoops driver to save the dying messages from a
1786 * kernel which has panic'd.
1788 * This routine ignores all of the locking used throughout the rest of the
1789 * driver, in order to ensure that the data gets written out no matter what
1790 * state this driver (and the flash chip itself) was in when the kernel crashed.
1792 * The implementation of this routine is intentionally similar to
1793 * cfi_amdstd_write_words(), in order to ease code maintenance.
1795 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1796 size_t *retlen, const u_char *buf)
1798 struct map_info *map = mtd->priv;
1799 struct cfi_private *cfi = map->fldrv_priv;
1800 unsigned long ofs, chipstart;
1804 chipnum = to >> cfi->chipshift;
1805 ofs = to - (chipnum << cfi->chipshift);
1806 chipstart = cfi->chips[chipnum].start;
1808 /* If it's not bus aligned, do the first byte write */
1809 if (ofs & (map_bankwidth(map) - 1)) {
1810 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1811 int i = ofs - bus_ofs;
1815 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1819 /* Load 'tmp_buf' with old contents of flash */
1820 tmp_buf = map_read(map, bus_ofs + chipstart);
1822 /* Number of bytes to copy from buffer */
1823 n = min_t(int, len, map_bankwidth(map) - i);
1825 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1827 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1837 if (ofs >> cfi->chipshift) {
1840 if (chipnum == cfi->numchips)
1845 /* We are now aligned, write as much as possible */
1846 while (len >= map_bankwidth(map)) {
1849 datum = map_word_load(map, buf);
1851 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1856 ofs += map_bankwidth(map);
1857 buf += map_bankwidth(map);
1858 (*retlen) += map_bankwidth(map);
1859 len -= map_bankwidth(map);
1861 if (ofs >> cfi->chipshift) {
1864 if (chipnum == cfi->numchips)
1867 chipstart = cfi->chips[chipnum].start;
1871 /* Write the trailing bytes if any */
1872 if (len & (map_bankwidth(map) - 1)) {
1875 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1879 tmp_buf = map_read(map, ofs + chipstart);
1881 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1883 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
* Handle devices with one erase region that only implement
* the chip erase command.
1899 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1901 struct cfi_private *cfi = map->fldrv_priv;
1902 unsigned long timeo = jiffies + HZ;
1903 unsigned long int adr;
1904 DECLARE_WAITQUEUE(wait, current);
1907 adr = cfi->addr_unlock1;
1909 mutex_lock(&chip->mutex);
1910 ret = get_chip(map, chip, adr, FL_WRITING);
1912 mutex_unlock(&chip->mutex);
1916 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1917 __func__, chip->start );
1919 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1921 xip_disable(map, chip, adr);
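/* Chip erase is the six-cycle AMD sequence: unlock, 0x80 (erase
 * setup), unlock again, then 0x10 (chip erase) at addr_unlock1. */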
1923 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1924 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1925 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1926 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1927 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1928 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1930 chip->state = FL_ERASING;
1931 chip->erase_suspended = 0;
1932 chip->in_progress_block_addr = adr;
1934 INVALIDATE_CACHE_UDELAY(map, chip,
1936 chip->erase_time*500);
1938 timeo = jiffies + (HZ*20);
1941 if (chip->state != FL_ERASING) {
1942 /* Someone's suspended the erase. Sleep */
1943 set_current_state(TASK_UNINTERRUPTIBLE);
1944 add_wait_queue(&chip->wq, &wait);
1945 mutex_unlock(&chip->mutex);
1947 remove_wait_queue(&chip->wq, &wait);
1948 mutex_lock(&chip->mutex);
1951 if (chip->erase_suspended) {
1952 /* This erase was suspended and resumed.
1953 Adjust the timeout */
1954 timeo = jiffies + (HZ*20); /* FIXME */
1955 chip->erase_suspended = 0;
1958 if (chip_ready(map, adr))
1961 if (time_after(jiffies, timeo)) {
1962 printk(KERN_WARNING "MTD %s(): software timeout\n",
1967 /* Latency issues. Drop the lock, wait a while and retry */
1968 UDELAY(map, chip, adr, 1000000/HZ);
1970 /* Did we succeed? */
1971 if (!chip_good(map, adr, map_word_ff(map))) {
1972 /* reset on all failures. */
1973 map_write( map, CMD(0xF0), chip->start );
1974 /* FIXME - should have reset delay before continuing */
1979 chip->state = FL_READY;
1980 xip_enable(map, chip, adr);
1982 put_chip(map, chip, adr);
1983 mutex_unlock(&chip->mutex);
1989 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1991 struct cfi_private *cfi = map->fldrv_priv;
1992 unsigned long timeo = jiffies + HZ;
1993 DECLARE_WAITQUEUE(wait, current);
1998 mutex_lock(&chip->mutex);
1999 ret = get_chip(map, chip, adr, FL_ERASING);
2001 mutex_unlock(&chip->mutex);
2005 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2008 XIP_INVAL_CACHED_RANGE(map, adr, len);
2010 xip_disable(map, chip, adr);
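/* Sector erase: the same six-cycle sequence, but the final cycle writes
 * the sector-erase command (typically 0x30) to the address of the
 * sector to be erased. */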
2012 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2013 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2014 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2015 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2016 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2017 map_write(map, cfi->sector_erase_cmd, adr);
2019 chip->state = FL_ERASING;
2020 chip->erase_suspended = 0;
2021 chip->in_progress_block_addr = adr;
2023 INVALIDATE_CACHE_UDELAY(map, chip,
2025 chip->erase_time*500);
2027 timeo = jiffies + (HZ*20);
2030 if (chip->state != FL_ERASING) {
2031 /* Someone's suspended the erase. Sleep */
2032 set_current_state(TASK_UNINTERRUPTIBLE);
2033 add_wait_queue(&chip->wq, &wait);
2034 mutex_unlock(&chip->mutex);
2036 remove_wait_queue(&chip->wq, &wait);
2037 mutex_lock(&chip->mutex);
2040 if (chip->erase_suspended) {
2041 /* This erase was suspended and resumed.
2042 Adjust the timeout */
2043 timeo = jiffies + (HZ*20); /* FIXME */
2044 chip->erase_suspended = 0;
2047 if (chip_ready(map, adr)) {
2048 xip_enable(map, chip, adr);
2052 if (time_after(jiffies, timeo)) {
2053 xip_enable(map, chip, adr);
2054 printk(KERN_WARNING "MTD %s(): software timeout\n",
2059 /* Latency issues. Drop the lock, wait a while and retry */
2060 UDELAY(map, chip, adr, 1000000/HZ);
2062 /* Did we succeed? */
2063 if (!chip_good(map, adr, map_word_ff(map))) {
2064 /* reset on all failures. */
2065 map_write( map, CMD(0xF0), chip->start );
2066 /* FIXME - should have reset delay before continuing */
2071 chip->state = FL_READY;
2073 put_chip(map, chip, adr);
2074 mutex_unlock(&chip->mutex);
2079 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2081 unsigned long ofs, len;
2087 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2091 instr->state = MTD_ERASE_DONE;
2092 mtd_erase_callback(instr);
2098 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2100 struct map_info *map = mtd->priv;
2101 struct cfi_private *cfi = map->fldrv_priv;
2104 if (instr->addr != 0)
2107 if (instr->len != mtd->size)
2110 ret = do_erase_chip(map, &cfi->chips[0]);
2114 instr->state = MTD_ERASE_DONE;
2115 mtd_erase_callback(instr);
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}

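/*
 * Note: cfi_atmel_lock/unlock are not installed here.  Elsewhere in this
 * driver they are expected to be wired up as mtd->_lock/_unlock by a
 * device fixup for the Atmel AT49BV6416, whose sectors power up locked
 * and must be unlocked with the 0x70 command above before programming.
 */
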
/*
 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
 */

struct ppb_lock {
	struct flchip *chip;
	loff_t offset;
	int locked;
};

#define MAX_SECTORS			512

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
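
/*
 * do_ppb_xxlock() below is a single worker used for three operations; the
 * 'thunk' argument selects lock, unlock-all or get-lock-status.  MAX_SECTORS
 * only bounds the scratch table that cfi_ppb_unlock() uses to remember which
 * sectors were locked before the chip-wide unlock, so that it can re-lock
 * them afterwards.
 */
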
static int __maybe_unused do_ppb_xxlock(struct map_info *map,
					struct flchip *chip,
					unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* PPB entry command */
	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		chip->state = FL_LOCKING;
		map_write(map, CMD(0xA0), chip->start + adr);
		map_write(map, CMD(0x00), chip->start + adr);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		/*
		 * Unlocking of one specific sector is not supported, so we
		 * have to unlock all sectors of this device instead
		 */
		chip->state = FL_UNLOCKING;
		map_write(map, CMD(0x80), chip->start);
		map_write(map, CMD(0x30), chip->start);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
		chip->state = FL_JEDEC_QUERY;
		/* Return locked status: 0->locked, 1->unlocked */
		ret = !cfi_read_query(map, adr);
	} else
		BUG();

	/*
	 * Wait for some time as unlocking of all sectors takes quite long
	 */
	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
	for (;;) {
		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}

		UDELAY(map, chip, adr, 1);
	}

	/* Exit BC commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
				       uint64_t len)
{
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_LOCK);
}

static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
					 uint64_t len)
{
	struct mtd_erase_region_info *regions = mtd->eraseregions;
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct ppb_lock *sect;
	unsigned long adr = 0;
	loff_t offset = 0;
	uint64_t length = mtd->size;
	int chipnum = 0, i = 0, sectors = 0;
	int ret;

	/*
	 * PPB unlocking always unlocks all sectors of the flash chip.
	 * We need to re-lock all previously locked sectors. So lets
	 * first check the locking status of all sectors and save
	 * it for future use.
	 */
	sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
	if (!sect)
		return -ENOMEM;

	/*
	 * This code to walk all sectors is a slightly modified version
	 * of the cfi_varsize_frob() code.
	 */
	while (length) {
		int size = regions[i].erasesize;

		/*
		 * Only test sectors that shall not be unlocked. The other
		 * sectors shall be unlocked, so lets keep their locking
		 * status at "unlocked" (locked=0) for the final re-locking.
		 */
		if ((adr < ofs) || (adr >= (ofs + len))) {
			sect[sectors].chip = &cfi->chips[chipnum];
			sect[sectors].offset = offset;
			sect[sectors].locked = do_ppb_xxlock(
				map, &cfi->chips[chipnum], adr, 0,
				DO_XXLOCK_ONEBLOCK_GETLOCK);
		}

		adr += size;
		offset += size;
		length -= size;

		if (offset == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}

		sectors++;
		if (sectors >= MAX_SECTORS) {
			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
			       MAX_SECTORS);
			kfree(sect);
			return -EINVAL;
		}
	}

	/* Now unlock the whole chip */
	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
			       DO_XXLOCK_ONEBLOCK_UNLOCK);
	if (ret) {
		kfree(sect);
		return ret;
	}

	/*
	 * PPB unlocking always unlocks all sectors of the flash chip.
	 * We need to re-lock all previously locked sectors.
	 */
	for (i = 0; i < sectors; i++) {
		if (sect[i].locked)
			do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
				      DO_XXLOCK_ONEBLOCK_LOCK);
	}

	kfree(sect);
	return ret;
}

static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
					    uint64_t len)
{
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
}

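/*
 * The PPB handlers above are not installed unconditionally: in this driver
 * they are hooked up as mtd->_lock/_unlock/_is_locked from the probe path
 * (in mainline this is gated on a device-tree property requesting advanced
 * sector protection and on the extended query table advertising PPB
 * support), which is why they carry the __maybe_unused annotation.
 */
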
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}

/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}

static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");