1 /*
2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
8 * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
9 * - completely revamped method functions so they are aware and
10 * independent of the flash geometry (buswidth, interleave, etc.)
11 * - scalability vs code size is completely set at compile-time
12 * (see include/linux/mtd/cfi.h for selection)
13 * - optimized write buffer method
14 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
15 * - reworked lock/unlock/erase support for var size flash
16 * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
17 * - auto unlock sectors on resume for auto locking flash on power up
18 */
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
25 #include <asm/byteorder.h>
27 #include <linux/errno.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/reboot.h>
32 #include <linux/bitmap.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/cfi.h>
38 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41 // debugging, turns off buffer write mode if set to 1
42 #define FORCE_WORD_WRITE 0
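/*
 * With FORCE_WORD_WRITE set to 1, the fixup that installs the buffered
 * write path (fixup_use_write_buffers, guarded by #if !FORCE_WORD_WRITE
 * below) is compiled out, so mtd->_write stays at
 * cfi_intelext_write_words() even on chips that advertise a write buffer.
 */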
45 #define I82802AB 0x00ad
46 #define I82802AC 0x00ac
47 #define PF38F4476 0x881c
48 #define M28F00AP30 0x8963
49 /* STMicroelectronics chips */
50 #define M50LPW080 0x002F
51 #define M50FLW080A 0x0080
52 #define M50FLW080B 0x0081
54 #define AT49BV640D 0x02de
55 #define AT49BV640DT 0x02db
57 #define LH28F640BFHE_PTTL90 0x00b0
58 #define LH28F640BFHE_PBTL90 0x00b1
59 #define LH28F640BFHE_PTTL70A 0x00b2
60 #define LH28F640BFHE_PBTL70A 0x00b3
62 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
64 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
66 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
67 static void cfi_intelext_sync (struct mtd_info *);
68 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
69 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
70 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
73 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
74 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
75 static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
76 size_t *, const u_char *);
77 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
78 static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
79 size_t *, struct otp_info *);
80 static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
81 size_t *, struct otp_info *);
83 static int cfi_intelext_suspend (struct mtd_info *);
84 static void cfi_intelext_resume (struct mtd_info *);
85 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
87 static void cfi_intelext_destroy(struct mtd_info *);
89 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
91 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
92 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
94 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
95 size_t *retlen, void **virt, resource_size_t *phys);
96 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
98 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
99 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
100 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
101 #include "fwh_lock.h"
106 * *********** SETUP AND PROBE BITS ***********
109 static struct mtd_chip_driver cfi_intelext_chipdrv = {
110 .probe = NULL, /* Not usable directly */
111 .destroy = cfi_intelext_destroy,
112 .name = "cfi_cmdset_0001",
113 .module = THIS_MODULE
116 /* #define DEBUG_LOCK_BITS */
117 /* #define DEBUG_CFI_FEATURES */
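/*
 * DEBUG_CFI_FEATURES makes cfi_cmdset_0001() dump the decoded extended
 * query table via cfi_tell_features().  DEBUG_LOCK_BITS makes the
 * lock/unlock paths print the per-block status register before and after
 * each operation via do_printlockstatus_oneblock().
 */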
119 #ifdef DEBUG_CFI_FEATURES
120 static void cfi_tell_features(struct cfi_pri_intelext *extp)
121 {
122 	int i;
123 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
124 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
125 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
126 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
127 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
128 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
129 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
130 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
131 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
132 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
133 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
134 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
135 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
136 for (i=11; i<32; i++) {
137 if (extp->FeatureSupport & (1<<i))
138 printk(" - Unknown Bit %X: supported\n", i);
141 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
142 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
143 for (i=1; i<8; i++) {
144 if (extp->SuspendCmdSupport & (1<<i))
145 printk(" - Unknown Bit %X: supported\n", i);
148 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
149 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
150 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
151 for (i=2; i<3; i++) {
152 if (extp->BlkStatusRegMask & (1<<i))
153 printk(" - Unknown Bit %X Active: yes\n",i);
155 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
156 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
157 for (i=6; i<16; i++) {
158 if (extp->BlkStatusRegMask & (1<<i))
159 printk(" - Unknown Bit %X Active: yes\n",i);
162 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
163 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
164 if (extp->VppOptimal)
165 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
166 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
170 /* Atmel chips don't use the same PRI format as Intel chips */
171 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
173 struct map_info *map = mtd->priv;
174 struct cfi_private *cfi = map->fldrv_priv;
175 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
176 struct cfi_pri_atmel atmel_pri;
177 uint32_t features = 0;
179 /* Reverse byteswapping */
180 extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
181 extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
182 extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
184 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
185 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
187 printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
189 if (atmel_pri.Features & 0x01) /* chip erase supported */
191 if (atmel_pri.Features & 0x02) /* erase suspend supported */
193 if (atmel_pri.Features & 0x04) /* program suspend supported */
195 if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
197 if (atmel_pri.Features & 0x20) /* page mode read supported */
199 if (atmel_pri.Features & 0x40) /* queued erase supported */
201 if (atmel_pri.Features & 0x80) /* Protection bits supported */
204 extp->FeatureSupport = features;
206 /* burst write mode not supported */
207 cfi->cfiq->BufWriteTimeoutTyp = 0;
208 cfi->cfiq->BufWriteTimeoutMax = 0;
211 static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
213 struct map_info *map = mtd->priv;
214 struct cfi_private *cfi = map->fldrv_priv;
215 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
217 cfip->FeatureSupport |= (1 << 5);
218 mtd->flags |= MTD_POWERUP_LOCK;
221 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
222 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
223 static void fixup_intel_strataflash(struct mtd_info *mtd)
225 struct map_info *map = mtd->priv;
226 struct cfi_private *cfi = map->fldrv_priv;
227 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
229 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
230 "erase on write disabled.\n");
231 extp->SuspendCmdSupport &= ~1;
235 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
236 static void fixup_no_write_suspend(struct mtd_info *mtd)
238 struct map_info *map = mtd->priv;
239 struct cfi_private *cfi = map->fldrv_priv;
240 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
242 if (cfip && (cfip->FeatureSupport&4)) {
243 cfip->FeatureSupport &= ~4;
244 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
249 static void fixup_st_m28w320ct(struct mtd_info *mtd)
251 struct map_info *map = mtd->priv;
252 struct cfi_private *cfi = map->fldrv_priv;
254 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
255 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
258 static void fixup_st_m28w320cb(struct mtd_info *mtd)
260 struct map_info *map = mtd->priv;
261 struct cfi_private *cfi = map->fldrv_priv;
263 /* Note this is done after the region info is endian swapped */
264 cfi->cfiq->EraseRegionInfo[1] =
265 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
268 static int is_LH28F640BF(struct cfi_private *cfi)
270 /* Sharp LH28F640BF Family */
271 if (cfi->mfr == CFI_MFR_SHARP && (
272 cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
273 cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
278 static void fixup_LH28F640BF(struct mtd_info *mtd)
280 struct map_info *map = mtd->priv;
281 struct cfi_private *cfi = map->fldrv_priv;
282 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
284 /* Reset the Partition Configuration Register on LH28F640BF
285 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
286 if (is_LH28F640BF(cfi)) {
287 printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
288 map_write(map, CMD(0x60), 0);
289 map_write(map, CMD(0x04), 0);
291 /* We have set one single partition thus
292 * Simultaneous Operations are not allowed */
293 printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
294 extp->FeatureSupport &= ~512;
298 static void fixup_use_point(struct mtd_info *mtd)
300 struct map_info *map = mtd->priv;
301 if (!mtd->_point && map_is_linear(map)) {
302 mtd->_point = cfi_intelext_point;
303 mtd->_unpoint = cfi_intelext_unpoint;
307 static void fixup_use_write_buffers(struct mtd_info *mtd)
309 struct map_info *map = mtd->priv;
310 struct cfi_private *cfi = map->fldrv_priv;
311 if (cfi->cfiq->BufWriteTimeoutTyp) {
312 printk(KERN_INFO "Using buffer write method\n" );
313 mtd->_write = cfi_intelext_write_buffers;
314 mtd->_writev = cfi_intelext_writev;
319 * Some chips power-up with all sectors locked by default.
321 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
323 struct map_info *map = mtd->priv;
324 struct cfi_private *cfi = map->fldrv_priv;
325 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
327 if (cfip->FeatureSupport&32) {
328 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
329 mtd->flags |= MTD_POWERUP_LOCK;
333 static struct cfi_fixup cfi_fixup_table[] = {
334 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
335 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
336 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
337 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
338 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
340 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
341 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
343 #if !FORCE_WORD_WRITE
344 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
346 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
347 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
348 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
349 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
350 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
354 static struct cfi_fixup jedec_fixup_table[] = {
355 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
356 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
357 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
358 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
359 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
362 static struct cfi_fixup fixup_table[] = {
363 /* The CFI vendor IDs and the JEDEC vendor IDs appear
364 * to be common. It is likely the device IDs are as
365 * well. This table picks up all the cases where
366 * we know that to be true.
368 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
372 static void cfi_fixup_major_minor(struct cfi_private *cfi,
373 struct cfi_pri_intelext *extp)
375 if (cfi->mfr == CFI_MFR_INTEL &&
376 cfi->id == PF38F4476 && extp->MinorVersion == '3')
377 extp->MinorVersion = '1';
380 static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
383 * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy w.r.t.
384 * Erase Suspend for their small (0x8000-byte) erase blocks.
386 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
391 static inline struct cfi_pri_intelext *
392 read_pri_intelext(struct map_info *map, __u16 adr)
394 struct cfi_private *cfi = map->fldrv_priv;
395 struct cfi_pri_intelext *extp;
396 unsigned int extra_size = 0;
397 unsigned int extp_size = sizeof(*extp);
400 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
404 cfi_fixup_major_minor(cfi, extp);
406 if (extp->MajorVersion != '1' ||
407 (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
408 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
409 "version %c.%c.\n", extp->MajorVersion,
410 extp->MinorVersion);
411 kfree(extp);
412 return NULL;
413 }
415 /* Do some byteswapping if necessary */
416 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
417 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
418 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
420 if (extp->MinorVersion >= '0') {
423 /* Protection Register info */
424 if (extp->NumProtectionFields)
425 extra_size += (extp->NumProtectionFields - 1) *
426 sizeof(struct cfi_intelext_otpinfo);
429 if (extp->MinorVersion >= '1') {
430 /* Burst Read info */
431 extra_size += 2;
432 if (extp_size < sizeof(*extp) + extra_size)
433 goto need_more;
434 extra_size += extp->extra[extra_size - 1];
437 if (extp->MinorVersion >= '3') {
440 /* Number of hardware-partitions */
441 extra_size += 1;
442 if (extp_size < sizeof(*extp) + extra_size)
443 goto need_more;
444 nb_parts = extp->extra[extra_size - 1];
446 /* skip the sizeof(partregion) field in CFI 1.4 */
447 if (extp->MinorVersion >= '4')
448 extra_size += 2;
450 for (i = 0; i < nb_parts; i++) {
451 struct cfi_intelext_regioninfo *rinfo;
452 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
453 extra_size += sizeof(*rinfo);
454 if (extp_size < sizeof(*extp) + extra_size)
455 goto need_more;
456 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
457 extra_size += (rinfo->NumBlockTypes - 1)
458 * sizeof(struct cfi_intelext_blockinfo);
461 if (extp->MinorVersion >= '4')
462 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
464 if (extp_size < sizeof(*extp) + extra_size) {
466 extp_size = sizeof(*extp) + extra_size;
468 if (extp_size > 4096) {
469 printk(KERN_ERR
470 "%s: cfi_pri_intelext is too fat\n",
471 map->name);
481 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
483 struct cfi_private *cfi = map->fldrv_priv;
484 struct mtd_info *mtd;
487 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
491 mtd->type = MTD_NORFLASH;
493 /* Fill in the default mtd operations */
494 mtd->_erase = cfi_intelext_erase_varsize;
495 mtd->_read = cfi_intelext_read;
496 mtd->_write = cfi_intelext_write_words;
497 mtd->_sync = cfi_intelext_sync;
498 mtd->_lock = cfi_intelext_lock;
499 mtd->_unlock = cfi_intelext_unlock;
500 mtd->_is_locked = cfi_intelext_is_locked;
501 mtd->_suspend = cfi_intelext_suspend;
502 mtd->_resume = cfi_intelext_resume;
503 mtd->flags = MTD_CAP_NORFLASH;
504 mtd->name = map->name;
506 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
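/*
 * MaxBufWriteSize is the log2 of a single chip's write buffer in bytes,
 * so the usable buffer for the whole interleaved set is
 * interleave * 2^MaxBufWriteSize.  Illustrative numbers only: two
 * interleaved chips with 32-byte buffers (MaxBufWriteSize = 5) give a
 * 64-byte writebufsize.
 */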
508 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
510 if (cfi->cfi_mode == CFI_MODE_CFI) {
512 * It's a real CFI chip, not one for which the probe
513 * routine faked a CFI structure. So we read the feature
516 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
517 struct cfi_pri_intelext *extp;
519 extp = read_pri_intelext(map, adr);
525 /* Install our own private info structure */
526 cfi->cmdset_priv = extp;
528 cfi_fixup(mtd, cfi_fixup_table);
530 #ifdef DEBUG_CFI_FEATURES
531 /* Tell the user about it in lots of lovely detail */
532 cfi_tell_features(extp);
535 if(extp->SuspendCmdSupport & 1) {
536 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
539 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
540 /* Apply jedec specific fixups */
541 cfi_fixup(mtd, jedec_fixup_table);
543 /* Apply generic fixups */
544 cfi_fixup(mtd, fixup_table);
546 for (i=0; i< cfi->numchips; i++) {
547 if (cfi->cfiq->WordWriteTimeoutTyp)
548 cfi->chips[i].word_write_time =
549 1<<cfi->cfiq->WordWriteTimeoutTyp;
551 cfi->chips[i].word_write_time = 50000;
553 if (cfi->cfiq->BufWriteTimeoutTyp)
554 cfi->chips[i].buffer_write_time =
555 1<<cfi->cfiq->BufWriteTimeoutTyp;
556 /* No default; if it isn't specified, we won't use it */
558 if (cfi->cfiq->BlockEraseTimeoutTyp)
559 cfi->chips[i].erase_time =
560 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
562 cfi->chips[i].erase_time = 2000000;
564 if (cfi->cfiq->WordWriteTimeoutTyp &&
565 cfi->cfiq->WordWriteTimeoutMax)
566 cfi->chips[i].word_write_time_max =
567 1<<(cfi->cfiq->WordWriteTimeoutTyp +
568 cfi->cfiq->WordWriteTimeoutMax);
570 cfi->chips[i].word_write_time_max = 50000 * 8;
572 if (cfi->cfiq->BufWriteTimeoutTyp &&
573 cfi->cfiq->BufWriteTimeoutMax)
574 cfi->chips[i].buffer_write_time_max =
575 1<<(cfi->cfiq->BufWriteTimeoutTyp +
576 cfi->cfiq->BufWriteTimeoutMax);
578 if (cfi->cfiq->BlockEraseTimeoutTyp &&
579 cfi->cfiq->BlockEraseTimeoutMax)
580 cfi->chips[i].erase_time_max =
581 1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
582 cfi->cfiq->BlockEraseTimeoutMax);
584 cfi->chips[i].erase_time_max = 2000000 * 8;
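/*
 * The CFI query encodes timeouts as powers of two: typical word and
 * buffer writes are 2^Typ microseconds and typical block erases are
 * 2^Typ milliseconds (hence the 1000<< above), with the Max fields
 * giving an extra 2^Max multiplier.  The 50000us and 2000000us values
 * above are conservative fallbacks for chips whose query leaves these
 * fields at zero.
 */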
586 cfi->chips[i].ref_point_counter = 0;
587 init_waitqueue_head(&(cfi->chips[i].wq));
590 map->fldrv = &cfi_intelext_chipdrv;
592 return cfi_intelext_setup(mtd);
594 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
595 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
596 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
597 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
598 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
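/*
 * Sketch of typical usage (not code from this file): a map driver probes
 * the device with do_map_probe("cfi_probe", map); the CFI probe code then
 * dispatches to cfi_cmdset_0001() when the primary vendor command set ID
 * read from the query table is 0x0001 (or 0x0003/0x0200 via the aliases
 * above).
 */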
600 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
602 struct map_info *map = mtd->priv;
603 struct cfi_private *cfi = map->fldrv_priv;
604 unsigned long offset = 0;
606 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
608 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
610 mtd->size = devsize * cfi->numchips;
612 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
613 mtd->eraseregions = kcalloc(mtd->numeraseregions,
614 sizeof(struct mtd_erase_region_info),
616 if (!mtd->eraseregions)
619 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
620 unsigned long ernum, ersize;
621 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
622 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
624 if (mtd->erasesize < ersize) {
625 mtd->erasesize = ersize;
627 for (j=0; j<cfi->numchips; j++) {
628 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
629 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
630 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
631 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
632 if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
635 offset += (ersize * ernum);
638 if (offset != devsize) {
640 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
644 for (i=0; i<mtd->numeraseregions;i++){
645 printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
646 i,(unsigned long long)mtd->eraseregions[i].offset,
647 mtd->eraseregions[i].erasesize,
648 mtd->eraseregions[i].numblocks);
651 #ifdef CONFIG_MTD_OTP
652 mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
653 mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
654 mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
655 mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
656 mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
657 mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
660 /* This function has the potential to distort the reality
661 a bit and therefore should be called last. */
662 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
665 __module_get(THIS_MODULE);
666 register_reboot_notifier(&mtd->reboot_notifier);
670 if (mtd->eraseregions)
671 for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
672 for (j=0; j<cfi->numchips; j++)
673 kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
674 kfree(mtd->eraseregions);
676 kfree(cfi->cmdset_priv);
680 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
681 struct cfi_private **pcfi)
683 struct map_info *map = mtd->priv;
684 struct cfi_private *cfi = *pcfi;
685 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
688 * Probing of multi-partition flash chips.
690 * To support multiple partitions when available, we simply arrange
691 * for each of them to have their own flchip structure even if they
692 * are on the same physical chip. This means completely recreating
693 * a new cfi_private structure right here which is a blatant code
694 * layering violation, but this is still the least intrusive
695 * arrangement at this point. This can be rearranged in the future
696 * if someone feels motivated enough. --nico
698 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
699 && extp->FeatureSupport & (1 << 9)) {
701 struct cfi_private *newcfi;
703 struct flchip_shared *shared;
704 int numregions, numparts, partshift, numvirtchips, i, j;
706 /* Protection Register info */
707 if (extp->NumProtectionFields)
708 offs = (extp->NumProtectionFields - 1) *
709 sizeof(struct cfi_intelext_otpinfo);
711 /* Burst Read info */
712 offs += extp->extra[offs+1]+2;
714 /* Number of partition regions */
715 numregions = extp->extra[offs];
716 offs += 1;
718 /* skip the sizeof(partregion) field in CFI 1.4 */
719 if (extp->MinorVersion >= '4')
720 offs += 2;
722 /* Number of hardware partitions */
723 numparts = 0;
724 for (i = 0; i < numregions; i++) {
725 struct cfi_intelext_regioninfo *rinfo;
726 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
727 numparts += rinfo->NumIdentPartitions;
728 offs += sizeof(*rinfo)
729 + (rinfo->NumBlockTypes - 1) *
730 sizeof(struct cfi_intelext_blockinfo);
736 /* Programming Region info */
737 if (extp->MinorVersion >= '4') {
738 struct cfi_intelext_programming_regioninfo *prinfo;
739 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
740 mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
741 mtd->flags &= ~MTD_BIT_WRITEABLE;
742 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
743 map->name, mtd->writesize,
744 cfi->interleave * prinfo->ControlValid,
745 cfi->interleave * prinfo->ControlInvalid);
749 * All functions below currently rely on all chips having
750 * the same geometry so we'll just assume that all hardware
751 * partitions are of the same size too.
753 partshift = cfi->chipshift - __ffs(numparts);
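/*
 * Illustration with made-up numbers (assuming, as __ffs() above does, a
 * power-of-two partition count): a 64 MiB chip (chipshift = 26) reporting
 * 4 identical hardware partitions gives partshift = 26 - __ffs(4) = 24,
 * i.e. each virtual chip below spans 1 << 24 = 16 MiB.
 */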
755 if ((1 << partshift) < mtd->erasesize) {
757 "%s: bad number of hw partitions (%d)\n",
762 numvirtchips = cfi->numchips * numparts;
763 newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
767 shared = kmalloc_array(cfi->numchips,
768 sizeof(struct flchip_shared),
774 memcpy(newcfi, cfi, sizeof(struct cfi_private));
775 newcfi->numchips = numvirtchips;
776 newcfi->chipshift = partshift;
778 chip = &newcfi->chips[0];
779 for (i = 0; i < cfi->numchips; i++) {
780 shared[i].writing = shared[i].erasing = NULL;
781 mutex_init(&shared[i].lock);
782 for (j = 0; j < numparts; j++) {
783 *chip = cfi->chips[i];
784 chip->start += j << partshift;
785 chip->priv = &shared[i];
786 /* those should be reset too since
787 they create memory references. */
788 init_waitqueue_head(&chip->wq);
789 mutex_init(&chip->mutex);
794 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
795 "--> %d partitions of %d KiB\n",
796 map->name, cfi->numchips, cfi->interleave,
797 newcfi->numchips, 1<<(newcfi->chipshift-10));
799 map->fldrv_priv = newcfi;
808 * *********** CHIP ACCESS FUNCTIONS ***********
810 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
812 DECLARE_WAITQUEUE(wait, current);
813 struct cfi_private *cfi = map->fldrv_priv;
814 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
815 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
816 unsigned long timeo = jiffies + HZ;
818 /* Prevent setting state FL_SYNCING for chip in suspended state. */
819 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
820 return -EAGAIN;
822 switch (chip->state) {
826 status = map_read(map, adr);
827 if (map_word_andequal(map, status, status_OK, status_OK))
830 /* At this point we're fine with write operations
831 in other partitions as they don't conflict. */
832 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
835 mutex_unlock(&chip->mutex);
837 mutex_lock(&chip->mutex);
838 /* Someone else might have been playing with it. */
849 !(cfip->FeatureSupport & 2) ||
850 !(mode == FL_READY || mode == FL_POINT ||
851 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
852 goto sleep;
854 /* Do not allow suspend if the read/write targets the block being erased */
855 if ((adr & chip->in_progress_block_mask) ==
856 chip->in_progress_block_addr)
857 goto sleep;
859 /* do not suspend small EBs, buggy Micron Chips */
860 if (cfi_is_micron_28F00AP30(cfi, chip) &&
861 (chip->in_progress_block_mask == ~(0x8000-1)))
862 goto sleep;
865 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
867 /* If the flash has finished erasing, then 'erase suspend'
868 * appears to make some (28F320) flash devices switch to
869 * 'read' mode. Make sure that we switch to 'read status'
870 * mode so we get the right data. --rmk
872 map_write(map, CMD(0x70), chip->in_progress_block_addr);
873 chip->oldstate = FL_ERASING;
874 chip->state = FL_ERASE_SUSPENDING;
875 chip->erase_suspended = 1;
877 status = map_read(map, chip->in_progress_block_addr);
878 if (map_word_andequal(map, status, status_OK, status_OK))
879 break;
881 if (time_after(jiffies, timeo)) {
882 /* Urgh. Resume and pretend we weren't here.
883 * Make sure we're in 'read status' mode if it had finished */
884 put_chip(map, chip, adr);
885 printk(KERN_ERR "%s: Chip not ready after erase "
886 "suspended: status = 0x%lx\n", map->name, status.x[0]);
887 return -EIO;
888 }
890 mutex_unlock(&chip->mutex);
892 mutex_lock(&chip->mutex);
893 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
894 So we can just loop here. */
896 chip->state = FL_STATUS;
897 return 0;
899 case FL_XIP_WHILE_ERASING:
900 if (mode != FL_READY && mode != FL_POINT &&
901 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
902 goto sleep;
903 chip->oldstate = chip->state;
904 chip->state = FL_READY;
908 /* The machine is rebooting now, so no one can get the chip anymore */
909 return -EIO;
911 /* Only if there's no operation suspended... */
912 if (mode == FL_READY && chip->oldstate == FL_READY)
913 return 0;
917 set_current_state(TASK_UNINTERRUPTIBLE);
918 add_wait_queue(&chip->wq, &wait);
919 mutex_unlock(&chip->mutex);
920 schedule();
921 remove_wait_queue(&chip->wq, &wait);
922 mutex_lock(&chip->mutex);
927 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
930 DECLARE_WAITQUEUE(wait, current);
934 (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
935 || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
937 * OK. We have possibility for contention on the write/erase
938 * operations which are global to the real chip and not per
939 * partition. So let's fight it over in the partition which
940 * currently has authority on the operation.
942 * The rules are as follows:
944 * - any write operation must own shared->writing.
946 * - any erase operation must own _both_ shared->writing and
947 *   shared->erasing.
948 *
949 * - contention arbitration is handled in the owner's context.
951 * The 'shared' struct can be read and/or written only when
952 * its lock is taken.
953 */
954 struct flchip_shared *shared = chip->priv;
955 struct flchip *contender;
956 mutex_lock(&shared->lock);
957 contender = shared->writing;
958 if (contender && contender != chip) {
960 * The engine to perform desired operation on this
961 * partition is already in use by someone else.
962 * Let's fight over it in the context of the chip
963 * currently using it. If it is possible to suspend,
964 * that other partition will do just that, otherwise
965 * it'll happily send us to sleep. In any case, when
966 * get_chip returns success we're clear to go ahead.
968 ret = mutex_trylock(&contender->mutex);
969 mutex_unlock(&shared->lock);
972 mutex_unlock(&chip->mutex);
973 ret = chip_ready(map, contender, contender->start, mode);
974 mutex_lock(&chip->mutex);
976 if (ret == -EAGAIN) {
977 mutex_unlock(&contender->mutex);
981 mutex_unlock(&contender->mutex);
984 mutex_lock(&shared->lock);
986 /* We should not own chip if it is already
987 * in FL_SYNCING state. Put contender and retry. */
988 if (chip->state == FL_SYNCING) {
989 put_chip(map, contender, contender->start);
990 mutex_unlock(&contender->mutex);
991 goto retry;
992 }
993 mutex_unlock(&contender->mutex);
996 /* Check if we already have suspended erase
997 * on this chip. Sleep. */
998 if (mode == FL_ERASING && shared->erasing
999 && shared->erasing->oldstate == FL_ERASING) {
1000 mutex_unlock(&shared->lock);
1001 set_current_state(TASK_UNINTERRUPTIBLE);
1002 add_wait_queue(&chip->wq, &wait);
1003 mutex_unlock(&chip->mutex);
1004 schedule();
1005 remove_wait_queue(&chip->wq, &wait);
1006 mutex_lock(&chip->mutex);
1011 shared->writing = chip;
1012 if (mode == FL_ERASING)
1013 shared->erasing = chip;
1014 mutex_unlock(&shared->lock);
1016 ret = chip_ready(map, chip, adr, mode);
1023 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
1025 struct cfi_private *cfi = map->fldrv_priv;
1028 struct flchip_shared *shared = chip->priv;
1029 mutex_lock(&shared->lock);
1030 if (shared->writing == chip && chip->oldstate == FL_READY) {
1031 /* We own the ability to write, but we're done */
1032 shared->writing = shared->erasing;
1033 if (shared->writing && shared->writing != chip) {
1034 /* give back ownership to who we loaned it from */
1035 struct flchip *loaner = shared->writing;
1036 mutex_lock(&loaner->mutex);
1037 mutex_unlock(&shared->lock);
1038 mutex_unlock(&chip->mutex);
1039 put_chip(map, loaner, loaner->start);
1040 mutex_lock(&chip->mutex);
1041 mutex_unlock(&loaner->mutex);
1045 shared->erasing = NULL;
1046 shared->writing = NULL;
1047 } else if (shared->erasing == chip && shared->writing != chip) {
1049 * We own the ability to erase without the ability
1050 * to write, which means the erase was suspended
1051 * and some other partition is currently writing.
1052 * Don't let the switch below mess things up since
1053 * we don't have ownership to resume anything.
1055 mutex_unlock(&shared->lock);
1059 mutex_unlock(&shared->lock);
1062 switch(chip->oldstate) {
1064 /* What if one interleaved chip has finished and the
1065 other hasn't? The old code would leave the finished
1066 one in READY mode. That's bad, and caused -EROFS
1067 errors to be returned from do_erase_oneblock because
1068 that's the only bit it checked for at the time.
1069 As the state machine appears to explicitly allow
1070 sending the 0x70 (Read Status) command to an erasing
1071 chip and expecting it to be ignored, that's what we
1073 map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1074 map_write(map, CMD(0x70), chip->in_progress_block_addr);
1075 chip->oldstate = FL_READY;
1076 chip->state = FL_ERASING;
1079 case FL_XIP_WHILE_ERASING:
1080 chip->state = chip->oldstate;
1081 chip->oldstate = FL_READY;
1086 case FL_JEDEC_QUERY:
1089 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1094 #ifdef CONFIG_MTD_XIP
1097 * No interrupt whatsoever can be serviced while the flash isn't in array
1098 * mode. This is ensured by the xip_disable() and xip_enable() functions
1099 * enclosing any code path where the flash is known not to be in array mode.
1100 * And within a XIP disabled code path, only functions marked with __xipram
1101 * may be called and nothing else (it's a good thing to inspect generated
1102 * assembly to make sure inline functions were actually inlined and that gcc
1103 * didn't emit calls to its own support functions). Configuring MTD CFI
1104 * support for a single buswidth and a single interleave is also recommended.
1105 */
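/*
 * The reason interrupts must stay masked: while a program or erase is in
 * progress the chip returns status rather than array data, so any
 * instruction or vector fetched from the same flash (as happens when
 * executing in place) would be garbage.  Only code resident in RAM,
 * i.e. marked __xipram, may run between xip_disable() and xip_enable().
 */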
1107 static void xip_disable(struct map_info *map, struct flchip *chip,
1110 /* TODO: chips with no XIP use should ignore and return */
1111 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
1112 local_irq_disable();
1115 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1118 struct cfi_private *cfi = map->fldrv_priv;
1119 if (chip->state != FL_POINT && chip->state != FL_READY) {
1120 map_write(map, CMD(0xff), adr);
1121 chip->state = FL_READY;
1123 (void) map_read(map, adr);
1129 * When a delay is required for the flash operation to complete, the
1130 * xip_wait_for_operation() function is polling for both the given timeout
1131 * and pending (but still masked) hardware interrupts. Whenever there is an
1132 * interrupt pending then the flash erase or write operation is suspended,
1133 * array mode restored and interrupts unmasked. Task scheduling might also
1134 * happen at that point. The CPU eventually returns from the interrupt or
1135 * the call to schedule() and the suspended flash operation is resumed for
1136 * the remainder of the delay period.
1138 * Warning: this function _will_ fool interrupt latency tracing tools.
1141 static int __xipram xip_wait_for_operation(
1142 struct map_info *map, struct flchip *chip,
1143 unsigned long adr, unsigned int chip_op_time_max)
1145 struct cfi_private *cfi = map->fldrv_priv;
1146 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1147 map_word status, OK = CMD(0x80);
1148 unsigned long usec, suspended, start, done;
1149 flstate_t oldstate, newstate;
1151 start = xip_currtime();
1152 usec = chip_op_time_max;
1159 if (xip_irqpending() && cfip &&
1160 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1161 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1162 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1164 * Let's suspend the erase or write operation when
1165 * supported. Note that we currently don't try to
1166 * suspend interleaved chips if there is already
1167 * another operation suspended (imagine what happens
1168 * when one chip was already done with the current
1169 * operation while another chip suspended it, then
1170 * we resume the whole thing at once). Yes, it
1171 * can happen!
1172 */
1174 map_write(map, CMD(0xb0), adr);
1175 map_write(map, CMD(0x70), adr);
1176 suspended = xip_currtime();
1178 if (xip_elapsed_since(suspended) > 100000) {
1180 * The chip doesn't want to suspend
1181 * after waiting for 100 msecs.
1182 * This is a critical error but there
1183 * is not much we can do here.
1187 status = map_read(map, adr);
1188 } while (!map_word_andequal(map, status, OK, OK));
1190 /* Suspend succeeded */
1191 oldstate = chip->state;
1192 if (oldstate == FL_ERASING) {
1193 if (!map_word_bitsset(map, status, CMD(0x40)))
1195 newstate = FL_XIP_WHILE_ERASING;
1196 chip->erase_suspended = 1;
1198 if (!map_word_bitsset(map, status, CMD(0x04)))
1200 newstate = FL_XIP_WHILE_WRITING;
1201 chip->write_suspended = 1;
1203 chip->state = newstate;
1204 map_write(map, CMD(0xff), adr);
1205 (void) map_read(map, adr);
1208 mutex_unlock(&chip->mutex);
1213 * We're back. However someone else might have
1214 * decided to go write to the chip if we are in
1215 * a suspended erase state. If so let's wait
1218 mutex_lock(&chip->mutex);
1219 while (chip->state != newstate) {
1220 DECLARE_WAITQUEUE(wait, current);
1221 set_current_state(TASK_UNINTERRUPTIBLE);
1222 add_wait_queue(&chip->wq, &wait);
1223 mutex_unlock(&chip->mutex);
1224 schedule();
1225 remove_wait_queue(&chip->wq, &wait);
1226 mutex_lock(&chip->mutex);
1228 /* Disallow XIP again */
1229 local_irq_disable();
1231 /* Resume the write or erase operation */
1232 map_write(map, CMD(0xd0), adr);
1233 map_write(map, CMD(0x70), adr);
1234 chip->state = oldstate;
1235 start = xip_currtime();
1236 } else if (usec >= 1000000/HZ) {
1238 * Try to save on CPU power when waiting delay
1239 * is at least a system timer tick period.
1240 * No need to be extremely accurate here.
1244 status = map_read(map, adr);
1245 done = xip_elapsed_since(start);
1246 } while (!map_word_andequal(map, status, OK, OK)
1249 return (done >= usec) ? -ETIME : 0;
1253 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1254 * the flash is actively programming or erasing since we have to poll for
1255 * the operation to complete anyway. We can't do that in a generic way with
1256 * a XIP setup so do it before the actual flash operation in this case
1257 * and stub it out from INVAL_CACHE_AND_WAIT.
1259 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1260 INVALIDATE_CACHED_RANGE(map, from, size)
1262 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1263 xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1267 #define xip_disable(map, chip, adr)
1268 #define xip_enable(map, chip, adr)
1269 #define XIP_INVAL_CACHED_RANGE(x...)
1270 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
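/*
 * Summary of the two configurations: with CONFIG_MTD_XIP the cache for
 * the target range is invalidated up front by XIP_INVAL_CACHED_RANGE()
 * and INVAL_CACHE_AND_WAIT() reduces to xip_wait_for_operation(); in the
 * normal build XIP_INVAL_CACHED_RANGE() is a no-op and
 * inval_cache_and_wait_for_operation() below performs the invalidation
 * while it waits, overlapping it with the flash operation.
 */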
1272 static int inval_cache_and_wait_for_operation(
1273 struct map_info *map, struct flchip *chip,
1274 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1275 unsigned int chip_op_time, unsigned int chip_op_time_max)
1277 struct cfi_private *cfi = map->fldrv_priv;
1278 map_word status, status_OK = CMD(0x80);
1279 int chip_state = chip->state;
1280 unsigned int timeo, sleep_time, reset_timeo;
1282 mutex_unlock(&chip->mutex);
1284 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1285 mutex_lock(&chip->mutex);
1287 timeo = chip_op_time_max;
1290 reset_timeo = timeo;
1291 sleep_time = chip_op_time / 2;
1294 if (chip->state != chip_state) {
1295 /* Someone's suspended the operation: sleep */
1296 DECLARE_WAITQUEUE(wait, current);
1297 set_current_state(TASK_UNINTERRUPTIBLE);
1298 add_wait_queue(&chip->wq, &wait);
1299 mutex_unlock(&chip->mutex);
1300 schedule();
1301 remove_wait_queue(&chip->wq, &wait);
1302 mutex_lock(&chip->mutex);
1306 status = map_read(map, cmd_adr);
1307 if (map_word_andequal(map, status, status_OK, status_OK))
1310 if (chip->erase_suspended && chip_state == FL_ERASING) {
1311 /* Erase suspend occurred while sleeping: reset the timeout */
1312 timeo = reset_timeo;
1313 chip->erase_suspended = 0;
1315 if (chip->write_suspended && chip_state == FL_WRITING) {
1316 /* Write suspend occurred while sleeping: reset the timeout */
1317 timeo = reset_timeo;
1318 chip->write_suspended = 0;
1321 map_write(map, CMD(0x70), cmd_adr);
1322 chip->state = FL_STATUS;
1326 /* OK Still waiting. Drop the lock, wait a while and retry. */
1327 mutex_unlock(&chip->mutex);
1328 if (sleep_time >= 1000000/HZ) {
1330 * Half of the normal delay still remaining
1331 * can be performed with a sleeping delay instead
1334 msleep(sleep_time/1000);
1335 timeo -= sleep_time;
1336 sleep_time = 1000000/HZ;
1342 mutex_lock(&chip->mutex);
1345 /* Done and happy. */
1346 chip->state = FL_STATUS;
1352 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1353 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
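/*
 * WAIT_TIMEOUT() is just INVAL_CACHE_AND_WAIT() with no cache range to
 * invalidate; it is used for short command-setup waits such as the
 * write-to-buffer handshake and the lock/unlock completion polls below.
 */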
1356 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1358 unsigned long cmd_addr;
1359 struct cfi_private *cfi = map->fldrv_priv;
1364 /* Ensure cmd read/writes are aligned. */
1365 cmd_addr = adr & ~(map_bankwidth(map)-1);
1367 mutex_lock(&chip->mutex);
1369 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1372 if (chip->state != FL_POINT && chip->state != FL_READY)
1373 map_write(map, CMD(0xff), cmd_addr);
1375 chip->state = FL_POINT;
1376 chip->ref_point_counter++;
1378 mutex_unlock(&chip->mutex);
1383 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1384 size_t *retlen, void **virt, resource_size_t *phys)
1386 struct map_info *map = mtd->priv;
1387 struct cfi_private *cfi = map->fldrv_priv;
1388 unsigned long ofs, last_end = 0;
1395 /* Now lock the chip(s) to POINT state */
1397 /* ofs: offset within the first chip that the first read should start */
1398 chipnum = (from >> cfi->chipshift);
1399 ofs = from - (chipnum << cfi->chipshift);
1401 *virt = map->virt + cfi->chips[chipnum].start + ofs;
1403 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1406 unsigned long thislen;
1408 if (chipnum >= cfi->numchips)
1411 /* We cannot point across chips that are virtually disjoint */
1413 last_end = cfi->chips[chipnum].start;
1414 else if (cfi->chips[chipnum].start != last_end)
1417 if ((len + ofs -1) >> cfi->chipshift)
1418 thislen = (1<<cfi->chipshift) - ofs;
1422 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1430 last_end += 1 << cfi->chipshift;
1436 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1438 struct map_info *map = mtd->priv;
1439 struct cfi_private *cfi = map->fldrv_priv;
1441 int chipnum, err = 0;
1443 /* Now unlock the chip(s) POINT state */
1445 /* ofs: offset within the first chip that the first read should start */
1446 chipnum = (from >> cfi->chipshift);
1447 ofs = from - (chipnum << cfi->chipshift);
1449 while (len && !err) {
1450 unsigned long thislen;
1451 struct flchip *chip;
1453 chip = &cfi->chips[chipnum];
1454 if (chipnum >= cfi->numchips)
1457 if ((len + ofs -1) >> cfi->chipshift)
1458 thislen = (1<<cfi->chipshift) - ofs;
1462 mutex_lock(&chip->mutex);
1463 if (chip->state == FL_POINT) {
1464 chip->ref_point_counter--;
1465 if(chip->ref_point_counter == 0)
1466 chip->state = FL_READY;
1468 printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1472 put_chip(map, chip, chip->start);
1473 mutex_unlock(&chip->mutex);
1483 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1485 unsigned long cmd_addr;
1486 struct cfi_private *cfi = map->fldrv_priv;
1491 /* Ensure cmd read/writes are aligned. */
1492 cmd_addr = adr & ~(map_bankwidth(map)-1);
1494 mutex_lock(&chip->mutex);
1495 ret = get_chip(map, chip, cmd_addr, FL_READY);
1497 mutex_unlock(&chip->mutex);
1501 if (chip->state != FL_POINT && chip->state != FL_READY) {
1502 map_write(map, CMD(0xff), cmd_addr);
1504 chip->state = FL_READY;
1507 map_copy_from(map, buf, adr, len);
1509 put_chip(map, chip, cmd_addr);
1511 mutex_unlock(&chip->mutex);
1515 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1517 struct map_info *map = mtd->priv;
1518 struct cfi_private *cfi = map->fldrv_priv;
1523 /* ofs: offset within the first chip that the first read should start */
1524 chipnum = (from >> cfi->chipshift);
1525 ofs = from - (chipnum << cfi->chipshift);
1528 unsigned long thislen;
1530 if (chipnum >= cfi->numchips)
1533 if ((len + ofs -1) >> cfi->chipshift)
1534 thislen = (1<<cfi->chipshift) - ofs;
1538 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1552 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1553 unsigned long adr, map_word datum, int mode)
1555 struct cfi_private *cfi = map->fldrv_priv;
1556 map_word status, write_cmd;
1563 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1566 write_cmd = CMD(0xc0);
1572 mutex_lock(&chip->mutex);
1573 ret = get_chip(map, chip, adr, mode);
1575 mutex_unlock(&chip->mutex);
1579 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1581 xip_disable(map, chip, adr);
1582 map_write(map, write_cmd, adr);
1583 map_write(map, datum, adr);
1586 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1587 adr, map_bankwidth(map),
1588 chip->word_write_time,
1589 chip->word_write_time_max);
1591 xip_enable(map, chip, adr);
1592 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1596 /* check for errors */
1597 status = map_read(map, adr);
1598 if (map_word_bitsset(map, status, CMD(0x1a))) {
1599 unsigned long chipstatus = MERGESTATUS(status);
1602 map_write(map, CMD(0x50), adr);
1603 map_write(map, CMD(0x70), adr);
1604 xip_enable(map, chip, adr);
1606 if (chipstatus & 0x02) {
1608 } else if (chipstatus & 0x08) {
1609 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1612 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1619 xip_enable(map, chip, adr);
1620 out: DISABLE_VPP(map);
1621 put_chip(map, chip, adr);
1622 mutex_unlock(&chip->mutex);
1627 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1629 struct map_info *map = mtd->priv;
1630 struct cfi_private *cfi = map->fldrv_priv;
1635 chipnum = to >> cfi->chipshift;
1636 ofs = to - (chipnum << cfi->chipshift);
1638 /* If it's not bus-aligned, do the first byte write */
1639 if (ofs & (map_bankwidth(map)-1)) {
1640 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1641 int gap = ofs - bus_ofs;
1645 n = min_t(int, len, map_bankwidth(map)-gap);
1646 datum = map_word_ff(map);
1647 datum = map_word_load_partial(map, datum, buf, gap, n);
1649 ret = do_write_oneword(map, &cfi->chips[chipnum],
1650 bus_ofs, datum, FL_WRITING);
1659 if (ofs >> cfi->chipshift) {
1662 if (chipnum == cfi->numchips)
1667 while(len >= map_bankwidth(map)) {
1668 map_word datum = map_word_load(map, buf);
1670 ret = do_write_oneword(map, &cfi->chips[chipnum],
1671 ofs, datum, FL_WRITING);
1675 ofs += map_bankwidth(map);
1676 buf += map_bankwidth(map);
1677 (*retlen) += map_bankwidth(map);
1678 len -= map_bankwidth(map);
1680 if (ofs >> cfi->chipshift) {
1683 if (chipnum == cfi->numchips)
1688 if (len & (map_bankwidth(map)-1)) {
1691 datum = map_word_ff(map);
1692 datum = map_word_load_partial(map, datum, buf, 0, len);
1694 ret = do_write_oneword(map, &cfi->chips[chipnum],
1695 ofs, datum, FL_WRITING);
1706 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1707 unsigned long adr, const struct kvec **pvec,
1708 unsigned long *pvec_seek, int len)
1710 struct cfi_private *cfi = map->fldrv_priv;
1711 map_word status, write_cmd, datum;
1712 unsigned long cmd_adr;
1713 int ret, wbufsize, word_gap, words;
1714 const struct kvec *vec;
1715 unsigned long vec_seek;
1716 unsigned long initial_adr;
1717 int initial_len = len;
1719 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1722 cmd_adr = adr & ~(wbufsize-1);
1724 /* Sharp LH28F640BF chips need the first address for the
1725 * Page Buffer Program command. See Table 5 of
1726 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1727 if (is_LH28F640BF(cfi))
1730 /* Let's determine this according to the interleave only once */
1731 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1733 mutex_lock(&chip->mutex);
1734 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1736 mutex_unlock(&chip->mutex);
1740 XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1742 xip_disable(map, chip, cmd_adr);
1744 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1745 [...], the device will not accept any more Write to Buffer commands".
1746 So we must check here and reset those bits if they're set. Otherwise
1747 we're just pissing in the wind */
1748 if (chip->state != FL_STATUS) {
1749 map_write(map, CMD(0x70), cmd_adr);
1750 chip->state = FL_STATUS;
1752 status = map_read(map, cmd_adr);
1753 if (map_word_bitsset(map, status, CMD(0x30))) {
1754 xip_enable(map, chip, cmd_adr);
1755 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1756 xip_disable(map, chip, cmd_adr);
1757 map_write(map, CMD(0x50), cmd_adr);
1758 map_write(map, CMD(0x70), cmd_adr);
1761 chip->state = FL_WRITING_TO_BUFFER;
1762 map_write(map, write_cmd, cmd_adr);
1763 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1765 /* Argh. Not ready for write to buffer */
1766 map_word Xstatus = map_read(map, cmd_adr);
1767 map_write(map, CMD(0x70), cmd_adr);
1768 chip->state = FL_STATUS;
1769 status = map_read(map, cmd_adr);
1770 map_write(map, CMD(0x50), cmd_adr);
1771 map_write(map, CMD(0x70), cmd_adr);
1772 xip_enable(map, chip, cmd_adr);
1773 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1774 map->name, Xstatus.x[0], status.x[0]);
1778 /* Figure out the number of words to write */
1779 word_gap = (-adr & (map_bankwidth(map)-1));
1780 words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1781 if (!word_gap) {
1782 words--;
1783 } else {
1784 word_gap = map_bankwidth(map) - word_gap;
1785 adr -= word_gap;
1786 datum = map_word_ff(map);
1787 }
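/*
 * Per the Intel write-to-buffer command sequence the count written below
 * is encoded as (number of bus words - 1); the arithmetic above arranges
 * for `words' to hold exactly that, whether or not adr was bus-aligned.
 */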
1789 /* Write length of data to come */
1790 map_write(map, CMD(words), cmd_adr );
1794 vec_seek = *pvec_seek;
1796 int n = map_bankwidth(map) - word_gap;
1797 if (n > vec->iov_len - vec_seek)
1798 n = vec->iov_len - vec_seek;
1802 if (!word_gap && len < map_bankwidth(map))
1803 datum = map_word_ff(map);
1805 datum = map_word_load_partial(map, datum,
1806 vec->iov_base + vec_seek,
1811 if (!len || word_gap == map_bankwidth(map)) {
1812 map_write(map, datum, adr);
1813 adr += map_bankwidth(map);
1818 if (vec_seek == vec->iov_len) {
1824 *pvec_seek = vec_seek;
1827 map_write(map, CMD(0xd0), cmd_adr);
1828 chip->state = FL_WRITING;
1830 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1831 initial_adr, initial_len,
1832 chip->buffer_write_time,
1833 chip->buffer_write_time_max);
1835 map_write(map, CMD(0x70), cmd_adr);
1836 chip->state = FL_STATUS;
1837 xip_enable(map, chip, cmd_adr);
1838 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1842 /* check for errors */
1843 status = map_read(map, cmd_adr);
1844 if (map_word_bitsset(map, status, CMD(0x1a))) {
1845 unsigned long chipstatus = MERGESTATUS(status);
1848 map_write(map, CMD(0x50), cmd_adr);
1849 map_write(map, CMD(0x70), cmd_adr);
1850 xip_enable(map, chip, cmd_adr);
1852 if (chipstatus & 0x02) {
1854 } else if (chipstatus & 0x08) {
1855 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1858 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1865 xip_enable(map, chip, cmd_adr);
1866 out: DISABLE_VPP(map);
1867 put_chip(map, chip, cmd_adr);
1868 mutex_unlock(&chip->mutex);
1872 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1873 unsigned long count, loff_t to, size_t *retlen)
1875 struct map_info *map = mtd->priv;
1876 struct cfi_private *cfi = map->fldrv_priv;
1877 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1880 unsigned long ofs, vec_seek, i;
1883 for (i = 0; i < count; i++)
1884 len += vecs[i].iov_len;
1889 chipnum = to >> cfi->chipshift;
1890 ofs = to - (chipnum << cfi->chipshift);
1894 /* We must not cross write block boundaries */
1895 int size = wbufsize - (ofs & (wbufsize-1));
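/*
 * Worked example (illustrative values): with a 64-byte write buffer and
 * ofs ending in 0x30, size = 64 - 0x30 = 16, so this pass only fills up
 * to the buffer boundary and the next iteration starts aligned.
 */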
1899 ret = do_write_buffer(map, &cfi->chips[chipnum],
1900 ofs, &vecs, &vec_seek, size);
1908 if (ofs >> cfi->chipshift) {
1911 if (chipnum == cfi->numchips)
1915 /* Be nice and reschedule with the chip in a usable state for other
1916 processes. */
1924 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1925 size_t len, size_t *retlen, const u_char *buf)
1929 vec.iov_base = (void *) buf;
1932 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1935 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1936 unsigned long adr, int len, void *thunk)
1938 struct cfi_private *cfi = map->fldrv_priv;
1946 mutex_lock(&chip->mutex);
1947 ret = get_chip(map, chip, adr, FL_ERASING);
1949 mutex_unlock(&chip->mutex);
1953 XIP_INVAL_CACHED_RANGE(map, adr, len);
1955 xip_disable(map, chip, adr);
1957 /* Clear the status register first */
1958 map_write(map, CMD(0x50), adr);
1961 map_write(map, CMD(0x20), adr);
1962 map_write(map, CMD(0xD0), adr);
1963 chip->state = FL_ERASING;
1964 chip->erase_suspended = 0;
1965 chip->in_progress_block_addr = adr;
1966 chip->in_progress_block_mask = ~(len - 1);
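/*
 * in_progress_block_addr/mask are consumed by chip_ready(): an erase may
 * be suspended for a read or write in another block, but not for an
 * access that falls inside the block currently being erased.
 */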
1968 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1971 chip->erase_time_max);
1973 map_write(map, CMD(0x70), adr);
1974 chip->state = FL_STATUS;
1975 xip_enable(map, chip, adr);
1976 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1980 /* We've broken this before. It doesn't hurt to be safe */
1981 map_write(map, CMD(0x70), adr);
1982 chip->state = FL_STATUS;
1983 status = map_read(map, adr);
1985 /* check for errors */
1986 if (map_word_bitsset(map, status, CMD(0x3a))) {
1987 unsigned long chipstatus = MERGESTATUS(status);
1989 /* Reset the error bits */
1990 map_write(map, CMD(0x50), adr);
1991 map_write(map, CMD(0x70), adr);
1992 xip_enable(map, chip, adr);
1994 if ((chipstatus & 0x30) == 0x30) {
1995 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1997 } else if (chipstatus & 0x02) {
1998 /* Protection bit set */
2000 } else if (chipstatus & 0x8) {
2002 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
2004 } else if (chipstatus & 0x20 && retries--) {
2005 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2007 put_chip(map, chip, adr);
2008 mutex_unlock(&chip->mutex);
2011 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2018 xip_enable(map, chip, adr);
2019 out: DISABLE_VPP(map);
2020 put_chip(map, chip, adr);
2021 mutex_unlock(&chip->mutex);
2025 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2027 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2031 static void cfi_intelext_sync (struct mtd_info *mtd)
2033 struct map_info *map = mtd->priv;
2034 struct cfi_private *cfi = map->fldrv_priv;
2036 struct flchip *chip;
2039 for (i=0; !ret && i<cfi->numchips; i++) {
2040 chip = &cfi->chips[i];
2042 mutex_lock(&chip->mutex);
2043 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2046 chip->oldstate = chip->state;
2047 chip->state = FL_SYNCING;
2048 /* No need to wake_up() on this state change -
2049 * as the whole point is that nobody can do anything
2050 * with the chip now anyway.
2053 mutex_unlock(&chip->mutex);
2056 /* Unlock the chips again */
2058 for (i--; i >=0; i--) {
2059 chip = &cfi->chips[i];
2061 mutex_lock(&chip->mutex);
2063 if (chip->state == FL_SYNCING) {
2064 chip->state = chip->oldstate;
2065 chip->oldstate = FL_READY;
2068 mutex_unlock(&chip->mutex);
2072 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2073 struct flchip *chip,
2075 int len, void *thunk)
2077 struct cfi_private *cfi = map->fldrv_priv;
2078 int status, ofs_factor = cfi->interleave * cfi->device_type;
2081 xip_disable(map, chip, adr+(2*ofs_factor));
2082 map_write(map, CMD(0x90), adr+(2*ofs_factor));
2083 chip->state = FL_JEDEC_QUERY;
2084 status = cfi_read_query(map, adr+(2*ofs_factor));
2085 xip_enable(map, chip, 0);
2086 return status;
2089 #ifdef DEBUG_LOCK_BITS
2090 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2091 struct flchip *chip,
2093 int len, void *thunk)
2095 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2096 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported then there is
	 * no need to delay.
	 *
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * let's use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
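/*
 * MTD lock/unlock entry points: apply do_xxlock_oneblock() to every erase
 * block overlapping [ofs, ofs + len) via cfi_varsize_frob().
 */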
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock, ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock, ofs, len, NULL);
#endif
	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock, ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock, ofs, len, NULL);
#endif
	return ret;
}

static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				ofs, len, NULL) ? 1 : 0;
}
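/*
 * Protection (OTP) register support.  The factory and user protection
 * registers are described by the protection-field entries of the Intel
 * extended query; every MTD *_prot_reg entry point below is a thin wrapper
 * around cfi_intelext_otp_walk() with a per-operation callback.
 */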
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
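/*
 * Walk the OTP regions of every physical chip, translating the CFI
 * protection-field descriptors into flash offsets.  With a NULL action the
 * walker emits struct otp_info records describing each group; otherwise the
 * given action (read, write or lock) is applied to the slice selected by
 * 'from' and 'len'.
 */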
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
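/*
 * Thin MTD wrappers around the walker: the final argument selects the user
 * (1) or factory (0) register set, and a NULL action asks the walker for
 * otp_info records instead of performing an operation.
 */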
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    const u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     (u_char *)buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;

	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 0);
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 1);
}

#endif
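/*
 * Power management.  For parts flagged MTD_POWERUP_LOCK (every block powers
 * up locked) that also support instant individual block locking, the current
 * lock state of each block is recorded in the region lockmaps on suspend so
 * that the blocks which were unlocked can be unlocked again on resume.
 */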
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			break;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
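/*
 * Teardown: put the chips back into array mode, drop the reboot notifier and
 * free everything the probe path allocated (extended query copy, per-chip
 * private data, the per-region lockmaps and the erase region array).
 */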
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");