/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 *	- auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging: turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0
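/*
 * Note: besides forcing do_write_oneword() everywhere, FORCE_WORD_WRITE=1
 * also compiles the fixup_use_write_buffers entry out of cfi_fixup_table
 * below (see the "#if !FORCE_WORD_WRITE" guard there).
 */
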
/* Intel chips */
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define PF38F4476	0x881c
/* STMicroelectronics chips */
#define M50LPW080	0x002F
#define M50FLW080A	0x0080
#define M50FLW080B	0x0081
/* Atmel chips */
#define AT49BV640D	0x02de
#define AT49BV640DT	0x02db
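
/*
 * Quick reference for the Intel/Sharp command codes issued throughout this
 * file (all of them appear below wrapped in CMD()):
 *
 *	0x20 block erase setup		0x50 clear status register
 *	0x40/0x41 word program		0x60 lock bit setup
 *	0x70 read status register	0x90 read ID/lock status
 *	0xb0 program/erase suspend	0xc0 OTP program
 *	0xd0 confirm/resume/unlock	0xe8/0xe9 write to buffer
 *	0xff read array
 */
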
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:           supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	cfip->FeatureSupport |= (1 << 5);
	mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
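
/*
 * CFI erase-region decoding (see cfi_intelext_setup() below): bits 0-15 of
 * EraseRegionInfo hold the number of blocks minus one, bits 16-31 the block
 * size in units of 256 bytes.  So the 0x3e patched in above means
 * 0x3e + 1 = 63 erase blocks in region 1 of the M28W320CB.
 */
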
static void fixup_use_point(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	if (!mtd->_point && map_is_linear(map)) {
		mtd->_point   = cfi_intelext_point;
		mtd->_unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->_write = cfi_intelext_write_buffers;
		mtd->_writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all cases where we know
	 * that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_intelext *extp)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    cfi->id == PF38F4476 && extp->MinorVersion == '3')
		extp->MinorVersion = '1';
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					map->name);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_intelext_erase_varsize;
	mtd->_read    = cfi_intelext_read;
	mtd->_write   = cfi_intelext_write_words;
	mtd->_sync    = cfi_intelext_sync;
	mtd->_lock    = cfi_intelext_lock;
	mtd->_unlock  = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
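	/*
	 * MaxBufWriteSize is log2 of the per-chip write buffer size in
	 * bytes, so e.g. (illustrative numbers) a 32-byte buffer
	 * (MaxBufWriteSize = 5) on two interleaved chips yields a 64-byte
	 * writebufsize.
	 */
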
	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);
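
	/*
	 * CFI timeout fields are log2-encoded: the typical time is 2^Typ
	 * (in µs for writes, in ms for erases -- hence the 1000<< below)
	 * and the maximum is the typical time shifted left by Max again.
	 * E.g. (illustrative numbers) WordWriteTimeoutTyp = 4 with
	 * WordWriteTimeoutMax = 4 gives 16 µs typical, 256 µs maximum.
	 */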
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
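
/*
 * Usage sketch (illustrative, not part of this file): a map driver does
 * not normally call cfi_cmdset_0001() directly; the generic CFI probe
 * reads the primary vendor command-set ID and dispatches to one of the
 * entry points exported above:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 *
 * where my_map is a hypothetical, already initialised struct map_info.
 */
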
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i,(unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

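		/*
		 * E.g. (illustrative numbers) a 64 MiB chip (chipshift = 26)
		 * with numparts = 4 gives partshift = 26 - 2 = 24, i.e. four
		 * 16 MiB virtual chips per physical chip.
		 */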
		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(shared);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS  ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also configuring MTD CFI
 * support to a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remaining of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING) {
			/* Erase suspend occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING) {
			/* Write suspend occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, err = 0;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len && !err) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip, chip->start);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
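	/*
	 * Intel status-register convention, as used by the 0x1a mask below
	 * and the 0x02/0x08 tests that follow: SR.1 = block locked,
	 * SR.3 = VPP low, SR.4 = program error; 0x1a is their union.
	 */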
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

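	/*
	 * A write is split into an unaligned head, bankwidth-sized body
	 * words, and an unaligned tail; e.g. (illustrative) 10 bytes at
	 * offset 0x1003 with a 4-byte bankwidth become a 1-byte head at
	 * 0x1000 (padded with 0xff), two full words, and a 1-byte tail.
	 */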
	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time,
				   chip->buffer_write_time_max);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, cmd_adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
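	/*
	 * Erase checks 0x3a: SR.5 (0x20, erase error) on top of the
	 * SR.4/SR.3/SR.1 bits tested after word writes; SR.5 and SR.4 set
	 * together (0x30) indicate a bad command sequence.
	 */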
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			DISABLE_VPP(map);
			put_chip(map, chip, adr);
			mutex_unlock(&chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported then there is
	 * no need to delay.
	 */
	/*
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * let's use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

2100 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2104 #ifdef DEBUG_LOCK_BITS
2105 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2106 __func__, ofs, len);
2107 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2111 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2112 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2114 #ifdef DEBUG_LOCK_BITS
2115 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2117 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2124 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2128 #ifdef DEBUG_LOCK_BITS
2129 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2130 __func__, ofs, len);
2131 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2135 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2136 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2138 #ifdef DEBUG_LOCK_BITS
2139 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2141 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				ofs, len, NULL) ? 1 : 0;
}

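/*
 * Protection (OTP) register support.  These registers live outside the
 * normal array address space and are reached via Read Identifier mode.
 * Each helper below implements one operation with the otp_op_t
 * signature so that cfi_intelext_otp_walk() can drive them uniformly.
 */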
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}

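/*
 * OTP data is programmed one bus word at a time via do_write_oneword().
 * Partial words are padded with 0xFF so that cells outside the request
 * are left unprogrammed.
 */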
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;
		offset += n;
		buf += n;
		size -= n;
	}
	return 0;
}

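/*
 * Lock an OTP group by clearing its bit in the protection lock word.
 * The lock word is itself one-time programmable, so the operation is
 * permanent.
 */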
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

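/*
 * Walk the protection regions of every real chip and apply 'action' to
 * the parts intersecting [from, from + len).  When action is NULL the
 * walker instead fills 'buf' with struct otp_info records, which backs
 * the get_*_prot_info() methods below.
 */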
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					if (len < sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

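/* Thin wrappers binding the OTP walker to the MTD interface. */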
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;

	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif /* CONFIG_MTD_OTP */

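/*
 * Flashes that lock automatically at power-up lose their lock state on
 * a power cycle, so the state of each block is saved in
 * region->lockmap before suspend and re-applied at resume.
 */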
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}

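/*
 * Suspend succeeds only if every chip is idle in array, status or query
 * mode.  A chip with an operation pending makes the whole device refuse
 * with -EAGAIN, in which case the chips suspended so far are woken up
 * again before returning.
 */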
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

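/* Re-unlock every block recorded as unlocked at suspend time. */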
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i = 0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}

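/*
 * Reboot notifier: switch the flash back to array mode on shutdown so
 * that a bootloader stored in flash stays executable after reset.
 */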
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);	/* kfree(NULL) is a no-op */
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");