// SPDX-License-Identifier: GPL-2.0
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 *	- auto unlock sectors on resume for auto locking flash on power up
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>

#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
/* Intel chips */
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define PF38F4476	0x881c
#define M28F00AP30	0x8963
/* STMicroelectronics chips */
#define M50LPW080	0x002F
#define M50FLW080A	0x0080
#define M50FLW080B	0x0081
/* Atmel chips */
#define AT49BV640D	0x02de
#define AT49BV640DT	0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90	0x00b0
#define LH28F640BFHE_PBTL90	0x00b1
#define LH28F640BFHE_PTTL70A	0x00b2
#define LH28F640BFHE_PBTL70A	0x00b3
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					    size_t *, const u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"


/*
 *  *********** SETUP AND PROBE BITS  ***********
 */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:           supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	cfip->FeatureSupport |= (1 << 5);
	mtd->flags |= MTD_POWERUP_LOCK;
}
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
static int is_LH28F640BF(struct cfi_private *cfi)
{
	/* Sharp LH28F640BF Family */
	if (cfi->mfr == CFI_MFR_SHARP && (
	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
		return 1;
	return 0;
}
static void fixup_LH28F640BF(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/* Reset the Partition Configuration Register on LH28F640BF
	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
	if (is_LH28F640BF(cfi)) {
		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
		map_write(map, CMD(0x60), 0);
		map_write(map, CMD(0x04), 0);

		/* We have set one single partition thus
		 * Simultaneous Operations are not allowed */
		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
		extp->FeatureSupport &= ~512;
	}
}
static void fixup_use_point(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	if (!mtd->_point && map_is_linear(map)) {
		mtd->_point   = cfi_intelext_point;
		mtd->_unpoint = cfi_intelext_unpoint;
	}
}
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->_write = cfi_intelext_write_buffers;
		mtd->_writev = cfi_intelext_writev;
	}
}
/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common, and it looks like the device IDs are shared as well.
	 * This table picks out all the cases where we know that to be
	 * true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_intelext *extp)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    cfi->id == PF38F4476 && extp->MinorVersion == '3')
		extp->MinorVersion = '1';
}
static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
	/*
	 * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy
	 * w.r.t. Erase Suspend for their small Erase Blocks (0x8000).
	 */
	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
		return 1;
	return 0;
}
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		if (extp->NumProtectionFields)
			extra_size += (extp->NumProtectionFields - 1) *
				      sizeof(struct cfi_intelext_otpinfo);
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		unsigned int nb_parts, i;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					map->name);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_intelext_erase_varsize;
	mtd->_read    = cfi_intelext_read;
	mtd->_write   = cfi_intelext_write_words;
	mtd->_sync    = cfi_intelext_sync;
	mtd->_lock    = cfi_intelext_lock;
	mtd->_unlock  = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
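
	/*
	 * Worked example (illustrative numbers, not from a datasheet):
	 * with two interleaved chips (cfi_interleave(cfi) == 2) each
	 * reporting MaxBufWriteSize == 5 (a 2^5 = 32 byte buffer per
	 * chip), writebufsize becomes 2 << 5 = 64 bytes.
	 */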

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1)
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
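
/*
 * Usage sketch (not part of this file): a map driver typically reaches
 * this command set indirectly through the CFI probe. The map_info values
 * below are made-up for illustration; real drivers fill them in from
 * their platform resources:
 *
 *	static struct map_info my_map = {
 *		.name      = "example-nor",
 *		.size      = 0x1000000,
 *		.bankwidth = 2,
 *	};
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *
 * When the CFI primary vendor command set reads 0x0001 (or 0x0003/0x0200,
 * aliased above), the probe ends up calling cfi_cmdset_0001() to install
 * the methods defined in this file.
 */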

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kcalloc(mtd->numeraseregions,
				    sizeof(struct mtd_erase_region_info),
				    GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
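
		/*
		 * Worked example (illustrative value): EraseRegionInfo ==
		 * 0x0100003f encodes 0x3f + 1 = 64 blocks, each of
		 * ((0x0100003f >> 8) & ~0xff) = 0x10000 bytes (64 KiB)
		 * per chip, scaled by the interleave factor.
		 */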

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
				goto setup_err;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd->eraseregions)
		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
			for (j=0; j<cfi->numchips; j++)
				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		int offs = 0;
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		if (extp->NumProtectionFields)
			offs = (extp->NumProtectionFields - 1) *
			       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
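
		/*
		 * Worked example (illustrative): a 16 MiB chip
		 * (chipshift == 24) reporting numparts == 4 identical
		 * partitions gives partshift = 24 - __ffs(4) = 22,
		 * i.e. four 4 MiB virtual chips.
		 */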

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       map->name, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
				 GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc_array(cfi->numchips,
				       sizeof(struct flchip_shared),
				       GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		fallthrough;
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Do not allow suspend if the read/write falls in the erasing block */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* do not suspend small EBs, buggy Micron Chips */
		if (cfi_is_micron_28F00AP30(cfi, chip) &&
		    (chip->in_progress_block_mask == ~(0x8000-1)))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, chip->in_progress_block_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also, configuring MTD CFI
 * support to a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	mutex_unlock(&chip->mutex);
	if (inval_adr)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING) {
			/* Erase suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING) {
			/* Write suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);
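
	/*
	 * Example (illustrative): with map_bankwidth(map) == 4, a request
	 * at adr 0x1003 issues its commands at cmd_addr 0x1000, keeping
	 * command writes bus-aligned.
	 */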

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret;

	if (!map->virt)
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, err = 0;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len && !err) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip, chip->start);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Sharp LH28F640BF chips need the first address for the
	 * Page Buffer Program command. See Table 5 of
	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
	if (is_LH28F640BF(cfi))
		cmd_adr = adr;

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
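
	/*
	 * Worked example (illustrative): bankwidth 4, adr == 0x1002,
	 * len == 10. word_gap = (-0x1002) & 3 = 2, so
	 * words = DIV_ROUND_UP(10 - 2, 4) = 2; below, word_gap is rebased
	 * to 4 - 2 = 2 bytes of 0xff padding at the start of the first
	 * bus word and adr is pulled back to 0x1000.
	 */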
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time,
				   chip->buffer_write_time_max);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, cmd_adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			DISABLE_VPP(map);
			put_chip(map, chip, adr);
			mutex_unlock(&chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
				instr->len, NULL);
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif
2102 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
2103 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	/*
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * lets use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
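
/*
 * MTD lock/unlock entry points. With DEBUG_LOCK_BITS defined the lock
 * status of the affected range is dumped before and after the
 * operation.
 */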
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				ofs, len, NULL) ? 1 : 0;
}

#ifdef CONFIG_MTD_OTP
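
/*
 * Protection register (OTP) support. Intel chips expose a factory
 * programmed region and one or more user programmable regions; all
 * accesses below funnel through an otp_op_t callback so that
 * cfi_intelext_otp_walk() can stay generic.
 */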
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}
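
/*
 * OTP writes reuse do_write_oneword() in FL_OTP_WRITE state. Partial
 * words are padded with 0xff, which leaves the untouched bits alone
 * since flash writes can only clear bits.
 */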
static int __xipram
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}
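
/*
 * Locking an OTP group clears that group's bit in the protection lock
 * register - itself one-time programmable, so this is irreversible.
 */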
static int __xipram
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
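
/*
 * Generic walker for all OTP operations: it iterates over each
 * physical chip, then over every protection field and group,
 * translating the CFI-relative register addresses into map offsets.
 * A NULL action means "enumerate": buf is filled with struct otp_info
 * records describing each group instead of data.
 */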
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    const u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     (u_char *)buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 0);
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 1);
}

#endif
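
/*
 * The lock state of every block is recorded in the per-region lockmap
 * on suspend and blocks recorded as unlocked are unlocked again on
 * resume. This matters on parts flagged MTD_POWERUP_LOCK, which come
 * out of a power cycle with all blocks locked.
 */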
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++){
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			break;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
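
/*
 * Resume assumes the chip may have been power cycled while suspended:
 * the LH28F640BF partition configuration is refreshed and the chip is
 * put back into array mode before locks are restored.
 */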
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
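
/*
 * Shared by the reboot notifier and destroy(): force each chip back
 * into array (read) mode so that a bootloader stored in flash remains
 * readable across a soft reboot.
 */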
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");