mtd: delete non-required instances of include <linux/init.h>
drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

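/*
 * Note: with FORCE_WORD_WRITE left at 0, fixup_use_write_buffers() below
 * installs the buffered write methods on chips that advertise a non-zero
 * buffer-write timeout; setting it to 1 forces the slower word-at-a-time
 * path, which can be useful when debugging write problems.
 */
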
/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

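/*
 * fwh_lock.h supplies fixup_use_fwh_lock(), used by jedec_fixup_table
 * below for firmware-hub style parts; it is included here, after the
 * chip-access prototypes above that its locking helpers rely on.
 */
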

/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

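/*
 * On a linear (directly memory-mapped) map we can support mtd_point() by
 * handing callers a pointer straight into the flash instead of copying
 * the data out through a buffer.
 */
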
static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

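/*
 * Fixup entries are matched on the probed (manufacturer ID, device ID)
 * pair, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards; every matching
 * entry is applied, in table order.
 */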
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common, and it looks like the device IDs are as well.  This
         * table picks up all cases where we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

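        /*
         * The extended query table has a variable-length tail (extra[]),
         * so it is read in two passes: start with sizeof(*extp) bytes and,
         * whenever the parsing below runs past what was fetched, jump back
         * to 'again' with a larger extp_size (capped at 4096 bytes).
         */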
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n", extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

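        /*
         * CFI encodes timeouts as powers of two: typical word/buffer
         * program times are 2^Typ microseconds and typical block erase
         * times 2^Typ milliseconds, with the worst case being the typical
         * value shifted left by the corresponding Max field.  Generous
         * defaults (50 ms per word, 2 s per erase) are used when a field
         * is zero.
         */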
        for (i = 0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        /* printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips); */

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

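        /*
         * Each EraseRegionInfo word packs one region's geometry: bits
         * 31..16 hold the block size in units of 256 bytes and bits 15..0
         * the number of blocks minus one, so ersize below works out to
         * (info >> 16) * 256 scaled by the interleave.
         */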
        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i = 0; i < mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i, (unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

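                /*
                 * Walk the variable-length extra[] tail to locate the
                 * partition region records; the offsets computed here
                 * mirror the parsing done in read_pri_intelext() above.
                 */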
                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

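        /*
         * Return contract: 0 means the chip is ours in the requested mode
         * (with the mutex held); -EAGAIN tells the caller to re-examine
         * the chip state and retry; -EIO means the chip is unusable.
         */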
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for a flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever an
 * interrupt is pending, the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
        xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

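/*
 * Poll for completion of an in-flight program/erase operation while also
 * invalidating any CPU cache over the range just modified.  chip_op_time
 * is the typical operation time in microseconds and only sizes the sleep
 * quantum; chip_op_time_max is the hard timeout (0 selects a 500 ms
 * default).
 */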
1204 static int inval_cache_and_wait_for_operation(
1205                 struct map_info *map, struct flchip *chip,
1206                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1207                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1208 {
1209         struct cfi_private *cfi = map->fldrv_priv;
1210         map_word status, status_OK = CMD(0x80);
1211         int chip_state = chip->state;
1212         unsigned int timeo, sleep_time, reset_timeo;
1213
1214         mutex_unlock(&chip->mutex);
1215         if (inval_len)
1216                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1217         mutex_lock(&chip->mutex);
1218
1219         timeo = chip_op_time_max;
1220         if (!timeo)
1221                 timeo = 500000;
1222         reset_timeo = timeo;
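             /*
              * Sleep for half the nominal operation time first;
              * subsequent waits fall back to one-tick sleeps.
              */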
1223         sleep_time = chip_op_time / 2;
1224
1225         for (;;) {
1226                 if (chip->state != chip_state) {
1227                         /* Someone's suspended the operation: sleep */
1228                         DECLARE_WAITQUEUE(wait, current);
1229                         set_current_state(TASK_UNINTERRUPTIBLE);
1230                         add_wait_queue(&chip->wq, &wait);
1231                         mutex_unlock(&chip->mutex);
1232                         schedule();
1233                         remove_wait_queue(&chip->wq, &wait);
1234                         mutex_lock(&chip->mutex);
1235                         continue;
1236                 }
1237
1238                 status = map_read(map, cmd_adr);
1239                 if (map_word_andequal(map, status, status_OK, status_OK))
1240                         break;
1241
1242                 if (chip->erase_suspended && chip_state == FL_ERASING) {
1243                         /* Erase suspend occurred while we slept: reset the timeout */
1244                         timeo = reset_timeo;
1245                         chip->erase_suspended = 0;
1246                 }
1247                 if (chip->write_suspended && chip_state == FL_WRITING) {
1248                         /* Write suspend occurred while we slept: reset the timeout */
1249                         timeo = reset_timeo;
1250                         chip->write_suspended = 0;
1251                 }
1252                 if (!timeo) {
1253                         map_write(map, CMD(0x70), cmd_adr);
1254                         chip->state = FL_STATUS;
1255                         return -ETIME;
1256                 }
1257
1258                 /* OK, still waiting. Drop the lock, wait a while and retry. */
1259                 mutex_unlock(&chip->mutex);
1260                 if (sleep_time >= 1000000/HZ) {
1261                         /*
1262                          * The remaining delay is at least one timer
1263                          * tick, so it can be performed as a sleeping
1264                          * delay instead of busy waiting.
1265                          */
1266                         msleep(sleep_time/1000);
1267                         timeo -= sleep_time;
1268                         sleep_time = 1000000/HZ;
1269                 } else {
1270                         udelay(1);
1271                         cond_resched();
1272                         timeo--;
1273                 }
1274                 mutex_lock(&chip->mutex);
1275         }
1276
1277         /* Done and happy. */
1278         chip->state = FL_STATUS;
1279         return 0;
1280 }
1281
1282 #endif
1283
1284 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1285         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1286
1287
1288 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1289 {
1290         unsigned long cmd_addr;
1291         struct cfi_private *cfi = map->fldrv_priv;
1292         int ret = 0;
1293
1294         adr += chip->start;
1295
1296         /* Ensure cmd read/writes are aligned. */
1297         cmd_addr = adr & ~(map_bankwidth(map)-1);
1298
1299         mutex_lock(&chip->mutex);
1300
1301         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1302
1303         if (!ret) {
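                 /* 0xff returns the chip to read array mode if it isn't there already */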
1304                 if (chip->state != FL_POINT && chip->state != FL_READY)
1305                         map_write(map, CMD(0xff), cmd_addr);
1306
1307                 chip->state = FL_POINT;
1308                 chip->ref_point_counter++;
1309         }
1310         mutex_unlock(&chip->mutex);
1311
1312         return ret;
1313 }
1314
1315 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1316                 size_t *retlen, void **virt, resource_size_t *phys)
1317 {
1318         struct map_info *map = mtd->priv;
1319         struct cfi_private *cfi = map->fldrv_priv;
1320         unsigned long ofs, last_end = 0;
1321         int chipnum;
1322         int ret = 0;
1323
1324         if (!map->virt)
1325                 return -EINVAL;
1326
1327         /* Now lock the chip(s) to POINT state */
1328
1329         /* ofs: offset within the first chip that the first read should start */
1330         chipnum = (from >> cfi->chipshift);
1331         ofs = from - (chipnum << cfi->chipshift);
1332
1333         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1334         if (phys)
1335                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1336
1337         while (len) {
1338                 unsigned long thislen;
1339
1340                 if (chipnum >= cfi->numchips)
1341                         break;
1342
1343                 /* We cannot point across chips that are virtually disjoint */
1344                 if (!last_end)
1345                         last_end = cfi->chips[chipnum].start;
1346                 else if (cfi->chips[chipnum].start != last_end)
1347                         break;
1348
1349                 if ((len + ofs - 1) >> cfi->chipshift)
1350                         thislen = (1 << cfi->chipshift) - ofs;
1351                 else
1352                         thislen = len;
1353
1354                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1355                 if (ret)
1356                         break;
1357
1358                 *retlen += thislen;
1359                 len -= thislen;
1360
1361                 ofs = 0;
1362                 last_end += 1 << cfi->chipshift;
1363                 chipnum++;
1364         }
1365         return 0;
1366 }
1367
1368 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1369 {
1370         struct map_info *map = mtd->priv;
1371         struct cfi_private *cfi = map->fldrv_priv;
1372         unsigned long ofs;
1373         int chipnum, err = 0;
1374
1375         /* Now unlock the chip(s) POINT state */
1376
1377         /* ofs: offset within the first chip that the first read should start */
1378         chipnum = (from >> cfi->chipshift);
1379         ofs = from - (chipnum <<  cfi->chipshift);
1380
1381         while (len && !err) {
1382                 unsigned long thislen;
1383                 struct flchip *chip;
1384
1385                 if (chipnum >= cfi->numchips)
1386                         break;
1387                 chip = &cfi->chips[chipnum];
1388
1389                 if ((len + ofs - 1) >> cfi->chipshift)
1390                         thislen = (1 << cfi->chipshift) - ofs;
1391                 else
1392                         thislen = len;
1393
1394                 mutex_lock(&chip->mutex);
1395                 if (chip->state == FL_POINT) {
1396                         chip->ref_point_counter--;
1397                         if (chip->ref_point_counter == 0)
1398                                 chip->state = FL_READY;
1399                 } else {
1400                         printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1401                         err = -EINVAL;
1402                 }
1403
1404                 put_chip(map, chip, chip->start);
1405                 mutex_unlock(&chip->mutex);
1406
1407                 len -= thislen;
1408                 ofs = 0;
1409                 chipnum++;
1410         }
1411
1412         return err;
1413 }
1414
1415 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1416 {
1417         unsigned long cmd_addr;
1418         struct cfi_private *cfi = map->fldrv_priv;
1419         int ret;
1420
1421         adr += chip->start;
1422
1423         /* Ensure cmd read/writes are aligned. */
1424         cmd_addr = adr & ~(map_bankwidth(map)-1);
1425
1426         mutex_lock(&chip->mutex);
1427         ret = get_chip(map, chip, cmd_addr, FL_READY);
1428         if (ret) {
1429                 mutex_unlock(&chip->mutex);
1430                 return ret;
1431         }
1432
1433         if (chip->state != FL_POINT && chip->state != FL_READY) {
1434                 map_write(map, CMD(0xff), cmd_addr);
1435
1436                 chip->state = FL_READY;
1437         }
1438
1439         map_copy_from(map, buf, adr, len);
1440
1441         put_chip(map, chip, cmd_addr);
1442
1443         mutex_unlock(&chip->mutex);
1444         return 0;
1445 }
1446
1447 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1448 {
1449         struct map_info *map = mtd->priv;
1450         struct cfi_private *cfi = map->fldrv_priv;
1451         unsigned long ofs;
1452         int chipnum;
1453         int ret = 0;
1454
1455         /* ofs: offset within the first chip that the first read should start */
1456         chipnum = (from >> cfi->chipshift);
1457         ofs = from - (chipnum <<  cfi->chipshift);
1458
1459         while (len) {
1460                 unsigned long thislen;
1461
1462                 if (chipnum >= cfi->numchips)
1463                         break;
1464
1465                 if ((len + ofs - 1) >> cfi->chipshift)
1466                         thislen = (1 << cfi->chipshift) - ofs;
1467                 else
1468                         thislen = len;
1469
1470                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1471                 if (ret)
1472                         break;
1473
1474                 *retlen += thislen;
1475                 len -= thislen;
1476                 buf += thislen;
1477
1478                 ofs = 0;
1479                 chipnum++;
1480         }
1481         return ret;
1482 }
1483
1484 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1485                                      unsigned long adr, map_word datum, int mode)
1486 {
1487         struct cfi_private *cfi = map->fldrv_priv;
1488         map_word status, write_cmd;
1489         int ret = 0;
1490
1491         adr += chip->start;
1492
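             /* 0x40/0x41 are word-program commands; 0xc0 programs the OTP/protection register */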
1493         switch (mode) {
1494         case FL_WRITING:
1495                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1496                 break;
1497         case FL_OTP_WRITE:
1498                 write_cmd = CMD(0xc0);
1499                 break;
1500         default:
1501                 return -EINVAL;
1502         }
1503
1504         mutex_lock(&chip->mutex);
1505         ret = get_chip(map, chip, adr, mode);
1506         if (ret) {
1507                 mutex_unlock(&chip->mutex);
1508                 return ret;
1509         }
1510
1511         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1512         ENABLE_VPP(map);
1513         xip_disable(map, chip, adr);
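             /* program sequence: setup command first, then the data word itself */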
1514         map_write(map, write_cmd, adr);
1515         map_write(map, datum, adr);
1516         chip->state = mode;
1517
1518         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1519                                    adr, map_bankwidth(map),
1520                                    chip->word_write_time,
1521                                    chip->word_write_time_max);
1522         if (ret) {
1523                 xip_enable(map, chip, adr);
1524                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1525                 goto out;
1526         }
1527
1528         /* check for errors */
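             /* 0x1a tests SR.1 (block locked), SR.3 (VPP low) and SR.4 (program failure) */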
1529         status = map_read(map, adr);
1530         if (map_word_bitsset(map, status, CMD(0x1a))) {
1531                 unsigned long chipstatus = MERGESTATUS(status);
1532
1533                 /* reset status */
1534                 map_write(map, CMD(0x50), adr);
1535                 map_write(map, CMD(0x70), adr);
1536                 xip_enable(map, chip, adr);
1537
1538                 if (chipstatus & 0x02) {
1539                         ret = -EROFS;
1540                 } else if (chipstatus & 0x08) {
1541                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1542                         ret = -EIO;
1543                 } else {
1544                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1545                         ret = -EINVAL;
1546                 }
1547
1548                 goto out;
1549         }
1550
1551         xip_enable(map, chip, adr);
1552  out:   DISABLE_VPP(map);
1553         put_chip(map, chip, adr);
1554         mutex_unlock(&chip->mutex);
1555         return ret;
1556 }
1557
1558
1559 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1560 {
1561         struct map_info *map = mtd->priv;
1562         struct cfi_private *cfi = map->fldrv_priv;
1563         int ret = 0;
1564         int chipnum;
1565         unsigned long ofs;
1566
1567         chipnum = to >> cfi->chipshift;
1568         ofs = to  - (chipnum << cfi->chipshift);
1569
1570         /* If it's not bus-aligned, do the leading partial-word write */
1571         if (ofs & (map_bankwidth(map)-1)) {
1572                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1573                 int gap = ofs - bus_ofs;
1574                 int n;
1575                 map_word datum;
1576
1577                 n = min_t(int, len, map_bankwidth(map)-gap);
1578                 datum = map_word_ff(map);
1579                 datum = map_word_load_partial(map, datum, buf, gap, n);
1580
1581                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1582                                                bus_ofs, datum, FL_WRITING);
1583                 if (ret)
1584                         return ret;
1585
1586                 len -= n;
1587                 ofs += n;
1588                 buf += n;
1589                 (*retlen) += n;
1590
1591                 if (ofs >> cfi->chipshift) {
1592                         chipnum++;
1593                         ofs = 0;
1594                         if (chipnum == cfi->numchips)
1595                                 return 0;
1596                 }
1597         }
1598
1599         while (len >= map_bankwidth(map)) {
1600                 map_word datum = map_word_load(map, buf);
1601
1602                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1603                                        ofs, datum, FL_WRITING);
1604                 if (ret)
1605                         return ret;
1606
1607                 ofs += map_bankwidth(map);
1608                 buf += map_bankwidth(map);
1609                 (*retlen) += map_bankwidth(map);
1610                 len -= map_bankwidth(map);
1611
1612                 if (ofs >> cfi->chipshift) {
1613                         chipnum++;
1614                         ofs = 0;
1615                         if (chipnum == cfi->numchips)
1616                                 return 0;
1617                 }
1618         }
1619
1620         if (len & (map_bankwidth(map)-1)) {
1621                 map_word datum;
1622
1623                 datum = map_word_ff(map);
1624                 datum = map_word_load_partial(map, datum, buf, 0, len);
1625
1626                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1627                                        ofs, datum, FL_WRITING);
1628                 if (ret)
1629                         return ret;
1630
1631                 (*retlen) += len;
1632         }
1633
1634         return 0;
1635 }
1636
1637
1638 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1639                                     unsigned long adr, const struct kvec **pvec,
1640                                     unsigned long *pvec_seek, int len)
1641 {
1642         struct cfi_private *cfi = map->fldrv_priv;
1643         map_word status, write_cmd, datum;
1644         unsigned long cmd_adr;
1645         int ret, wbufsize, word_gap, words;
1646         const struct kvec *vec;
1647         unsigned long vec_seek;
1648         unsigned long initial_adr;
1649         int initial_len = len;
1650
1651         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1652         adr += chip->start;
1653         initial_adr = adr;
1654         cmd_adr = adr & ~(wbufsize-1);
1655
1656         /* Determine the interleave-expanded command word only once */
1657         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1658
1659         mutex_lock(&chip->mutex);
1660         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1661         if (ret) {
1662                 mutex_unlock(&chip->mutex);
1663                 return ret;
1664         }
1665
1666         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1667         ENABLE_VPP(map);
1668         xip_disable(map, chip, cmd_adr);
1669
1670         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1671            [...], the device will not accept any more Write to Buffer commands".
1672            So we must check here and reset those bits if they're set. Otherwise
1673            we're just pissing in the wind */
1674         if (chip->state != FL_STATUS) {
1675                 map_write(map, CMD(0x70), cmd_adr);
1676                 chip->state = FL_STATUS;
1677         }
1678         status = map_read(map, cmd_adr);
1679         if (map_word_bitsset(map, status, CMD(0x30))) {
1680                 xip_enable(map, chip, cmd_adr);
1681                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1682                 xip_disable(map, chip, cmd_adr);
1683                 map_write(map, CMD(0x50), cmd_adr);
1684                 map_write(map, CMD(0x70), cmd_adr);
1685         }
1686
1687         chip->state = FL_WRITING_TO_BUFFER;
1688         map_write(map, write_cmd, cmd_adr);
1689         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1690         if (ret) {
1691                 /* Argh. Not ready for write to buffer */
1692                 map_word Xstatus = map_read(map, cmd_adr);
1693                 map_write(map, CMD(0x70), cmd_adr);
1694                 chip->state = FL_STATUS;
1695                 status = map_read(map, cmd_adr);
1696                 map_write(map, CMD(0x50), cmd_adr);
1697                 map_write(map, CMD(0x70), cmd_adr);
1698                 xip_enable(map, chip, cmd_adr);
1699                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1700                                 map->name, Xstatus.x[0], status.x[0]);
1701                 goto out;
1702         }
1703
1704         /* Figure out the number of words to write */
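             /* The write-to-buffer command takes a (word count - 1) value;
                any leading gap down to the bus boundary is padded with 0xff,
                which programs nothing. */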
1705         word_gap = (-adr & (map_bankwidth(map)-1));
1706         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1707         if (!word_gap) {
1708                 words--;
1709         } else {
1710                 word_gap = map_bankwidth(map) - word_gap;
1711                 adr -= word_gap;
1712                 datum = map_word_ff(map);
1713         }
1714
1715         /* Write length of data to come */
1716         map_write(map, CMD(words), cmd_adr);
1717
1718         /* Write data */
1719         vec = *pvec;
1720         vec_seek = *pvec_seek;
1721         do {
1722                 int n = map_bankwidth(map) - word_gap;
1723                 if (n > vec->iov_len - vec_seek)
1724                         n = vec->iov_len - vec_seek;
1725                 if (n > len)
1726                         n = len;
1727
1728                 if (!word_gap && len < map_bankwidth(map))
1729                         datum = map_word_ff(map);
1730
1731                 datum = map_word_load_partial(map, datum,
1732                                               vec->iov_base + vec_seek,
1733                                               word_gap, n);
1734
1735                 len -= n;
1736                 word_gap += n;
1737                 if (!len || word_gap == map_bankwidth(map)) {
1738                         map_write(map, datum, adr);
1739                         adr += map_bankwidth(map);
1740                         word_gap = 0;
1741                 }
1742
1743                 vec_seek += n;
1744                 if (vec_seek == vec->iov_len) {
1745                         vec++;
1746                         vec_seek = 0;
1747                 }
1748         } while (len);
1749         *pvec = vec;
1750         *pvec_seek = vec_seek;
1751
1752         /* GO GO GO */
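             /* 0xd0 confirms the buffer contents and starts the actual programming */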
1753         map_write(map, CMD(0xd0), cmd_adr);
1754         chip->state = FL_WRITING;
1755
1756         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1757                                    initial_adr, initial_len,
1758                                    chip->buffer_write_time,
1759                                    chip->buffer_write_time_max);
1760         if (ret) {
1761                 map_write(map, CMD(0x70), cmd_adr);
1762                 chip->state = FL_STATUS;
1763                 xip_enable(map, chip, cmd_adr);
1764                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1765                 goto out;
1766         }
1767
1768         /* check for errors */
1769         status = map_read(map, cmd_adr);
1770         if (map_word_bitsset(map, status, CMD(0x1a))) {
1771                 unsigned long chipstatus = MERGESTATUS(status);
1772
1773                 /* reset status */
1774                 map_write(map, CMD(0x50), cmd_adr);
1775                 map_write(map, CMD(0x70), cmd_adr);
1776                 xip_enable(map, chip, cmd_adr);
1777
1778                 if (chipstatus & 0x02) {
1779                         ret = -EROFS;
1780                 } else if (chipstatus & 0x08) {
1781                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1782                         ret = -EIO;
1783                 } else {
1784                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1785                         ret = -EINVAL;
1786                 }
1787
1788                 goto out;
1789         }
1790
1791         xip_enable(map, chip, cmd_adr);
1792  out:   DISABLE_VPP(map);
1793         put_chip(map, chip, cmd_adr);
1794         mutex_unlock(&chip->mutex);
1795         return ret;
1796 }
1797
1798 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1799                                 unsigned long count, loff_t to, size_t *retlen)
1800 {
1801         struct map_info *map = mtd->priv;
1802         struct cfi_private *cfi = map->fldrv_priv;
1803         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1804         int ret = 0;
1805         int chipnum;
1806         unsigned long ofs, vec_seek, i;
1807         size_t len = 0;
1808
1809         for (i = 0; i < count; i++)
1810                 len += vecs[i].iov_len;
1811
1812         if (!len)
1813                 return 0;
1814
1815         chipnum = to >> cfi->chipshift;
1816         ofs = to - (chipnum << cfi->chipshift);
1817         vec_seek = 0;
1818
1819         do {
1820                 /* We must not cross write block boundaries */
1821                 int size = wbufsize - (ofs & (wbufsize-1));
1822
1823                 if (size > len)
1824                         size = len;
1825                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1826                                       ofs, &vecs, &vec_seek, size);
1827                 if (ret)
1828                         return ret;
1829
1830                 ofs += size;
1831                 (*retlen) += size;
1832                 len -= size;
1833
1834                 if (ofs >> cfi->chipshift) {
1835                         chipnum++;
1836                         ofs = 0;
1837                         if (chipnum == cfi->numchips)
1838                                 return 0;
1839                 }
1840
1841                 /* Be nice and reschedule with the chip in a usable state for other
1842                    processes. */
1843                 cond_resched();
1844
1845         } while (len);
1846
1847         return 0;
1848 }
1849
1850 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1851                                        size_t len, size_t *retlen, const u_char *buf)
1852 {
1853         struct kvec vec;
1854
1855         vec.iov_base = (void *) buf;
1856         vec.iov_len = len;
1857
1858         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1859 }
1860
1861 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1862                                       unsigned long adr, int len, void *thunk)
1863 {
1864         struct cfi_private *cfi = map->fldrv_priv;
1865         map_word status;
1866         int retries = 3;
1867         int ret;
1868
1869         adr += chip->start;
1870
1871  retry:
1872         mutex_lock(&chip->mutex);
1873         ret = get_chip(map, chip, adr, FL_ERASING);
1874         if (ret) {
1875                 mutex_unlock(&chip->mutex);
1876                 return ret;
1877         }
1878
1879         XIP_INVAL_CACHED_RANGE(map, adr, len);
1880         ENABLE_VPP(map);
1881         xip_disable(map, chip, adr);
1882
1883         /* Clear the status register first */
1884         map_write(map, CMD(0x50), adr);
1885
1886         /* Now erase */
1887         map_write(map, CMD(0x20), adr);
1888         map_write(map, CMD(0xD0), adr);
1889         chip->state = FL_ERASING;
1890         chip->erase_suspended = 0;
1891
1892         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1893                                    adr, len,
1894                                    chip->erase_time,
1895                                    chip->erase_time_max);
1896         if (ret) {
1897                 map_write(map, CMD(0x70), adr);
1898                 chip->state = FL_STATUS;
1899                 xip_enable(map, chip, adr);
1900                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1901                 goto out;
1902         }
1903
1904         /* We've broken this before. It doesn't hurt to be safe */
1905         map_write(map, CMD(0x70), adr);
1906         chip->state = FL_STATUS;
1907         status = map_read(map, adr);
1908
1909         /* check for errors */
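             /* 0x3a tests SR.1 (locked), SR.3 (VPP low), SR.4 (program) and SR.5 (erase) */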
1910         if (map_word_bitsset(map, status, CMD(0x3a))) {
1911                 unsigned long chipstatus = MERGESTATUS(status);
1912
1913                 /* Reset the error bits */
1914                 map_write(map, CMD(0x50), adr);
1915                 map_write(map, CMD(0x70), adr);
1916                 xip_enable(map, chip, adr);
1917
1918                 if ((chipstatus & 0x30) == 0x30) {
1919                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1920                         ret = -EINVAL;
1921                 } else if (chipstatus & 0x02) {
1922                         /* Protection bit set */
1923                         ret = -EROFS;
1924                 } else if (chipstatus & 0x8) {
1925                         /* Voltage */
1926                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1927                         ret = -EIO;
1928                 } else if (chipstatus & 0x20 && retries--) {
1929                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1930                         DISABLE_VPP(map);
1931                         put_chip(map, chip, adr);
1932                         mutex_unlock(&chip->mutex);
1933                         goto retry;
1934                 } else {
1935                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1936                         ret = -EIO;
1937                 }
1938
1939                 goto out;
1940         }
1941
1942         xip_enable(map, chip, adr);
1943  out:   DISABLE_VPP(map);
1944         put_chip(map, chip, adr);
1945         mutex_unlock(&chip->mutex);
1946         return ret;
1947 }
1948
1949 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1950 {
1951         unsigned long ofs, len;
1952         int ret;
1953
1954         ofs = instr->addr;
1955         len = instr->len;
1956
1957         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1958         if (ret)
1959                 return ret;
1960
1961         instr->state = MTD_ERASE_DONE;
1962         mtd_erase_callback(instr);
1963
1964         return 0;
1965 }
1966
1967 static void cfi_intelext_sync (struct mtd_info *mtd)
1968 {
1969         struct map_info *map = mtd->priv;
1970         struct cfi_private *cfi = map->fldrv_priv;
1971         int i;
1972         struct flchip *chip;
1973         int ret = 0;
1974
1975         for (i=0; !ret && i<cfi->numchips; i++) {
1976                 chip = &cfi->chips[i];
1977
1978                 mutex_lock(&chip->mutex);
1979                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1980
1981                 if (!ret) {
1982                         chip->oldstate = chip->state;
1983                         chip->state = FL_SYNCING;
1984                         /* No need to wake_up() on this state change -
1985                          * as the whole point is that nobody can do anything
1986                          * with the chip now anyway.
1987                          */
1988                 }
1989                 mutex_unlock(&chip->mutex);
1990         }
1991
1992         /* Unlock the chips again */
1993
1994         for (i--; i >= 0; i--) {
1995                 chip = &cfi->chips[i];
1996
1997                 mutex_lock(&chip->mutex);
1998
1999                 if (chip->state == FL_SYNCING) {
2000                         chip->state = chip->oldstate;
2001                         chip->oldstate = FL_READY;
2002                         wake_up(&chip->wq);
2003                 }
2004                 mutex_unlock(&chip->mutex);
2005         }
2006 }
2007
2008 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2009                                                 struct flchip *chip,
2010                                                 unsigned long adr,
2011                                                 int len, void *thunk)
2012 {
2013         struct cfi_private *cfi = map->fldrv_priv;
2014         int status, ofs_factor = cfi->interleave * cfi->device_type;
2015
2016         adr += chip->start;
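             /* in read-identifier mode, word offset 2 of each block holds its lock status */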
2017         xip_disable(map, chip, adr+(2*ofs_factor));
2018         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2019         chip->state = FL_JEDEC_QUERY;
2020         status = cfi_read_query(map, adr+(2*ofs_factor));
2021         xip_enable(map, chip, 0);
2022         return status;
2023 }
2024
2025 #ifdef DEBUG_LOCK_BITS
2026 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2027                                                 struct flchip *chip,
2028                                                 unsigned long adr,
2029                                                 int len, void *thunk)
2030 {
2031         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2032                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2033         return 0;
2034 }
2035 #endif
2036
2037 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2038 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2039
2040 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2041                                        unsigned long adr, int len, void *thunk)
2042 {
2043         struct cfi_private *cfi = map->fldrv_priv;
2044         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2045         int mdelay;
2046         int ret;
2047
2048         adr += chip->start;
2049
2050         mutex_lock(&chip->mutex);
2051         ret = get_chip(map, chip, adr, FL_LOCKING);
2052         if (ret) {
2053                 mutex_unlock(&chip->mutex);
2054                 return ret;
2055         }
2056
2057         ENABLE_VPP(map);
2058         xip_disable(map, chip, adr);
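             /* lock sequence: 0x60 setup, then 0x01 to set or 0xd0 to clear the lock bit */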
2059
2060         map_write(map, CMD(0x60), adr);
2061         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2062                 map_write(map, CMD(0x01), adr);
2063                 chip->state = FL_LOCKING;
2064         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2065                 map_write(map, CMD(0xD0), adr);
2066                 chip->state = FL_UNLOCKING;
2067         } else
2068                 BUG();
2069
2070         /*
2071          * If Instant Individual Block Locking is supported then there
2072          * is no need to delay.
2073          *
2074          * Otherwise, unlocking may take up to 1.4 seconds on some Intel
2075          * flashes, so let's use a max of 1.5 seconds (1500 ms) as the
2076          * timeout.
2077          *
2078          * See "Clear Block Lock-Bits Time" on page 40 in
2079          * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2080          * from February 2003
2081          */
2082         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2083
2084         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2085         if (ret) {
2086                 map_write(map, CMD(0x70), adr);
2087                 chip->state = FL_STATUS;
2088                 xip_enable(map, chip, adr);
2089                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2090                 goto out;
2091         }
2092
2093         xip_enable(map, chip, adr);
2094  out:   DISABLE_VPP(map);
2095         put_chip(map, chip, adr);
2096         mutex_unlock(&chip->mutex);
2097         return ret;
2098 }
2099
2100 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2101 {
2102         int ret;
2103
2104 #ifdef DEBUG_LOCK_BITS
2105         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2106                __func__, ofs, (unsigned long long)len);
2107         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2108                 ofs, len, NULL);
2109 #endif
2110
2111         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2112                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2113
2114 #ifdef DEBUG_LOCK_BITS
2115         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2116                __func__, ret);
2117         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2118                 ofs, len, NULL);
2119 #endif
2120
2121         return ret;
2122 }
2123
2124 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2125 {
2126         int ret;
2127
2128 #ifdef DEBUG_LOCK_BITS
2129         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2130                __func__, ofs, (unsigned long long)len);
2131         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2132                 ofs, len, NULL);
2133 #endif
2134
2135         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2136                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2137
2138 #ifdef DEBUG_LOCK_BITS
2139         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2140                __func__, ret);
2141         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2142                 ofs, len, NULL);
2143 #endif
2144
2145         return ret;
2146 }
2147
2148 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2149                                   uint64_t len)
2150 {
2151         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2152                                 ofs, len, NULL) ? 1 : 0;
2153 }
2154
2155 #ifdef CONFIG_MTD_OTP
2156
2157 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2158                         u_long data_offset, u_char *buf, u_int size,
2159                         u_long prot_offset, u_int groupno, u_int groupsize);
2160
2161 static int __xipram
2162 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2163             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2164 {
2165         struct cfi_private *cfi = map->fldrv_priv;
2166         int ret;
2167
2168         mutex_lock(&chip->mutex);
2169         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2170         if (ret) {
2171                 mutex_unlock(&chip->mutex);
2172                 return ret;
2173         }
2174
2175         /* let's ensure we're not reading back cached data from array mode */
2176         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2177
2178         xip_disable(map, chip, chip->start);
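             /* 0x90 (read identifier) maps the protection registers into the address space */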
2179         if (chip->state != FL_JEDEC_QUERY) {
2180                 map_write(map, CMD(0x90), chip->start);
2181                 chip->state = FL_JEDEC_QUERY;
2182         }
2183         map_copy_from(map, buf, chip->start + offset, size);
2184         xip_enable(map, chip, chip->start);
2185
2186         /* then ensure we don't keep OTP data in the cache */
2187         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2188
2189         put_chip(map, chip, chip->start);
2190         mutex_unlock(&chip->mutex);
2191         return 0;
2192 }
2193
2194 static int
2195 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2196              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2197 {
2198         int ret;
2199
2200         while (size) {
2201                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2202                 int gap = offset - bus_ofs;
2203                 int n = min_t(int, size, map_bankwidth(map)-gap);
2204                 map_word datum = map_word_ff(map);
2205
2206                 datum = map_word_load_partial(map, datum, buf, gap, n);
2207                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2208                 if (ret)
2209                         return ret;
2210
2211                 offset += n;
2212                 buf += n;
2213                 size -= n;
2214         }
2215
2216         return 0;
2217 }
2218
2219 static int
2220 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2221             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2222 {
2223         struct cfi_private *cfi = map->fldrv_priv;
2224         map_word datum;
2225
2226         /* make sure area matches group boundaries */
2227         if (size != grpsz)
2228                 return -EXDEV;
2229
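             /* programming the group's bit in the protection lock register to 0 locks it permanently */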
2230         datum = map_word_ff(map);
2231         datum = map_word_clr(map, datum, CMD(1 << grpno));
2232         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2233 }
2234
2235 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2236                                  size_t *retlen, u_char *buf,
2237                                  otp_op_t action, int user_regs)
2238 {
2239         struct map_info *map = mtd->priv;
2240         struct cfi_private *cfi = map->fldrv_priv;
2241         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2242         struct flchip *chip;
2243         struct cfi_intelext_otpinfo *otp;
2244         u_long devsize, reg_prot_offset, data_offset;
2245         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2246         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2247         int ret;
2248
2249         *retlen = 0;
2250
2251         /* Check that we actually have some OTP registers */
2252         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2253                 return -ENODATA;
2254
2255         /* we need real chips here not virtual ones */
2256         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2257         chip_step = devsize >> cfi->chipshift;
2258         chip_num = 0;
2259
2260         /* Some chips have OTP located in the _top_ partition only.
2261            For example: Intel 28F256L18T (T means top-parameter device) */
2262         if (cfi->mfr == CFI_MFR_INTEL) {
2263                 switch (cfi->id) {
2264                 case 0x880b:
2265                 case 0x880c:
2266                 case 0x880d:
2267                         chip_num = chip_step - 1;
2268                 }
2269         }
2270
2271         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2272                 chip = &cfi->chips[chip_num];
2273                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2274
2275                 /* first OTP region */
2276                 field = 0;
2277                 reg_prot_offset = extp->ProtRegAddr;
2278                 reg_fact_groups = 1;
2279                 reg_fact_size = 1 << extp->FactProtRegSize;
2280                 reg_user_groups = 1;
2281                 reg_user_size = 1 << extp->UserProtRegSize;
2282
2283                 while (len > 0) {
2284                         /* flash geometry fixup */
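                             /* (CFI reports word addresses relative to one device; scale to map byte offsets) */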
2285                         data_offset = reg_prot_offset + 1;
2286                         data_offset *= cfi->interleave * cfi->device_type;
2287                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2288                         reg_fact_size *= cfi->interleave;
2289                         reg_user_size *= cfi->interleave;
2290
2291                         if (user_regs) {
2292                                 groups = reg_user_groups;
2293                                 groupsize = reg_user_size;
2294                                 /* skip over factory reg area */
2295                                 groupno = reg_fact_groups;
2296                                 data_offset += reg_fact_groups * reg_fact_size;
2297                         } else {
2298                                 groups = reg_fact_groups;
2299                                 groupsize = reg_fact_size;
2300                                 groupno = 0;
2301                         }
2302
2303                         while (len > 0 && groups > 0) {
2304                                 if (!action) {
2305                                         /*
2306                                          * Special case: if action is NULL
2307                                          * we fill buf with otp_info records.
2308                                          */
2309                                         struct otp_info *otpinfo;
2310                                         map_word lockword;
2311                                         if (len < sizeof(struct otp_info))
2312                                                 return -ENOSPC;
2313                                         len -= sizeof(struct otp_info);
2314                                         ret = do_otp_read(map, chip,
2315                                                           reg_prot_offset,
2316                                                           (u_char *)&lockword,
2317                                                           map_bankwidth(map),
2318                                                           0, 0,  0);
2319                                         if (ret)
2320                                                 return ret;
2321                                         otpinfo = (struct otp_info *)buf;
2322                                         otpinfo->start = from;
2323                                         otpinfo->length = groupsize;
2324                                         otpinfo->locked =
2325                                            !map_word_bitsset(map, lockword,
2326                                                              CMD(1 << groupno));
2327                                         from += groupsize;
2328                                         buf += sizeof(*otpinfo);
2329                                         *retlen += sizeof(*otpinfo);
2330                                 } else if (from >= groupsize) {
2331                                         from -= groupsize;
2332                                         data_offset += groupsize;
2333                                 } else {
2334                                         int size = groupsize;
2335                                         data_offset += from;
2336                                         size -= from;
2337                                         from = 0;
2338                                         if (size > len)
2339                                                 size = len;
2340                                         ret = action(map, chip, data_offset,
2341                                                      buf, size, reg_prot_offset,
2342                                                      groupno, groupsize);
2343                                         if (ret < 0)
2344                                                 return ret;
2345                                         buf += size;
2346                                         len -= size;
2347                                         *retlen += size;
2348                                         data_offset += size;
2349                                 }
2350                                 groupno++;
2351                                 groups--;
2352                         }
2353
2354                         /* next OTP region */
2355                         if (++field == extp->NumProtectionFields)
2356                                 break;
2357                         reg_prot_offset = otp->ProtRegAddr;
2358                         reg_fact_groups = otp->FactGroups;
2359                         reg_fact_size = 1 << otp->FactProtRegSize;
2360                         reg_user_groups = otp->UserGroups;
2361                         reg_user_size = 1 << otp->UserProtRegSize;
2362                         otp++;
2363                 }
2364         }
2365
2366         return 0;
2367 }
2368
2369 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2370                                            size_t len, size_t *retlen,
2371                                             u_char *buf)
2372 {
2373         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2374                                      buf, do_otp_read, 0);
2375 }
2376
2377 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2378                                            size_t len, size_t *retlen,
2379                                             u_char *buf)
2380 {
2381         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2382                                      buf, do_otp_read, 1);
2383 }
2384
2385 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2386                                             size_t len, size_t *retlen,
2387                                              u_char *buf)
2388 {
2389         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2390                                      buf, do_otp_write, 1);
2391 }
2392
2393 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2394                                            loff_t from, size_t len)
2395 {
2396         size_t retlen;
2397         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2398                                      NULL, do_otp_lock, 1);
2399 }
2400
2401 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2402                                            struct otp_info *buf, size_t len)
2403 {
2404         size_t retlen;
2405         int ret;
2406
2407         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2408         return ret ? : retlen;
2409 }
2410
2411 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2412                                            struct otp_info *buf, size_t len)
2413 {
2414         size_t retlen;
2415         int ret;
2416
2417         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2418         return ret ? : retlen;
2419 }
2420
2421 #endif
2422
2423 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2424 {
2425         struct mtd_erase_region_info *region;
2426         int block, status, i;
2427         unsigned long adr;
2428         size_t len;
2429
2430         for (i = 0; i < mtd->numeraseregions; i++) {
2431                 region = &mtd->eraseregions[i];
2432                 if (!region->lockmap)
2433                         continue;
2434
2435                 for (block = 0; block < region->numblocks; block++) {
2436                         len = region->erasesize;
2437                         adr = region->offset + block * len;
2438
2439                         status = cfi_varsize_frob(mtd,
2440                                         do_getlockstatus_oneblock, adr, len, NULL);
2441                         if (status)
2442                                 set_bit(block, region->lockmap);
2443                         else
2444                                 clear_bit(block, region->lockmap);
2445                 }
2446         }
2447 }
2448
2449 static int cfi_intelext_suspend(struct mtd_info *mtd)
2450 {
2451         struct map_info *map = mtd->priv;
2452         struct cfi_private *cfi = map->fldrv_priv;
2453         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2454         int i;
2455         struct flchip *chip;
2456         int ret = 0;
2457
2458         if ((mtd->flags & MTD_POWERUP_LOCK)
2459             && extp && (extp->FeatureSupport & (1 << 5)))
2460                 cfi_intelext_save_locks(mtd);
2461
2462         for (i=0; !ret && i<cfi->numchips; i++) {
2463                 chip = &cfi->chips[i];
2464
2465                 mutex_lock(&chip->mutex);
2466
2467                 switch (chip->state) {
2468                 case FL_READY:
2469                 case FL_STATUS:
2470                 case FL_CFI_QUERY:
2471                 case FL_JEDEC_QUERY:
2472                         if (chip->oldstate == FL_READY) {
2473                                 /* place the chip in a known state before suspend */
2474                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2475                                 chip->oldstate = chip->state;
2476                                 chip->state = FL_PM_SUSPENDED;
2477                                 /* No need to wake_up() on this state change -
2478                                  * as the whole point is that nobody can do anything
2479                                  * with the chip now anyway.
2480                                  */
2481                         } else {
2482                                 /* An operation is pending; refuse the suspend so the caller retries. */
2483                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2484                                 ret = -EAGAIN;
2485                         }
2486                         break;
2487                 default:
2488                         /* Should we actually wait? Once upon a time these routines weren't
2489                            allowed to. Or should we return -EAGAIN, because the upper layers
2490                            ought to have already shut down anything which was using the device
2491                            anyway? The latter for now. */
2492                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2493                         ret = -EAGAIN;
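                             /* fall through */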
2494                 case FL_PM_SUSPENDED:
2495                         break;
2496                 }
2497                 mutex_unlock(&chip->mutex);
2498         }
2499
2500         /* Unlock the chips again */
2501
2502         if (ret) {
2503                 for (i--; i >= 0; i--) {
2504                         chip = &cfi->chips[i];
2505
2506                         mutex_lock(&chip->mutex);
2507
2508                         if (chip->state == FL_PM_SUSPENDED) {
2509                                 /* No need to force it into a known state here,
2510                                    because we're returning failure, and it didn't
2511                                    get power cycled */
2512                                 chip->state = chip->oldstate;
2513                                 chip->oldstate = FL_READY;
2514                                 wake_up(&chip->wq);
2515                         }
2516                         mutex_unlock(&chip->mutex);
2517                 }
2518         }
2519
2520         return ret;
2521 }
2522
2523 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2524 {
2525         struct mtd_erase_region_info *region;
2526         int block, i;
2527         unsigned long adr;
2528         size_t len;
2529
2530         for (i = 0; i < mtd->numeraseregions; i++) {
2531                 region = &mtd->eraseregions[i];
2532                 if (!region->lockmap)
2533                         continue;
2534
2535                 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2536                         len = region->erasesize;
2537                         adr = region->offset + block * len;
2538                         cfi_intelext_unlock(mtd, adr, len);
2539                 }
2540         }
2541 }
2542
2543 static void cfi_intelext_resume(struct mtd_info *mtd)
2544 {
2545         struct map_info *map = mtd->priv;
2546         struct cfi_private *cfi = map->fldrv_priv;
2547         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2548         int i;
2549         struct flchip *chip;
2550
2551         for (i=0; i<cfi->numchips; i++) {
2552
2553                 chip = &cfi->chips[i];
2554
2555                 mutex_lock(&chip->mutex);
2556
2557                 /* Go to known state. Chip may have been power cycled */
2558                 if (chip->state == FL_PM_SUSPENDED) {
2559                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2560                         chip->oldstate = chip->state = FL_READY;
2561                         wake_up(&chip->wq);
2562                 }
2563
2564                 mutex_unlock(&chip->mutex);
2565         }
2566
2567         if ((mtd->flags & MTD_POWERUP_LOCK)
2568             && extp && (extp->FeatureSupport & (1 << 5)))
2569                 cfi_intelext_restore_locks(mtd);
2570 }
2571
2572 static int cfi_intelext_reset(struct mtd_info *mtd)
2573 {
2574         struct map_info *map = mtd->priv;
2575         struct cfi_private *cfi = map->fldrv_priv;
2576         int i, ret;
2577
2578         for (i=0; i < cfi->numchips; i++) {
2579                 struct flchip *chip = &cfi->chips[i];
2580
2581                 /* force the completion of any ongoing operation
2582                    and switch to array mode so any bootloader in
2583                    flash is accessible for soft reboot. */
2584                 mutex_lock(&chip->mutex);
2585                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2586                 if (!ret) {
2587                         map_write(map, CMD(0xff), chip->start);
2588                         chip->state = FL_SHUTDOWN;
2589                         put_chip(map, chip, chip->start);
2590                 }
2591                 mutex_unlock(&chip->mutex);
2592         }
2593
2594         return 0;
2595 }
2596
2597 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2598                                void *v)
2599 {
2600         struct mtd_info *mtd;
2601
2602         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2603         cfi_intelext_reset(mtd);
2604         return NOTIFY_DONE;
2605 }
2606
2607 static void cfi_intelext_destroy(struct mtd_info *mtd)
2608 {
2609         struct map_info *map = mtd->priv;
2610         struct cfi_private *cfi = map->fldrv_priv;
2611         struct mtd_erase_region_info *region;
2612         int i;
2613         cfi_intelext_reset(mtd);
2614         unregister_reboot_notifier(&mtd->reboot_notifier);
2615         kfree(cfi->cmdset_priv);
2616         kfree(cfi->cfiq);
2617         kfree(cfi->chips[0].priv);
2618         kfree(cfi);
2619         for (i = 0; i < mtd->numeraseregions; i++) {
2620                 region = &mtd->eraseregions[i];
2621                 kfree(region->lockmap);     /* kfree(NULL) is a no-op */
2623         }
2624         kfree(mtd->eraseregions);
2625 }
2626
2627 MODULE_LICENSE("GPL");
2628 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2629 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2630 MODULE_ALIAS("cfi_cmdset_0003");
2631 MODULE_ALIAS("cfi_cmdset_0200");