1 /*
2  * Common Flash Interface support:
3  *   ST Advanced Architecture Command Set (ID 0x0020)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
8  *      - completely revamped method functions so they are aware and
9  *        independent of the flash geometry (buswidth, interleave, etc.)
10  *      - scalability vs code size is completely set at compile-time
11  *        (see include/linux/mtd/cfi.h for selection)
12  *      - optimized write buffer method
13  * 06/21/2002   Joern Engel <joern@wh.fh-wedel.de> and others
14  *      - modified Intel Command Set 0x0001 to support ST Advanced Architecture
15  *        (command set 0x0020)
16  *      - added a writev function
17  * 07/13/2005   Joern Engel <joern@wh.fh-wedel.de>
18  *      - Plugged memory leak in cfi_staa_writev().
19  */
20
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
26 #include <asm/io.h>
27 #include <asm/byteorder.h>
28
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/mtd/map.h>
34 #include <linux/mtd/cfi.h>
35 #include <linux/mtd/mtd.h>
36
37
38 static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
39 static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
40 static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
41                 unsigned long count, loff_t to, size_t *retlen);
42 static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
43 static void cfi_staa_sync (struct mtd_info *);
44 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
45 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
46 static int cfi_staa_suspend (struct mtd_info *);
47 static void cfi_staa_resume (struct mtd_info *);
48
49 static void cfi_staa_destroy(struct mtd_info *);
50
51 struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
52
53 static struct mtd_info *cfi_staa_setup (struct map_info *);
54
55 static struct mtd_chip_driver cfi_staa_chipdrv = {
56         .probe          = NULL, /* Not usable directly */
57         .destroy        = cfi_staa_destroy,
58         .name           = "cfi_cmdset_0020",
59         .module         = THIS_MODULE
60 };
61
62 /* #define DEBUG_LOCK_BITS */
63 /* #define DEBUG_CFI_FEATURES */
64
65 #ifdef DEBUG_CFI_FEATURES
66 static void cfi_tell_features(struct cfi_pri_intelext *extp)
67 {
68         int i;
69         printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
70         printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
71         printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
72         printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
73         printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
74         printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
75         printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
76         printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
77         printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
78         printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
79         for (i=9; i<32; i++) {
80                 if (extp->FeatureSupport & (1<<i))
81                         printk("     - Unknown Bit %X:      supported\n", i);
82         }
83
84         printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
85         printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
86         for (i=1; i<8; i++) {
87                 if (extp->SuspendCmdSupport & (1<<i))
88                         printk("     - Unknown Bit %X:               supported\n", i);
89         }
90
91         printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
92         printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
93         printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
94         for (i=2; i<16; i++) {
95                 if (extp->BlkStatusRegMask & (1<<i))
96                         printk("     - Unknown Bit %X Active: yes\n",i);
97         }
98
99         printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
100                extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
101         if (extp->VppOptimal)
102                 printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
103                        extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
104 }
105 #endif
106
107 /* This routine is made available to other mtd code via
108  * inter_module_register.  It must only be accessed through
109  * inter_module_get which will bump the use count of this module.  The
110  * addresses passed back in cfi are valid as long as the use count of
111  * this module is non-zero, i.e. between inter_module_get and
112  * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
113  */
114 struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
115 {
116         struct cfi_private *cfi = map->fldrv_priv;
117         int i;
118
119         if (cfi->cfi_mode) {
120                 /*
121                  * It's a real CFI chip, not one for which the probe
122                  * routine faked a CFI structure. So we read the feature
123                  * table from it.
124                  */
125                 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
126                 struct cfi_pri_intelext *extp;
127
128                 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
129                 if (!extp)
130                         return NULL;
131
132                 if (extp->MajorVersion != '1' ||
133                     (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
134                         printk(KERN_ERR "  Unknown ST Microelectronics"
135                                " Extended Query version %c.%c.\n",
136                                extp->MajorVersion, extp->MinorVersion);
137                         kfree(extp);
138                         return NULL;
139                 }
140
141                 /* Do some byteswapping if necessary */
142                 extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
143                 extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
144
145 #ifdef DEBUG_CFI_FEATURES
146                 /* Tell the user about it in lots of lovely detail */
147                 cfi_tell_features(extp);
148 #endif
149
150                 /* Install our own private info structure */
151                 cfi->cmdset_priv = extp;
152         }
153
154         for (i=0; i< cfi->numchips; i++) {
155                 cfi->chips[i].word_write_time = 128;
156                 cfi->chips[i].buffer_write_time = 128;
157                 cfi->chips[i].erase_time = 1024;
158                 cfi->chips[i].ref_point_counter = 0;
159                 init_waitqueue_head(&(cfi->chips[i].wq));
160         }
161
162         return cfi_staa_setup(map);
163 }
164 EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
165
166 static struct mtd_info *cfi_staa_setup(struct map_info *map)
167 {
168         struct cfi_private *cfi = map->fldrv_priv;
169         struct mtd_info *mtd;
170         unsigned long offset = 0;
171         int i,j;
172         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
173
174         mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
175         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
176
177         if (!mtd) {
178                 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
179                 kfree(cfi->cmdset_priv);
180                 return NULL;
181         }
182
183         mtd->priv = map;
184         mtd->type = MTD_NORFLASH;
185         mtd->size = devsize * cfi->numchips;
186
187         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
188         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
189                         * mtd->numeraseregions, GFP_KERNEL);
190         if (!mtd->eraseregions) {
191                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
192                 kfree(cfi->cmdset_priv);
193                 kfree(mtd);
194                 return NULL;
195         }
196
197         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
198                 unsigned long ernum, ersize;
199                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
200                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
201
202                 if (mtd->erasesize < ersize) {
203                         mtd->erasesize = ersize;
204                 }
205                 for (j=0; j<cfi->numchips; j++) {
206                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
207                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
208                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
209                 }
210                 offset += (ersize * ernum);
211         }
212
213         if (offset != devsize) {
214                 /* Argh */
215                 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
216                 kfree(mtd->eraseregions);
217                 kfree(cfi->cmdset_priv);
218                 kfree(mtd);
219                 return NULL;
220         }
221
222         for (i=0; i<mtd->numeraseregions; i++) {
223                 printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
224                        i, (unsigned long long)mtd->eraseregions[i].offset,
225                        mtd->eraseregions[i].erasesize,
226                        mtd->eraseregions[i].numblocks);
227         }
228
229         /* Also select the correct geometry setup too */
230         mtd->erase = cfi_staa_erase_varsize;
231         mtd->read = cfi_staa_read;
232         mtd->write = cfi_staa_write_buffers;
233         mtd->writev = cfi_staa_writev;
234         mtd->sync = cfi_staa_sync;
235         mtd->lock = cfi_staa_lock;
236         mtd->unlock = cfi_staa_unlock;
237         mtd->suspend = cfi_staa_suspend;
238         mtd->resume = cfi_staa_resume;
239         mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
240         mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
241         map->fldrv = &cfi_staa_chipdrv;
242         __module_get(THIS_MODULE);
243         mtd->name = map->name;
244         return mtd;
245 }
246
247
248 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
249 {
250         map_word status, status_OK;
251         unsigned long timeo;
252         DECLARE_WAITQUEUE(wait, current);
253         int suspended = 0;
254         unsigned long cmd_addr;
255         struct cfi_private *cfi = map->fldrv_priv;
256
257         adr += chip->start;
258
259         /* Ensure cmd read/writes are aligned. */
260         cmd_addr = adr & ~(map_bankwidth(map)-1);
261
262         /* Let's determine this according to the interleave only once */
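            /* Status register bit 7 is the write state machine "ready" bit; CMD() replicates it across the interleave */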
263         status_OK = CMD(0x80);
264
265         timeo = jiffies + HZ;
266  retry:
267         mutex_lock(&chip->mutex);
268
269         /* Check that the chip's ready to talk to us.
270          * If it's in FL_ERASING state, suspend it and make it talk now.
271          */
272         switch (chip->state) {
273         case FL_ERASING:
274                 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
275                         goto sleep; /* We don't support erase suspend */
276
277                 map_write (map, CMD(0xb0), cmd_addr);
278                 /* If the flash has finished erasing, then 'erase suspend'
279                  * appears to make some (28F320) flash devices switch to
280                  * 'read' mode.  Make sure that we switch to 'read status'
281                  * mode so we get the right data. --rmk
282                  */
283                 map_write(map, CMD(0x70), cmd_addr);
284                 chip->oldstate = FL_ERASING;
285                 chip->state = FL_ERASE_SUSPENDING;
286                 //              printk("Erase suspending at 0x%lx\n", cmd_addr);
287                 for (;;) {
288                         status = map_read(map, cmd_addr);
289                         if (map_word_andequal(map, status, status_OK, status_OK))
290                                 break;
291
292                         if (time_after(jiffies, timeo)) {
293                                 /* Urgh */
294                                 map_write(map, CMD(0xd0), cmd_addr);
295                                 /* make sure we're in 'read status' mode */
296                                 map_write(map, CMD(0x70), cmd_addr);
297                                 chip->state = FL_ERASING;
298                                 mutex_unlock(&chip->mutex);
299                                 printk(KERN_ERR "Chip not ready after erase "
300                                        "suspended: status = 0x%lx\n", status.x[0]);
301                                 return -EIO;
302                         }
303
304                         mutex_unlock(&chip->mutex);
305                         cfi_udelay(1);
306                         mutex_lock(&chip->mutex);
307                 }
308
309                 suspended = 1;
310                 map_write(map, CMD(0xff), cmd_addr);
311                 chip->state = FL_READY;
312                 break;
313
314 #if 0
315         case FL_WRITING:
316                 /* Not quite yet */
317 #endif
318
319         case FL_READY:
320                 break;
321
322         case FL_CFI_QUERY:
323         case FL_JEDEC_QUERY:
324                 map_write(map, CMD(0x70), cmd_addr);
325                 chip->state = FL_STATUS;
326
327         case FL_STATUS:
328                 status = map_read(map, cmd_addr);
329                 if (map_word_andequal(map, status, status_OK, status_OK)) {
330                         map_write(map, CMD(0xff), cmd_addr);
331                         chip->state = FL_READY;
332                         break;
333                 }
334
335                 /* Urgh. Chip not yet ready to talk to us. */
336                 if (time_after(jiffies, timeo)) {
337                         mutex_unlock(&chip->mutex);
338                         printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
339                         return -EIO;
340                 }
341
342                 /* Latency issues. Drop the lock, wait a while and retry */
343                 mutex_unlock(&chip->mutex);
344                 cfi_udelay(1);
345                 goto retry;
346
347         default:
348         sleep:
349                 /* Stick ourselves on a wait queue to be woken when
350                    someone changes the status */
351                 set_current_state(TASK_UNINTERRUPTIBLE);
352                 add_wait_queue(&chip->wq, &wait);
353                 mutex_unlock(&chip->mutex);
354                 schedule();
355                 remove_wait_queue(&chip->wq, &wait);
356                 timeo = jiffies + HZ;
357                 goto retry;
358         }
359
360         map_copy_from(map, buf, adr, len);
361
362         if (suspended) {
363                 chip->state = chip->oldstate;
364                 /* What if one interleaved chip has finished and the
365                    other hasn't? The old code would leave the finished
366                    one in READY mode. That's bad, and caused -EROFS
367                    errors to be returned from do_erase_oneblock because
368                    that's the only bit it checked for at the time.
369                    As the state machine appears to explicitly allow
370                    sending the 0x70 (Read Status) command to an erasing
371                    chip and expecting it to be ignored, that's what we
372                    do. */
373                 map_write(map, CMD(0xd0), cmd_addr);
374                 map_write(map, CMD(0x70), cmd_addr);
375         }
376
377         wake_up(&chip->wq);
378         mutex_unlock(&chip->mutex);
379         return 0;
380 }
381
382 static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
383 {
384         struct map_info *map = mtd->priv;
385         struct cfi_private *cfi = map->fldrv_priv;
386         unsigned long ofs;
387         int chipnum;
388         int ret = 0;
389
390         /* ofs: offset within the first chip that the first read should start */
391         chipnum = (from >> cfi->chipshift);
392         ofs = from - (chipnum <<  cfi->chipshift);
393
394         *retlen = 0;
395
396         while (len) {
397                 unsigned long thislen;
398
399                 if (chipnum >= cfi->numchips)
400                         break;
401
402                 if ((len + ofs -1) >> cfi->chipshift)
403                         thislen = (1<<cfi->chipshift) - ofs;
404                 else
405                         thislen = len;
406
407                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
408                 if (ret)
409                         break;
410
411                 *retlen += thislen;
412                 len -= thislen;
413                 buf += thislen;
414
415                 ofs = 0;
416                 chipnum++;
417         }
418         return ret;
419 }
420
421 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
422                                   unsigned long adr, const u_char *buf, int len)
423 {
424         struct cfi_private *cfi = map->fldrv_priv;
425         map_word status, status_OK;
426         unsigned long cmd_adr, timeo;
427         DECLARE_WAITQUEUE(wait, current);
428         int wbufsize, z;
429
430         /* M58LW064A requires bus alignment for buffer writes -- saw */
431         if (adr & (map_bankwidth(map)-1))
432             return -EINVAL;
433
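            /* The CFI query's MaxBufWriteSize is log2 of the per-chip write-buffer size in bytes; scale it by the interleave */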
434         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
435         adr += chip->start;
436         cmd_adr = adr & ~(wbufsize-1);
437
438         /* Let's determine this according to the interleave only once */
439         status_OK = CMD(0x80);
440
441         timeo = jiffies + HZ;
442  retry:
443
444 #ifdef DEBUG_CFI_FEATURES
445        printk("%s: chip->state[%d]\n", __func__, chip->state);
446 #endif
447         mutex_lock(&chip->mutex);
448
449         /* Check that the chip's ready to talk to us.
450          * Later, we can actually think about interrupting it
451          * if it's in FL_ERASING state.
452          * Not just yet, though.
453          */
454         switch (chip->state) {
455         case FL_READY:
456                 break;
457
458         case FL_CFI_QUERY:
459         case FL_JEDEC_QUERY:
460                 map_write(map, CMD(0x70), cmd_adr);
461                 chip->state = FL_STATUS;
462 #ifdef DEBUG_CFI_FEATURES
463                 printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
464 #endif
465
466         case FL_STATUS:
467                 status = map_read(map, cmd_adr);
468                 if (map_word_andequal(map, status, status_OK, status_OK))
469                         break;
470                 /* Urgh. Chip not yet ready to talk to us. */
471                 if (time_after(jiffies, timeo)) {
472                         mutex_unlock(&chip->mutex);
473                         printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
474                                status.x[0], map_read(map, cmd_adr).x[0]);
475                         return -EIO;
476                 }
477
478                 /* Latency issues. Drop the lock, wait a while and retry */
479                 mutex_unlock(&chip->mutex);
480                 cfi_udelay(1);
481                 goto retry;
482
483         default:
484                 /* Stick ourselves on a wait queue to be woken when
485                    someone changes the status */
486                 set_current_state(TASK_UNINTERRUPTIBLE);
487                 add_wait_queue(&chip->wq, &wait);
488                 mutex_unlock(&chip->mutex);
489                 schedule();
490                 remove_wait_queue(&chip->wq, &wait);
491                 timeo = jiffies + HZ;
492                 goto retry;
493         }
494
495         ENABLE_VPP(map);
496         map_write(map, CMD(0xe8), cmd_adr);
497         chip->state = FL_WRITING_TO_BUFFER;
498
499         z = 0;
500         for (;;) {
501                 status = map_read(map, cmd_adr);
502                 if (map_word_andequal(map, status, status_OK, status_OK))
503                         break;
504
505                 mutex_unlock(&chip->mutex);
506                 cfi_udelay(1);
507                 mutex_lock(&chip->mutex);
508
509                 if (++z > 100) {
510                         /* Argh. Not ready for write to buffer */
511                         DISABLE_VPP(map);
512                         map_write(map, CMD(0x70), cmd_adr);
513                         chip->state = FL_STATUS;
514                         mutex_unlock(&chip->mutex);
515                         printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
516                         return -EIO;
517                 }
518         }
519
520         /* Tell the chip how many writes follow: the count of bus-width words, minus one */
521         map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
522
523         /* Write data */
524         for (z = 0; z < len;
525              z += map_bankwidth(map), buf += map_bankwidth(map)) {
526                 map_word d;
527                 d = map_word_load(map, buf);
528                 map_write(map, d, adr+z);
529         }
530         /* GO GO GO */
531         map_write(map, CMD(0xd0), cmd_adr);
532         chip->state = FL_WRITING;
533
534         mutex_unlock(&chip->mutex);
535         cfi_udelay(chip->buffer_write_time);
536         mutex_lock(&chip->mutex);
537
538         timeo = jiffies + (HZ/2);
539         z = 0;
540         for (;;) {
541                 if (chip->state != FL_WRITING) {
542                         /* Someone's suspended the write. Sleep */
543                         set_current_state(TASK_UNINTERRUPTIBLE);
544                         add_wait_queue(&chip->wq, &wait);
545                         mutex_unlock(&chip->mutex);
546                         schedule();
547                         remove_wait_queue(&chip->wq, &wait);
548                         timeo = jiffies + (HZ / 2); /* FIXME */
549                         mutex_lock(&chip->mutex);
550                         continue;
551                 }
552
553                 status = map_read(map, cmd_adr);
554                 if (map_word_andequal(map, status, status_OK, status_OK))
555                         break;
556
557                 /* OK Still waiting */
558                 if (time_after(jiffies, timeo)) {
559                         /* clear status */
560                         map_write(map, CMD(0x50), cmd_adr);
561                         /* put back into read status register mode */
562                         map_write(map, CMD(0x70), adr);
563                         chip->state = FL_STATUS;
564                         DISABLE_VPP(map);
565                         mutex_unlock(&chip->mutex);
566                         printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
567                         return -EIO;
568                 }
569
570                 /* Latency issues. Drop the lock, wait a while and retry */
571                 mutex_unlock(&chip->mutex);
572                 cfi_udelay(1);
573                 z++;
574                 mutex_lock(&chip->mutex);
575         }
576         if (!z) {
577                 chip->buffer_write_time--;
578                 if (!chip->buffer_write_time)
579                         chip->buffer_write_time++;
580         }
581         if (z > 1)
582                 chip->buffer_write_time++;
583
584         /* Done and happy. */
585         DISABLE_VPP(map);
586         chip->state = FL_STATUS;
587
588         /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
589         if (map_word_bitsset(map, status, CMD(0x3a))) {
590 #ifdef DEBUG_CFI_FEATURES
591                 printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
592 #endif
593                 /* clear status */
594                 map_write(map, CMD(0x50), cmd_adr);
595                 /* put back into read status register mode */
596                 map_write(map, CMD(0x70), adr);
597                 wake_up(&chip->wq);
598                 mutex_unlock(&chip->mutex);
599                 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
600         }
601         wake_up(&chip->wq);
602         mutex_unlock(&chip->mutex);
603
604         return 0;
605 }
606
607 static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
608                                        size_t len, size_t *retlen, const u_char *buf)
609 {
610         struct map_info *map = mtd->priv;
611         struct cfi_private *cfi = map->fldrv_priv;
612         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
613         int ret = 0;
614         int chipnum;
615         unsigned long ofs;
616
617         *retlen = 0;
618         if (!len)
619                 return 0;
620
621         chipnum = to >> cfi->chipshift;
622         ofs = to  - (chipnum << cfi->chipshift);
623
624 #ifdef DEBUG_CFI_FEATURES
625         printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
626         printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
627         printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
628 #endif
629
630         /* Write buffer is worth it only if more than one word to write... */
631         while (len > 0) {
632                 /* We must not cross write block boundaries */
633                 int size = wbufsize - (ofs & (wbufsize-1));
634
635                 if (size > len)
636                     size = len;
637
638                 ret = do_write_buffer(map, &cfi->chips[chipnum],
639                                       ofs, buf, size);
640                 if (ret)
641                         return ret;
642
643                 ofs += size;
644                 buf += size;
645                 (*retlen) += size;
646                 len -= size;
647
648                 if (ofs >> cfi->chipshift) {
649                         chipnum ++;
650                         ofs = 0;
651                         if (chipnum == cfi->numchips)
652                                 return 0;
653                 }
654         }
655
656         return 0;
657 }
658
659 /*
660  * Writev for ECC-Flashes is a little more complicated. We need to maintain
661  * a small buffer for this.
662  * XXX: If the buffer size is not a power of 2, this will break
663  */
664 #define ECCBUF_SIZE (mtd->writesize)
665 #define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
666 #define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
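    /* ECCBUF_DIV rounds down to a whole number of ECC pages and ECCBUF_MOD gives the remainder; both assume ECCBUF_SIZE is a power of two */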
667 static int
668 cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
669                 unsigned long count, loff_t to, size_t *retlen)
670 {
671         unsigned long i;
672         size_t   totlen = 0, thislen;
673         int      ret = 0;
674         size_t   buflen = 0;
675         static char *buffer;
676
677         if (!ECCBUF_SIZE) {
678                 /* We should fall back to a general writev implementation.
679                  * Until that is written, just return an error.
680                  */
681                 return -EIO;
682         }
683         buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
684         if (!buffer)
685                 return -ENOMEM;
686
687         for (i=0; i<count; i++) {
688                 size_t elem_len = vecs[i].iov_len;
689                 void *elem_base = vecs[i].iov_base;
690                 if (!elem_len) /* FIXME: Might be unnecessary. Check that */
691                         continue;
692                 if (buflen) { /* cut off head */
693                         if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
694                                 memcpy(buffer+buflen, elem_base, elem_len);
695                                 buflen += elem_len;
696                                 continue;
697                         }
698                         memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
699                         ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
700                         totlen += thislen;
701                         if (ret || thislen != ECCBUF_SIZE)
702                                 goto write_error;
703                         elem_len -= thislen-buflen;
704                         elem_base += thislen-buflen;
705                         to += ECCBUF_SIZE;
706                 }
707                 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
708                         ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
709                         totlen += thislen;
710                         if (ret || thislen != ECCBUF_DIV(elem_len))
711                                 goto write_error;
712                         to += thislen;
713                 }
714                 buflen = ECCBUF_MOD(elem_len); /* cut off tail */
715                 if (buflen) {
716                         memset(buffer, 0xff, ECCBUF_SIZE);
717                         memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
718                 }
719         }
720         if (buflen) { /* flush last page, even if not full */
721                 /* This is sometimes intended behaviour, really */
722                 ret = mtd->write(mtd, to, buflen, &thislen, buffer);
723                 totlen += thislen;
724                 if (ret || thislen != buflen)
725                         goto write_error;
726         }
727 write_error:
728         if (retlen)
729                 *retlen = totlen;
730         kfree(buffer);
731         return ret;
732 }
733
734
735 static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
736 {
737         struct cfi_private *cfi = map->fldrv_priv;
738         map_word status, status_OK;
739         unsigned long timeo;
740         int retries = 3;
741         DECLARE_WAITQUEUE(wait, current);
742         int ret = 0;
743
744         adr += chip->start;
745
746         /* Let's determine this according to the interleave only once */
747         status_OK = CMD(0x80);
748
749         timeo = jiffies + HZ;
750 retry:
751         mutex_lock(&chip->mutex);
752
753         /* Check that the chip's ready to talk to us. */
754         switch (chip->state) {
755         case FL_CFI_QUERY:
756         case FL_JEDEC_QUERY:
757         case FL_READY:
758                 map_write(map, CMD(0x70), adr);
759                 chip->state = FL_STATUS;
760
761         case FL_STATUS:
762                 status = map_read(map, adr);
763                 if (map_word_andequal(map, status, status_OK, status_OK))
764                         break;
765
766                 /* Urgh. Chip not yet ready to talk to us. */
767                 if (time_after(jiffies, timeo)) {
768                         mutex_unlock(&chip->mutex);
769                         printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
770                         return -EIO;
771                 }
772
773                 /* Latency issues. Drop the lock, wait a while and retry */
774                 mutex_unlock(&chip->mutex);
775                 cfi_udelay(1);
776                 goto retry;
777
778         default:
779                 /* Stick ourselves on a wait queue to be woken when
780                    someone changes the status */
781                 set_current_state(TASK_UNINTERRUPTIBLE);
782                 add_wait_queue(&chip->wq, &wait);
783                 mutex_unlock(&chip->mutex);
784                 schedule();
785                 remove_wait_queue(&chip->wq, &wait);
786                 timeo = jiffies + HZ;
787                 goto retry;
788         }
789
790         ENABLE_VPP(map);
791         /* Clear the status register first */
792         map_write(map, CMD(0x50), adr);
793
794         /* Now erase */
795         map_write(map, CMD(0x20), adr);
796         map_write(map, CMD(0xD0), adr);
797         chip->state = FL_ERASING;
798
799         mutex_unlock(&chip->mutex);
800         msleep(1000);
801         mutex_lock(&chip->mutex);
802
803         /* FIXME. Use a timer to check this, and return immediately. */
804         /* Once the state machine's known to be working I'll do that */
805
806         timeo = jiffies + (HZ*20);
807         for (;;) {
808                 if (chip->state != FL_ERASING) {
809                         /* Someone's suspended the erase. Sleep */
810                         set_current_state(TASK_UNINTERRUPTIBLE);
811                         add_wait_queue(&chip->wq, &wait);
812                         mutex_unlock(&chip->mutex);
813                         schedule();
814                         remove_wait_queue(&chip->wq, &wait);
815                         timeo = jiffies + (HZ*20); /* FIXME */
816                         mutex_lock(&chip->mutex);
817                         continue;
818                 }
819
820                 status = map_read(map, adr);
821                 if (map_word_andequal(map, status, status_OK, status_OK))
822                         break;
823
824                 /* OK Still waiting */
825                 if (time_after(jiffies, timeo)) {
826                         map_write(map, CMD(0x70), adr);
827                         chip->state = FL_STATUS;
828                         printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
829                         DISABLE_VPP(map);
830                         mutex_unlock(&chip->mutex);
831                         return -EIO;
832                 }
833
834                 /* Latency issues. Drop the lock, wait a while and retry */
835                 mutex_unlock(&chip->mutex);
836                 cfi_udelay(1);
837                 mutex_lock(&chip->mutex);
838         }
839
840         DISABLE_VPP(map);
841         ret = 0;
842
843         /* We've broken this before. It doesn't hurt to be safe */
844         map_write(map, CMD(0x70), adr);
845         chip->state = FL_STATUS;
846         status = map_read(map, adr);
847
848         /* check the status error bits: block locked, VPP low, erase failure or bad command sequence */
849         if (map_word_bitsset(map, status, CMD(0x3a))) {
850                 unsigned char chipstatus = status.x[0];
851                 if (!map_word_equal(map, status, CMD(chipstatus))) {
852                         int i, w;
853                         for (w=0; w<map_words(map); w++) {
854                                 for (i = 0; i<cfi_interleave(cfi); i++) {
855                                         chipstatus |= status.x[w] >> (cfi->device_type * 8);
856                                 }
857                         }
858                         printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
859                                status.x[0], chipstatus);
860                 }
861                 /* Reset the error bits */
862                 map_write(map, CMD(0x50), adr);
863                 map_write(map, CMD(0x70), adr);
864
865                 if ((chipstatus & 0x30) == 0x30) {
866                         printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
867                         ret = -EIO;
868                 } else if (chipstatus & 0x02) {
869                         /* Protection bit set */
870                         ret = -EROFS;
871                 } else if (chipstatus & 0x8) {
872                         /* Voltage */
873                         printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
874                         ret = -EIO;
875                 } else if (chipstatus & 0x20) {
876                         if (retries--) {
877                                 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
878                                 timeo = jiffies + HZ;
879                                 chip->state = FL_STATUS;
880                                 mutex_unlock(&chip->mutex);
881                                 goto retry;
882                         }
883                         printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
884                         ret = -EIO;
885                 }
886         }
887
888         wake_up(&chip->wq);
889         mutex_unlock(&chip->mutex);
890         return ret;
891 }
892
893 static int cfi_staa_erase_varsize(struct mtd_info *mtd,
894                                   struct erase_info *instr)
895 {       struct map_info *map = mtd->priv;
896         struct cfi_private *cfi = map->fldrv_priv;
897         unsigned long adr, len;
898         int chipnum, ret = 0;
899         int i, first;
900         struct mtd_erase_region_info *regions = mtd->eraseregions;
901
902         if (instr->addr > mtd->size)
903                 return -EINVAL;
904
905         if ((instr->len + instr->addr) > mtd->size)
906                 return -EINVAL;
907
908         /* Check that both start and end of the requested erase are
909          * aligned with the erasesize at the appropriate addresses.
910          */
911
912         i = 0;
913
914         /* Skip all erase regions which are ended before the start of
915            the requested erase. Actually, to save on the calculations,
916            we skip to the first erase region which starts after the
917            start of the requested erase, and then go back one.
918         */
919
920         while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
921                i++;
922         i--;
923
924         /* OK, now i is pointing at the erase region in which this
925            erase request starts. Check the start of the requested
926            erase range is aligned with the erase size which is in
927            effect here.
928         */
929
930         if (instr->addr & (regions[i].erasesize-1))
931                 return -EINVAL;
932
933         /* Remember the erase region we start on */
934         first = i;
935
936         /* Next, check that the end of the requested erase is aligned
937          * with the erase region at that address.
938          */
939
940         while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
941                 i++;
942
943         /* As before, drop back one to point at the region in which
944            the address actually falls
945         */
946         i--;
947
948         if ((instr->addr + instr->len) & (regions[i].erasesize-1))
949                 return -EINVAL;
950
951         chipnum = instr->addr >> cfi->chipshift;
952         adr = instr->addr - (chipnum << cfi->chipshift);
953         len = instr->len;
954
955         i=first;
956
957         while(len) {
958                 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
959
960                 if (ret)
961                         return ret;
962
963                 adr += regions[i].erasesize;
964                 len -= regions[i].erasesize;
965
966                 if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
967                         i++;
968
969                 if (adr >> cfi->chipshift) {
970                         adr = 0;
971                         chipnum++;
972
973                         if (chipnum >= cfi->numchips)
974                                 break;
975                 }
976         }
977
978         instr->state = MTD_ERASE_DONE;
979         mtd_erase_callback(instr);
980
981         return 0;
982 }
983
984 static void cfi_staa_sync (struct mtd_info *mtd)
985 {
986         struct map_info *map = mtd->priv;
987         struct cfi_private *cfi = map->fldrv_priv;
988         int i;
989         struct flchip *chip;
990         int ret = 0;
991         DECLARE_WAITQUEUE(wait, current);
992
993         for (i=0; !ret && i<cfi->numchips; i++) {
994                 chip = &cfi->chips[i];
995
996         retry:
997                 mutex_lock(&chip->mutex);
998
999                 switch(chip->state) {
1000                 case FL_READY:
1001                 case FL_STATUS:
1002                 case FL_CFI_QUERY:
1003                 case FL_JEDEC_QUERY:
1004                         chip->oldstate = chip->state;
1005                         chip->state = FL_SYNCING;
1006                         /* No need to wake_up() on this state change -
1007                          * as the whole point is that nobody can do anything
1008                          * with the chip now anyway.
1009                          */
1010                 case FL_SYNCING:
1011                         mutex_unlock(&chip->mutex);
1012                         break;
1013
1014                 default:
1015                         /* Not an idle state */
1016                         set_current_state(TASK_UNINTERRUPTIBLE);
1017                         add_wait_queue(&chip->wq, &wait);
1018
1019                         mutex_unlock(&chip->mutex);
1020                         schedule();
1021                         remove_wait_queue(&chip->wq, &wait);
1022
1023                         goto retry;
1024                 }
1025         }
1026
1027         /* Unlock the chips again */
1028
1029         for (i--; i >=0; i--) {
1030                 chip = &cfi->chips[i];
1031
1032                 mutex_lock(&chip->mutex);
1033
1034                 if (chip->state == FL_SYNCING) {
1035                         chip->state = chip->oldstate;
1036                         wake_up(&chip->wq);
1037                 }
1038                 mutex_unlock(&chip->mutex);
1039         }
1040 }
1041
1042 static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1043 {
1044         struct cfi_private *cfi = map->fldrv_priv;
1045         map_word status, status_OK;
1046         unsigned long timeo = jiffies + HZ;
1047         DECLARE_WAITQUEUE(wait, current);
1048
1049         adr += chip->start;
1050
1051         /* Let's determine this according to the interleave only once */
1052         status_OK = CMD(0x80);
1053
1054         timeo = jiffies + HZ;
1055 retry:
1056         mutex_lock(&chip->mutex);
1057
1058         /* Check that the chip's ready to talk to us. */
1059         switch (chip->state) {
1060         case FL_CFI_QUERY:
1061         case FL_JEDEC_QUERY:
1062         case FL_READY:
1063                 map_write(map, CMD(0x70), adr);
1064                 chip->state = FL_STATUS;
1065
1066         case FL_STATUS:
1067                 status = map_read(map, adr);
1068                 if (map_word_andequal(map, status, status_OK, status_OK))
1069                         break;
1070
1071                 /* Urgh. Chip not yet ready to talk to us. */
1072                 if (time_after(jiffies, timeo)) {
1073                         mutex_unlock(&chip->mutex);
1074                         printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1075                         return -EIO;
1076                 }
1077
1078                 /* Latency issues. Drop the lock, wait a while and retry */
1079                 mutex_unlock(&chip->mutex);
1080                 cfi_udelay(1);
1081                 goto retry;
1082
1083         default:
1084                 /* Stick ourselves on a wait queue to be woken when
1085                    someone changes the status */
1086                 set_current_state(TASK_UNINTERRUPTIBLE);
1087                 add_wait_queue(&chip->wq, &wait);
1088                 mutex_unlock(&chip->mutex);
1089                 schedule();
1090                 remove_wait_queue(&chip->wq, &wait);
1091                 timeo = jiffies + HZ;
1092                 goto retry;
1093         }
1094
1095         ENABLE_VPP(map);
1096         map_write(map, CMD(0x60), adr);
1097         map_write(map, CMD(0x01), adr);
1098         chip->state = FL_LOCKING;
1099
1100         mutex_unlock(&chip->mutex);
1101         msleep(1000);
1102         mutex_lock(&chip->mutex);
1103
1104         /* FIXME. Use a timer to check this, and return immediately. */
1105         /* Once the state machine's known to be working I'll do that */
1106
1107         timeo = jiffies + (HZ*2);
1108         for (;;) {
1109
1110                 status = map_read(map, adr);
1111                 if (map_word_andequal(map, status, status_OK, status_OK))
1112                         break;
1113
1114                 /* OK Still waiting */
1115                 if (time_after(jiffies, timeo)) {
1116                         map_write(map, CMD(0x70), adr);
1117                         chip->state = FL_STATUS;
1118                         printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1119                         DISABLE_VPP(map);
1120                         mutex_unlock(&chip->mutex);
1121                         return -EIO;
1122                 }
1123
1124                 /* Latency issues. Drop the lock, wait a while and retry */
1125                 mutex_unlock(&chip->mutex);
1126                 cfi_udelay(1);
1127                 mutex_lock(&chip->mutex);
1128         }
1129
1130         /* Done and happy. */
1131         chip->state = FL_STATUS;
1132         DISABLE_VPP(map);
1133         wake_up(&chip->wq);
1134         mutex_unlock(&chip->mutex);
1135         return 0;
1136 }
1137 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1138 {
1139         struct map_info *map = mtd->priv;
1140         struct cfi_private *cfi = map->fldrv_priv;
1141         unsigned long adr;
1142         int chipnum, ret = 0;
1143 #ifdef DEBUG_LOCK_BITS
1144         int ofs_factor = cfi->interleave * cfi->device_type;
1145 #endif
1146
1147         if (ofs & (mtd->erasesize - 1))
1148                 return -EINVAL;
1149
1150         if (len & (mtd->erasesize -1))
1151                 return -EINVAL;
1152
1153         if ((len + ofs) > mtd->size)
1154                 return -EINVAL;
1155
1156         chipnum = ofs >> cfi->chipshift;
1157         adr = ofs - (chipnum << cfi->chipshift);
1158
1159         while(len) {
1160
1161 #ifdef DEBUG_LOCK_BITS
1162                 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1163                 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1164                 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1165 #endif
1166
1167                 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1168
1169 #ifdef DEBUG_LOCK_BITS
1170                 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1171                 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1172                 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1173 #endif
1174
1175                 if (ret)
1176                         return ret;
1177
1178                 adr += mtd->erasesize;
1179                 len -= mtd->erasesize;
1180
1181                 if (adr >> cfi->chipshift) {
1182                         adr = 0;
1183                         chipnum++;
1184
1185                         if (chipnum >= cfi->numchips)
1186                                 break;
1187                 }
1188         }
1189         return 0;
1190 }
1191 static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1192 {
1193         struct cfi_private *cfi = map->fldrv_priv;
1194         map_word status, status_OK;
1195         unsigned long timeo = jiffies + HZ;
1196         DECLARE_WAITQUEUE(wait, current);
1197
1198         adr += chip->start;
1199
1200         /* Let's determine this according to the interleave only once */
1201         status_OK = CMD(0x80);
1202
1203         timeo = jiffies + HZ;
1204 retry:
1205         mutex_lock(&chip->mutex);
1206
1207         /* Check that the chip's ready to talk to us. */
1208         switch (chip->state) {
1209         case FL_CFI_QUERY:
1210         case FL_JEDEC_QUERY:
1211         case FL_READY:
1212                 map_write(map, CMD(0x70), adr);
1213                 chip->state = FL_STATUS;
1214
1215         case FL_STATUS:
1216                 status = map_read(map, adr);
1217                 if (map_word_andequal(map, status, status_OK, status_OK))
1218                         break;
1219
1220                 /* Urgh. Chip not yet ready to talk to us. */
1221                 if (time_after(jiffies, timeo)) {
1222                         mutex_unlock(&chip->mutex);
1223                         printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1224                         return -EIO;
1225                 }
1226
1227                 /* Latency issues. Drop the lock, wait a while and retry */
1228                 mutex_unlock(&chip->mutex);
1229                 cfi_udelay(1);
1230                 goto retry;
1231
1232         default:
1233                 /* Stick ourselves on a wait queue to be woken when
1234                    someone changes the status */
1235                 set_current_state(TASK_UNINTERRUPTIBLE);
1236                 add_wait_queue(&chip->wq, &wait);
1237                 mutex_unlock(&chip->mutex);
1238                 schedule();
1239                 remove_wait_queue(&chip->wq, &wait);
1240                 timeo = jiffies + HZ;
1241                 goto retry;
1242         }
1243
1244         ENABLE_VPP(map);
1245         map_write(map, CMD(0x60), adr);
1246         map_write(map, CMD(0xD0), adr);
1247         chip->state = FL_UNLOCKING;
1248
1249         mutex_unlock(&chip->mutex);
1250         msleep(1000);
1251         mutex_lock(&chip->mutex);
1252
1253         /* FIXME. Use a timer to check this, and return immediately. */
1254         /* Once the state machine's known to be working I'll do that */
1255
1256         timeo = jiffies + (HZ*2);
1257         for (;;) {
1258
1259                 status = map_read(map, adr);
1260                 if (map_word_andequal(map, status, status_OK, status_OK))
1261                         break;
1262
1263                 /* OK Still waiting */
1264                 if (time_after(jiffies, timeo)) {
1265                         map_write(map, CMD(0x70), adr);
1266                         chip->state = FL_STATUS;
1267                         printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1268                         DISABLE_VPP(map);
1269                         mutex_unlock(&chip->mutex);
1270                         return -EIO;
1271                 }
1272
1273                 /* Latency issues. Drop the lock, wait a while and retry */
1274                 mutex_unlock(&chip->mutex);
1275                 cfi_udelay(1);
1276                 mutex_lock(&chip->mutex);
1277         }
1278
1279         /* Done and happy. */
1280         chip->state = FL_STATUS;
1281         DISABLE_VPP(map);
1282         wake_up(&chip->wq);
1283         mutex_unlock(&chip->mutex);
1284         return 0;
1285 }
1286 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1287 {
1288         struct map_info *map = mtd->priv;
1289         struct cfi_private *cfi = map->fldrv_priv;
1290         unsigned long adr;
1291         int chipnum, ret = 0;
1292 #ifdef DEBUG_LOCK_BITS
1293         int ofs_factor = cfi->interleave * cfi->device_type;
1294 #endif
1295
1296         chipnum = ofs >> cfi->chipshift;
1297         adr = ofs - (chipnum << cfi->chipshift);
1298
1299 #ifdef DEBUG_LOCK_BITS
1300         {
1301                 unsigned long temp_adr = adr;
1302                 unsigned long temp_len = len;
1303
1304                 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1305                 while (temp_len) {
1306                         printk("before unlock %lx: block status register is %x\n", temp_adr, cfi_read_query(map, temp_adr+(2*ofs_factor)));
1307                         temp_adr += mtd->erasesize;
1308                         temp_len -= mtd->erasesize;
1309                 }
1310                 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1311         }
1312 #endif
1313
1314         ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1315
1316 #ifdef DEBUG_LOCK_BITS
1317         cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1318         printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1319         cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1320 #endif
1321
1322         return ret;
1323 }
1324
1325 static int cfi_staa_suspend(struct mtd_info *mtd)
1326 {
1327         struct map_info *map = mtd->priv;
1328         struct cfi_private *cfi = map->fldrv_priv;
1329         int i;
1330         struct flchip *chip;
1331         int ret = 0;
1332
1333         for (i=0; !ret && i<cfi->numchips; i++) {
1334                 chip = &cfi->chips[i];
1335
1336                 mutex_lock(&chip->mutex);
1337
1338                 switch(chip->state) {
1339                 case FL_READY:
1340                 case FL_STATUS:
1341                 case FL_CFI_QUERY:
1342                 case FL_JEDEC_QUERY:
1343                         chip->oldstate = chip->state;
1344                         chip->state = FL_PM_SUSPENDED;
1345                         /* No need to wake_up() on this state change -
1346                          * as the whole point is that nobody can do anything
1347                          * with the chip now anyway.
1348                          */
1349                 case FL_PM_SUSPENDED:
1350                         break;
1351
1352                 default:
1353                         ret = -EAGAIN;
1354                         break;
1355                 }
1356                 mutex_unlock(&chip->mutex);
1357         }
1358
1359         /* Unlock the chips again */
1360
1361         if (ret) {
1362                 for (i--; i >=0; i--) {
1363                         chip = &cfi->chips[i];
1364
1365                         mutex_lock(&chip->mutex);
1366
1367                         if (chip->state == FL_PM_SUSPENDED) {
1368                                 /* No need to force it into a known state here,
1369                                    because we're returning failure, and it didn't
1370                                    get power cycled */
1371                                 chip->state = chip->oldstate;
1372                                 wake_up(&chip->wq);
1373                         }
1374                         mutex_unlock(&chip->mutex);
1375                 }
1376         }
1377
1378         return ret;
1379 }
1380
1381 static void cfi_staa_resume(struct mtd_info *mtd)
1382 {
1383         struct map_info *map = mtd->priv;
1384         struct cfi_private *cfi = map->fldrv_priv;
1385         int i;
1386         struct flchip *chip;
1387
1388         for (i=0; i<cfi->numchips; i++) {
1389
1390                 chip = &cfi->chips[i];
1391
1392                 mutex_lock(&chip->mutex);
1393
1394                 /* Go to known state. Chip may have been power cycled */
1395                 if (chip->state == FL_PM_SUSPENDED) {
1396                         map_write(map, CMD(0xFF), 0);
1397                         chip->state = FL_READY;
1398                         wake_up(&chip->wq);
1399                 }
1400
1401                 mutex_unlock(&chip->mutex);
1402         }
1403 }
1404
1405 static void cfi_staa_destroy(struct mtd_info *mtd)
1406 {
1407         struct map_info *map = mtd->priv;
1408         struct cfi_private *cfi = map->fldrv_priv;
1409         kfree(cfi->cmdset_priv);
1410         kfree(cfi);
1411 }
1412
1413 MODULE_LICENSE("GPL");