platform/kernel/linux-rpi.git: drivers/s390/block/dasd_eckd.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *                  Horst Hummel <Horst.Hummel@de.ibm.com>
5  *                  Carsten Otte <Cotte@de.ibm.com>
6  *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12
13 #define KMSG_COMPONENT "dasd-eckd"
14
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>        /* HDIO_GETGEO                      */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/compat.h>
22 #include <linux/init.h>
23 #include <linux/seq_file.h>
24
25 #include <asm/css_chars.h>
26 #include <asm/debug.h>
27 #include <asm/idals.h>
28 #include <asm/ebcdic.h>
29 #include <asm/io.h>
30 #include <linux/uaccess.h>
31 #include <asm/cio.h>
32 #include <asm/ccwdev.h>
33 #include <asm/itcw.h>
34 #include <asm/schid.h>
35 #include <asm/chpid.h>
36
37 #include "dasd_int.h"
38 #include "dasd_eckd.h"
39
40 #ifdef PRINTK_HEADER
41 #undef PRINTK_HEADER
42 #endif                          /* PRINTK_HEADER */
43 #define PRINTK_HEADER "dasd(eckd):"
44
45 /*
46  * raw track access always maps to 64k in memory
47  * so it maps to 16 blocks of 4k per track
48  */
49 #define DASD_RAW_BLOCK_PER_TRACK 16
50 #define DASD_RAW_BLOCKSIZE 4096
51 /* 64k are 128 x 512 byte sectors  */
52 #define DASD_RAW_SECTORS_PER_TRACK 128
53
54 MODULE_LICENSE("GPL");
55
56 static struct dasd_discipline dasd_eckd_discipline;
57
58 /* The ccw bus type uses this table to find devices that it sends to
59  * dasd_eckd_probe */
60 static struct ccw_device_id dasd_eckd_ids[] = {
61         { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
62         { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
63         { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
64         { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
65         { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
66         { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
67         { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
68         { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
69         { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
70         { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
71         { /* end of list */ },
72 };
73
74 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
75
76 static struct ccw_driver dasd_eckd_driver; /* see below */
77
78 static void *rawpadpage;
79
80 #define INIT_CQR_OK 0
81 #define INIT_CQR_UNFORMATTED 1
82 #define INIT_CQR_ERROR 2
83
84 /* emergency request for reserve/release */
85 static struct {
86         struct dasd_ccw_req cqr;
87         struct ccw1 ccw;
88         char data[32];
89 } *dasd_reserve_req;
90 static DEFINE_MUTEX(dasd_reserve_mutex);
91
92 static struct {
93         struct dasd_ccw_req cqr;
94         struct ccw1 ccw[2];
95         char data[40];
96 } *dasd_vol_info_req;
97 static DEFINE_MUTEX(dasd_vol_info_mutex);
98
99 struct ext_pool_exhaust_work_data {
100         struct work_struct worker;
101         struct dasd_device *device;
102         struct dasd_device *base;
103 };
104
105 /* definitions for the path verification worker */
106 struct path_verification_work_data {
107         struct work_struct worker;
108         struct dasd_device *device;
109         struct dasd_ccw_req cqr;
110         struct ccw1 ccw;
111         __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
112         int isglobal;
113         __u8 tbvpm;
114 };
115 static struct path_verification_work_data *path_verification_worker;
116 static DEFINE_MUTEX(dasd_path_verification_mutex);
117
118 struct check_attention_work_data {
119         struct work_struct worker;
120         struct dasd_device *device;
121         __u8 lpum;
122 };
123
124 static int dasd_eckd_ext_pool_id(struct dasd_device *);
125 static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
126                         struct dasd_device *, struct dasd_device *,
127                         unsigned int, int, unsigned int, unsigned int,
128                         unsigned int, unsigned int);
129
130 /* initial attempt at a probe function. this can be simplified once
131  * the other detection code is gone */
132 static int
133 dasd_eckd_probe (struct ccw_device *cdev)
134 {
135         int ret;
136
137         /* set ECKD specific ccw-device options */
138         ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
139                                      CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
140         if (ret) {
141                 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
142                                 "dasd_eckd_probe: could not set "
143                                 "ccw-device options");
144                 return ret;
145         }
146         ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
147         return ret;
148 }
149
150 static int
151 dasd_eckd_set_online(struct ccw_device *cdev)
152 {
153         return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
154 }
155
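/*
 * Record sizes (in bytes) of the first three CDL records on track 0;
 * all other special CDL records use LABEL_SIZE (see dasd_eckd_cdl_reclen()).
 */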
156 static const int sizes_trk0[] = { 28, 148, 84 };
157 #define LABEL_SIZE 140
158
159 /* head and record addresses of count_area read in analysis ccw */
160 static const int count_area_head[] = { 0, 0, 0, 0, 1 };
161 static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
162
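/* Integer division d1 / d2, rounded up. */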
163 static inline unsigned int
164 ceil_quot(unsigned int d1, unsigned int d2)
165 {
166         return (d1 + (d2 - 1)) / d2;
167 }
168
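/*
 * Number of records with key length kl and data length dl that fit on
 * one track, using the track capacity formula of the respective device
 * type (3380, 3390 or 9345); returns 0 for unknown device types.
 */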
169 static unsigned int
170 recs_per_track(struct dasd_eckd_characteristics * rdc,
171                unsigned int kl, unsigned int dl)
172 {
173         int dn, kn;
174
175         switch (rdc->dev_type) {
176         case 0x3380:
177                 if (kl)
178                         return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
179                                        ceil_quot(dl + 12, 32));
180                 else
181                         return 1499 / (15 + ceil_quot(dl + 12, 32));
182         case 0x3390:
183                 dn = ceil_quot(dl + 6, 232) + 1;
184                 if (kl) {
185                         kn = ceil_quot(kl + 6, 232) + 1;
186                         return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
187                                        9 + ceil_quot(dl + 6 * dn, 34));
188                 } else
189                         return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
190         case 0x9345:
191                 dn = ceil_quot(dl + 6, 232) + 1;
192                 if (kl) {
193                         kn = ceil_quot(kl + 6, 232) + 1;
194                         return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
195                                        ceil_quot(dl + 6 * dn, 34));
196                 } else
197                         return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
198         }
199         return 0;
200 }
201
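/*
 * Pack a cylinder/head address: the low 16 bits of the cylinder go into
 * the cyl field, the remaining high cylinder bits into the upper 12 bits
 * of the head field, and the 4-bit head number into its low bits.
 */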
202 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
203 {
204         geo->cyl = (__u16) cyl;
205         geo->head = cyl >> 16;
206         geo->head <<= 4;
207         geo->head |= head;
208 }
209
210 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
211                      struct dasd_device *device)
212 {
213         struct dasd_eckd_private *private = device->private;
214         int rc;
215
216         rc = get_phys_clock(&data->ep_sys_time);
217         /*
218          * Ignore return code if XRC is not supported or
219          * sync clock is switched off
220          */
221         if ((rc && !private->rdc_data.facilities.XRC_supported) ||
222             rc == -EOPNOTSUPP || rc == -EACCES)
223                 return 0;
224
225         /* switch on System Time Stamp - needed for XRC Support */
226         data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
227         data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
228
229         if (ccw) {
230                 ccw->count = sizeof(struct DE_eckd_data);
231                 ccw->flags |= CCW_FLAG_SLI;
232         }
233
234         return rc;
235 }
236
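/*
 * Build the Define Extent parameter block (and the CCW, if one is given)
 * for the track range trk..totrk and the given command.
 */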
237 static int
238 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
239               unsigned int totrk, int cmd, struct dasd_device *device,
240               int blksize)
241 {
242         struct dasd_eckd_private *private = device->private;
243         u16 heads, beghead, endhead;
244         u32 begcyl, endcyl;
245         int rc = 0;
246
247         if (ccw) {
248                 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
249                 ccw->flags = 0;
250                 ccw->count = 16;
251                 ccw->cda = (__u32)__pa(data);
252         }
253
254         memset(data, 0, sizeof(struct DE_eckd_data));
255         switch (cmd) {
256         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
257         case DASD_ECKD_CCW_READ_RECORD_ZERO:
258         case DASD_ECKD_CCW_READ:
259         case DASD_ECKD_CCW_READ_MT:
260         case DASD_ECKD_CCW_READ_CKD:
261         case DASD_ECKD_CCW_READ_CKD_MT:
262         case DASD_ECKD_CCW_READ_KD:
263         case DASD_ECKD_CCW_READ_KD_MT:
264                 data->mask.perm = 0x1;
265                 data->attributes.operation = private->attrib.operation;
266                 break;
267         case DASD_ECKD_CCW_READ_COUNT:
268                 data->mask.perm = 0x1;
269                 data->attributes.operation = DASD_BYPASS_CACHE;
270                 break;
271         case DASD_ECKD_CCW_READ_TRACK:
272         case DASD_ECKD_CCW_READ_TRACK_DATA:
273                 data->mask.perm = 0x1;
274                 data->attributes.operation = private->attrib.operation;
275                 data->blk_size = 0;
276                 break;
277         case DASD_ECKD_CCW_WRITE:
278         case DASD_ECKD_CCW_WRITE_MT:
279         case DASD_ECKD_CCW_WRITE_KD:
280         case DASD_ECKD_CCW_WRITE_KD_MT:
281                 data->mask.perm = 0x02;
282                 data->attributes.operation = private->attrib.operation;
283                 rc = set_timestamp(ccw, data, device);
284                 break;
285         case DASD_ECKD_CCW_WRITE_CKD:
286         case DASD_ECKD_CCW_WRITE_CKD_MT:
287                 data->attributes.operation = DASD_BYPASS_CACHE;
288                 rc = set_timestamp(ccw, data, device);
289                 break;
290         case DASD_ECKD_CCW_ERASE:
291         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
292         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
293                 data->mask.perm = 0x3;
294                 data->mask.auth = 0x1;
295                 data->attributes.operation = DASD_BYPASS_CACHE;
296                 rc = set_timestamp(ccw, data, device);
297                 break;
298         case DASD_ECKD_CCW_WRITE_FULL_TRACK:
299                 data->mask.perm = 0x03;
300                 data->attributes.operation = private->attrib.operation;
301                 data->blk_size = 0;
302                 break;
303         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
304                 data->mask.perm = 0x02;
305                 data->attributes.operation = private->attrib.operation;
306                 data->blk_size = blksize;
307                 rc = set_timestamp(ccw, data, device);
308                 break;
309         default:
310                 dev_err(&device->cdev->dev,
311                         "0x%x is not a known command\n", cmd);
312                 break;
313         }
314
315         data->attributes.mode = 0x3;    /* ECKD */
316
317         if ((private->rdc_data.cu_type == 0x2105 ||
318              private->rdc_data.cu_type == 0x2107 ||
319              private->rdc_data.cu_type == 0x1750)
320             && !(private->uses_cdl && trk < 2))
321                 data->ga_extended |= 0x40; /* Regular Data Format Mode */
322
323         heads = private->rdc_data.trk_per_cyl;
324         begcyl = trk / heads;
325         beghead = trk % heads;
326         endcyl = totrk / heads;
327         endhead = totrk % heads;
328
329         /* check for sequential prestage - enhance cylinder range */
330         if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
331             data->attributes.operation == DASD_SEQ_ACCESS) {
332
333                 if (endcyl + private->attrib.nr_cyl < private->real_cyl)
334                         endcyl += private->attrib.nr_cyl;
335                 else
336                         endcyl = (private->real_cyl - 1);
337         }
338
339         set_ch_t(&data->beg_ext, begcyl, beghead);
340         set_ch_t(&data->end_ext, endcyl, endhead);
341         return rc;
342 }
343
344
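/*
 * Build the Locate Record Extended parameter block (and the CCW, if one
 * is given) for the given command, track and record.
 */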
345 static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
346                               unsigned int trk, unsigned int rec_on_trk,
347                               int count, int cmd, struct dasd_device *device,
348                               unsigned int reclen, unsigned int tlf)
349 {
350         struct dasd_eckd_private *private = device->private;
351         int sector;
352         int dn, d;
353
354         if (ccw) {
355                 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
356                 ccw->flags = 0;
357                 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
358                         ccw->count = 22;
359                 else
360                         ccw->count = 20;
361                 ccw->cda = (__u32)__pa(data);
362         }
363
364         memset(data, 0, sizeof(*data));
365         sector = 0;
366         if (rec_on_trk) {
367                 switch (private->rdc_data.dev_type) {
368                 case 0x3390:
369                         dn = ceil_quot(reclen + 6, 232);
370                         d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
371                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
372                         break;
373                 case 0x3380:
374                         d = 7 + ceil_quot(reclen + 12, 32);
375                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
376                         break;
377                 }
378         }
379         data->sector = sector;
380         /* note: the meaning of count depends on the operation:
381          *       for record-based I/O it's the number of records, but for
382          *       track-based I/O it's the number of tracks
383          */
384         data->count = count;
385         switch (cmd) {
386         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
387                 data->operation.orientation = 0x3;
388                 data->operation.operation = 0x03;
389                 break;
390         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
391                 data->operation.orientation = 0x3;
392                 data->operation.operation = 0x16;
393                 break;
394         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
395                 data->operation.orientation = 0x1;
396                 data->operation.operation = 0x03;
397                 data->count++;
398                 break;
399         case DASD_ECKD_CCW_READ_RECORD_ZERO:
400                 data->operation.orientation = 0x3;
401                 data->operation.operation = 0x16;
402                 data->count++;
403                 break;
404         case DASD_ECKD_CCW_WRITE:
405         case DASD_ECKD_CCW_WRITE_MT:
406         case DASD_ECKD_CCW_WRITE_KD:
407         case DASD_ECKD_CCW_WRITE_KD_MT:
408                 data->auxiliary.length_valid = 0x1;
409                 data->length = reclen;
410                 data->operation.operation = 0x01;
411                 break;
412         case DASD_ECKD_CCW_WRITE_CKD:
413         case DASD_ECKD_CCW_WRITE_CKD_MT:
414                 data->auxiliary.length_valid = 0x1;
415                 data->length = reclen;
416                 data->operation.operation = 0x03;
417                 break;
418         case DASD_ECKD_CCW_WRITE_FULL_TRACK:
419                 data->operation.orientation = 0x0;
420                 data->operation.operation = 0x3F;
421                 data->extended_operation = 0x11;
422                 data->length = 0;
423                 data->extended_parameter_length = 0x02;
424                 if (data->count > 8) {
425                         data->extended_parameter[0] = 0xFF;
426                         data->extended_parameter[1] = 0xFF;
427                         data->extended_parameter[1] <<= (16 - count);
428                 } else {
429                         data->extended_parameter[0] = 0xFF;
430                         data->extended_parameter[0] <<= (8 - count);
431                         data->extended_parameter[1] = 0x00;
432                 }
433                 data->sector = 0xFF;
434                 break;
435         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
436                 data->auxiliary.length_valid = 0x1;
437                 data->length = reclen;  /* not tlf, as one might think */
438                 data->operation.operation = 0x3F;
439                 data->extended_operation = 0x23;
440                 break;
441         case DASD_ECKD_CCW_READ:
442         case DASD_ECKD_CCW_READ_MT:
443         case DASD_ECKD_CCW_READ_KD:
444         case DASD_ECKD_CCW_READ_KD_MT:
445                 data->auxiliary.length_valid = 0x1;
446                 data->length = reclen;
447                 data->operation.operation = 0x06;
448                 break;
449         case DASD_ECKD_CCW_READ_CKD:
450         case DASD_ECKD_CCW_READ_CKD_MT:
451                 data->auxiliary.length_valid = 0x1;
452                 data->length = reclen;
453                 data->operation.operation = 0x16;
454                 break;
455         case DASD_ECKD_CCW_READ_COUNT:
456                 data->operation.operation = 0x06;
457                 break;
458         case DASD_ECKD_CCW_READ_TRACK:
459                 data->operation.orientation = 0x1;
460                 data->operation.operation = 0x0C;
461                 data->extended_parameter_length = 0;
462                 data->sector = 0xFF;
463                 break;
464         case DASD_ECKD_CCW_READ_TRACK_DATA:
465                 data->auxiliary.length_valid = 0x1;
466                 data->length = tlf;
467                 data->operation.operation = 0x0C;
468                 break;
469         case DASD_ECKD_CCW_ERASE:
470                 data->length = reclen;
471                 data->auxiliary.length_valid = 0x1;
472                 data->operation.operation = 0x0b;
473                 break;
474         default:
475                 DBF_DEV_EVENT(DBF_ERR, device,
476                             "fill LRE unknown opcode 0x%x", cmd);
477                 BUG();
478         }
479         set_ch_t(&data->seek_addr,
480                  trk / private->rdc_data.trk_per_cyl,
481                  trk % private->rdc_data.trk_per_cyl);
482         data->search_arg.cyl = data->seek_addr.cyl;
483         data->search_arg.head = data->seek_addr.head;
484         data->search_arg.record = rec_on_trk;
485 }
486
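/*
 * Build a Prefix CCW whose data combines a Define Extent and, for
 * format 1, a Locate Record Extended parameter block.
 */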
487 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
488                       unsigned int trk, unsigned int totrk, int cmd,
489                       struct dasd_device *basedev, struct dasd_device *startdev,
490                       unsigned int format, unsigned int rec_on_trk, int count,
491                       unsigned int blksize, unsigned int tlf)
492 {
493         struct dasd_eckd_private *basepriv, *startpriv;
494         struct LRE_eckd_data *lredata;
495         struct DE_eckd_data *dedata;
496         int rc = 0;
497
498         basepriv = basedev->private;
499         startpriv = startdev->private;
500         dedata = &pfxdata->define_extent;
501         lredata = &pfxdata->locate_record;
502
503         ccw->cmd_code = DASD_ECKD_CCW_PFX;
504         ccw->flags = 0;
505         if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
506                 ccw->count = sizeof(*pfxdata) + 2;
507                 ccw->cda = (__u32) __pa(pfxdata);
508                 memset(pfxdata, 0, sizeof(*pfxdata) + 2);
509         } else {
510                 ccw->count = sizeof(*pfxdata);
511                 ccw->cda = (__u32) __pa(pfxdata);
512                 memset(pfxdata, 0, sizeof(*pfxdata));
513         }
514
515         /* prefix data */
516         if (format > 1) {
517                 DBF_DEV_EVENT(DBF_ERR, basedev,
518                               "PFX LRE unknown format 0x%x", format);
519                 BUG();
520                 return -EINVAL;
521         }
522         pfxdata->format = format;
523         pfxdata->base_address = basepriv->ned->unit_addr;
524         pfxdata->base_lss = basepriv->ned->ID;
525         pfxdata->validity.define_extent = 1;
526
527         /* private uid is kept up to date, conf_data may be outdated */
528         if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
529                 pfxdata->validity.verify_base = 1;
530
531         if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
532                 pfxdata->validity.verify_base = 1;
533                 pfxdata->validity.hyper_pav = 1;
534         }
535
536         rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
537
538         /*
539          * For some commands the System Time Stamp is set in the define extent
540          * data when XRC is supported. The validity of the time stamp must be
541          * reflected in the prefix data as well.
542          */
543         if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
544                 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */
545
546         if (format == 1) {
547                 locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
548                                   basedev, blksize, tlf);
549         }
550
551         return rc;
552 }
553
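/*
 * Convenience wrapper around prefix_LRE() with format 0, i.e. only the
 * define extent part of the prefix data is built.
 */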
554 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
555                   unsigned int trk, unsigned int totrk, int cmd,
556                   struct dasd_device *basedev, struct dasd_device *startdev)
557 {
558         return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
559                           0, 0, 0, 0, 0);
560 }
561
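/*
 * Build a Locate Record CCW and parameter block; this is the
 * non-extended counterpart of locate_record_ext().
 */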
562 static void
563 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
564               unsigned int rec_on_trk, int no_rec, int cmd,
565               struct dasd_device * device, int reclen)
566 {
567         struct dasd_eckd_private *private = device->private;
568         int sector;
569         int dn, d;
570
571         DBF_DEV_EVENT(DBF_INFO, device,
572                   "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
573                   trk, rec_on_trk, no_rec, cmd, reclen);
574
575         ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
576         ccw->flags = 0;
577         ccw->count = 16;
578         ccw->cda = (__u32) __pa(data);
579
580         memset(data, 0, sizeof(struct LO_eckd_data));
581         sector = 0;
582         if (rec_on_trk) {
583                 switch (private->rdc_data.dev_type) {
584                 case 0x3390:
585                         dn = ceil_quot(reclen + 6, 232);
586                         d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
587                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
588                         break;
589                 case 0x3380:
590                         d = 7 + ceil_quot(reclen + 12, 32);
591                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
592                         break;
593                 }
594         }
595         data->sector = sector;
596         data->count = no_rec;
597         switch (cmd) {
598         case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
599                 data->operation.orientation = 0x3;
600                 data->operation.operation = 0x03;
601                 break;
602         case DASD_ECKD_CCW_READ_HOME_ADDRESS:
603                 data->operation.orientation = 0x3;
604                 data->operation.operation = 0x16;
605                 break;
606         case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
607                 data->operation.orientation = 0x1;
608                 data->operation.operation = 0x03;
609                 data->count++;
610                 break;
611         case DASD_ECKD_CCW_READ_RECORD_ZERO:
612                 data->operation.orientation = 0x3;
613                 data->operation.operation = 0x16;
614                 data->count++;
615                 break;
616         case DASD_ECKD_CCW_WRITE:
617         case DASD_ECKD_CCW_WRITE_MT:
618         case DASD_ECKD_CCW_WRITE_KD:
619         case DASD_ECKD_CCW_WRITE_KD_MT:
620                 data->auxiliary.last_bytes_used = 0x1;
621                 data->length = reclen;
622                 data->operation.operation = 0x01;
623                 break;
624         case DASD_ECKD_CCW_WRITE_CKD:
625         case DASD_ECKD_CCW_WRITE_CKD_MT:
626                 data->auxiliary.last_bytes_used = 0x1;
627                 data->length = reclen;
628                 data->operation.operation = 0x03;
629                 break;
630         case DASD_ECKD_CCW_READ:
631         case DASD_ECKD_CCW_READ_MT:
632         case DASD_ECKD_CCW_READ_KD:
633         case DASD_ECKD_CCW_READ_KD_MT:
634                 data->auxiliary.last_bytes_used = 0x1;
635                 data->length = reclen;
636                 data->operation.operation = 0x06;
637                 break;
638         case DASD_ECKD_CCW_READ_CKD:
639         case DASD_ECKD_CCW_READ_CKD_MT:
640                 data->auxiliary.last_bytes_used = 0x1;
641                 data->length = reclen;
642                 data->operation.operation = 0x16;
643                 break;
644         case DASD_ECKD_CCW_READ_COUNT:
645                 data->operation.operation = 0x06;
646                 break;
647         case DASD_ECKD_CCW_ERASE:
648                 data->length = reclen;
649                 data->auxiliary.last_bytes_used = 0x1;
650                 data->operation.operation = 0x0b;
651                 break;
652         default:
653                 DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
654                               "opcode 0x%x", cmd);
655         }
656         set_ch_t(&data->seek_addr,
657                  trk / private->rdc_data.trk_per_cyl,
658                  trk % private->rdc_data.trk_per_cyl);
659         data->search_arg.cyl = data->seek_addr.cyl;
660         data->search_arg.head = data->seek_addr.head;
661         data->search_arg.record = rec_on_trk;
662 }
663
664 /*
665  * Returns 1 if the block is one of the special blocks that needs
666  * to get read/written with the KD variant of the command.
667  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
668  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
669  * Luckily the KD variants differ only by one bit (0x08) from the
670  * normal variant. So don't wonder about code like:
671  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
672  *         ccw->cmd_code |= 0x8;
673  */
674 static inline int
675 dasd_eckd_cdl_special(int blk_per_trk, int recid)
676 {
677         if (recid < 3)
678                 return 1;
679         if (recid < blk_per_trk)
680                 return 0;
681         if (recid < 2 * blk_per_trk)
682                 return 1;
683         return 0;
684 }
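/*
 * Example: with 12 blocks per track, records 0-2 (on track 0) and
 * records 12-23 (all of track 1) are the special CDL blocks.
 */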
685
686 /*
687  * Returns the record size for the special blocks of the cdl format.
688  * Only returns something useful if dasd_eckd_cdl_special is true
689  * for the recid.
690  */
691 static inline int
692 dasd_eckd_cdl_reclen(int recid)
693 {
694         if (recid < 3)
695                 return sizes_trk0[recid];
696         return LABEL_SIZE;
697 }
698 /* create unique id from private structure. */
699 static void create_uid(struct dasd_eckd_private *private)
700 {
701         int count;
702         struct dasd_uid *uid;
703
704         uid = &private->uid;
705         memset(uid, 0, sizeof(struct dasd_uid));
706         memcpy(uid->vendor, private->ned->HDA_manufacturer,
707                sizeof(uid->vendor) - 1);
708         EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
709         memcpy(uid->serial, private->ned->HDA_location,
710                sizeof(uid->serial) - 1);
711         EBCASC(uid->serial, sizeof(uid->serial) - 1);
712         uid->ssid = private->gneq->subsystemID;
713         uid->real_unit_addr = private->ned->unit_addr;
714         if (private->sneq) {
715                 uid->type = private->sneq->sua_flags;
716                 if (uid->type == UA_BASE_PAV_ALIAS)
717                         uid->base_unit_addr = private->sneq->base_unit_addr;
718         } else {
719                 uid->type = UA_BASE_DEVICE;
720         }
721         if (private->vdsneq) {
722                 for (count = 0; count < 16; count++) {
723                         sprintf(uid->vduit+2*count, "%02x",
724                                 private->vdsneq->uit[count]);
725                 }
726         }
727 }
728
729 /*
730  * Generate device unique id that specifies the physical device.
731  */
732 static int dasd_eckd_generate_uid(struct dasd_device *device)
733 {
734         struct dasd_eckd_private *private = device->private;
735         unsigned long flags;
736
737         if (!private)
738                 return -ENODEV;
739         if (!private->ned || !private->gneq)
740                 return -ENODEV;
741         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
742         create_uid(private);
743         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
744         return 0;
745 }
746
747 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
748 {
749         struct dasd_eckd_private *private = device->private;
750         unsigned long flags;
751
752         if (private) {
753                 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
754                 *uid = private->uid;
755                 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
756                 return 0;
757         }
758         return -EINVAL;
759 }
760
761 /*
762  * compare device UID with data of a given dasd_eckd_private structure
763  * return 0 for match
764  */
765 static int dasd_eckd_compare_path_uid(struct dasd_device *device,
766                                       struct dasd_eckd_private *private)
767 {
768         struct dasd_uid device_uid;
769
770         create_uid(private);
771         dasd_eckd_get_uid(device, &device_uid);
772
773         return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
774 }
775
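/*
 * Set up a CCW request that reads the configuration data (RCD) of the
 * device into rcd_buffer over the channel path given by lpm.
 */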
776 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
777                                    struct dasd_ccw_req *cqr,
778                                    __u8 *rcd_buffer,
779                                    __u8 lpm)
780 {
781         struct ccw1 *ccw;
782         /*
783          * buffer has to start with EBCDIC "V1.0" to show
784          * support for virtual device SNEQ
785          */
786         rcd_buffer[0] = 0xE5;
787         rcd_buffer[1] = 0xF1;
788         rcd_buffer[2] = 0x4B;
789         rcd_buffer[3] = 0xF0;
790
791         ccw = cqr->cpaddr;
792         ccw->cmd_code = DASD_ECKD_CCW_RCD;
793         ccw->flags = 0;
794         ccw->cda = (__u32)(addr_t)rcd_buffer;
795         ccw->count = DASD_ECKD_RCD_DATA_SIZE;
796         cqr->magic = DASD_ECKD_MAGIC;
797
798         cqr->startdev = device;
799         cqr->memdev = device;
800         cqr->block = NULL;
801         cqr->expires = 10*HZ;
802         cqr->lpm = lpm;
803         cqr->retries = 256;
804         cqr->buildclk = get_tod_clock();
805         cqr->status = DASD_CQR_FILLED;
806         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
807 }
808
809 /*
810  * Wakeup helper for read_conf.
811  * If the cqr is not done and needs some error recovery,
812  * the buffer has to be re-initialized with the EBCDIC "V1.0"
813  * to show support for virtual device SNEQ.
814  */
815 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
816 {
817         struct ccw1 *ccw;
818         __u8 *rcd_buffer;
819
820         if (cqr->status !=  DASD_CQR_DONE) {
821                 ccw = cqr->cpaddr;
822                 rcd_buffer = (__u8 *)((addr_t) ccw->cda);
823                 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
824
825                 rcd_buffer[0] = 0xE5;
826                 rcd_buffer[1] = 0xF1;
827                 rcd_buffer[2] = 0x4B;
828                 rcd_buffer[3] = 0xF0;
829         }
830         dasd_wakeup_cb(cqr, data);
831 }
832
833 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
834                                            struct dasd_ccw_req *cqr,
835                                            __u8 *rcd_buffer,
836                                            __u8 lpm)
837 {
838         struct ciw *ciw;
839         int rc;
840         /*
841          * sanity check: scan for RCD command in extended SenseID data;
842          * some devices do not support RCD
843          */
844         ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
845         if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
846                 return -EOPNOTSUPP;
847
848         dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
849         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
850         set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
851         cqr->retries = 5;
852         cqr->callback = read_conf_cb;
853         rc = dasd_sleep_on_immediatly(cqr);
854         return rc;
855 }
856
857 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
858                                    void **rcd_buffer,
859                                    int *rcd_buffer_size, __u8 lpm)
860 {
861         struct ciw *ciw;
862         char *rcd_buf = NULL;
863         int ret;
864         struct dasd_ccw_req *cqr;
865
866         /*
867          * sanity check: scan for RCD command in extended SenseID data;
868          * some devices do not support RCD
869          */
870         ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
871         if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
872                 ret = -EOPNOTSUPP;
873                 goto out_error;
874         }
875         rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
876         if (!rcd_buf) {
877                 ret = -ENOMEM;
878                 goto out_error;
879         }
880         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
881                                    0, /* use rcd_buf as data area */
882                                    device, NULL);
883         if (IS_ERR(cqr)) {
884                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
885                               "Could not allocate RCD request");
886                 ret = -ENOMEM;
887                 goto out_error;
888         }
889         dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
890         cqr->callback = read_conf_cb;
891         ret = dasd_sleep_on(cqr);
892         /*
893          * on success we update the user input parms
894          */
895         dasd_sfree_request(cqr, cqr->memdev);
896         if (ret)
897                 goto out_error;
898
899         *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
900         *rcd_buffer = rcd_buf;
901         return 0;
902 out_error:
903         kfree(rcd_buf);
904         *rcd_buffer = NULL;
905         *rcd_buffer_size = 0;
906         return ret;
907 }
908
909 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
910 {
911
912         struct dasd_sneq *sneq;
913         int i, count;
914
915         private->ned = NULL;
916         private->sneq = NULL;
917         private->vdsneq = NULL;
918         private->gneq = NULL;
919         count = private->conf_len / sizeof(struct dasd_sneq);
920         sneq = (struct dasd_sneq *)private->conf_data;
921         for (i = 0; i < count; ++i) {
922                 if (sneq->flags.identifier == 1 && sneq->format == 1)
923                         private->sneq = sneq;
924                 else if (sneq->flags.identifier == 1 && sneq->format == 4)
925                         private->vdsneq = (struct vd_sneq *)sneq;
926                 else if (sneq->flags.identifier == 2)
927                         private->gneq = (struct dasd_gneq *)sneq;
928                 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
929                         private->ned = (struct dasd_ned *)sneq;
930                 sneq++;
931         }
932         if (!private->ned || !private->gneq) {
933                 private->ned = NULL;
934                 private->sneq = NULL;
935                 private->vdsneq = NULL;
936                 private->gneq = NULL;
937                 return -EINVAL;
938         }
939         return 0;
940
941 }
942
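/*
 * Return the path usage value from byte 18 (low three bits) of the gneq
 * entry in the configuration data, or 0 if no gneq entry is found.
 */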
943 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
944 {
945         struct dasd_gneq *gneq;
946         int i, count, found;
947
948         count = conf_len / sizeof(*gneq);
949         gneq = (struct dasd_gneq *)conf_data;
950         found = 0;
951         for (i = 0; i < count; ++i) {
952                 if (gneq->flags.identifier == 2) {
953                         found = 1;
954                         break;
955                 }
956                 gneq++;
957         }
958         if (found)
959                 return ((char *)gneq)[18] & 0x07;
960         else
961                 return 0;
962 }
963
964 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
965 {
966         struct dasd_eckd_private *private = device->private;
967         int i;
968
969         private->conf_data = NULL;
970         private->conf_len = 0;
971         for (i = 0; i < 8; i++) {
972                 kfree(device->path[i].conf_data);
973                 device->path[i].conf_data = NULL;
974                 device->path[i].cssid = 0;
975                 device->path[i].ssid = 0;
976                 device->path[i].chpid = 0;
977         }
978 }
979
980
981 static int dasd_eckd_read_conf(struct dasd_device *device)
982 {
983         void *conf_data;
984         int conf_len, conf_data_saved;
985         int rc, path_err, pos;
986         __u8 lpm, opm;
987         struct dasd_eckd_private *private, path_private;
988         struct dasd_uid *uid;
989         char print_path_uid[60], print_device_uid[60];
990         struct channel_path_desc_fmt0 *chp_desc;
991         struct subchannel_id sch_id;
992
993         private = device->private;
994         opm = ccw_device_get_path_mask(device->cdev);
995         ccw_device_get_schid(device->cdev, &sch_id);
996         conf_data_saved = 0;
997         path_err = 0;
998         /* get configuration data per operational path */
999         for (lpm = 0x80; lpm; lpm >>= 1) {
1000                 if (!(lpm & opm))
1001                         continue;
1002                 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
1003                                              &conf_len, lpm);
1004                 if (rc && rc != -EOPNOTSUPP) {  /* -EOPNOTSUPP is ok */
1005                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1006                                         "Read configuration data returned "
1007                                         "error %d", rc);
1008                         return rc;
1009                 }
1010                 if (conf_data == NULL) {
1011                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1012                                         "No configuration data "
1013                                         "retrieved");
1014                         /* no further analysis possible */
1015                         dasd_path_add_opm(device, opm);
1016                         continue;       /* no error */
1017                 }
1018                 /* save first valid configuration data */
1019                 if (!conf_data_saved) {
1020                         /* initially clear previously stored conf_data */
1021                         dasd_eckd_clear_conf_data(device);
1022                         private->conf_data = conf_data;
1023                         private->conf_len = conf_len;
1024                         if (dasd_eckd_identify_conf_parts(private)) {
1025                                 private->conf_data = NULL;
1026                                 private->conf_len = 0;
1027                                 kfree(conf_data);
1028                                 continue;
1029                         }
1030                         pos = pathmask_to_pos(lpm);
1031                         /* store per path conf_data */
1032                         device->path[pos].conf_data = conf_data;
1033                         device->path[pos].cssid = sch_id.cssid;
1034                         device->path[pos].ssid = sch_id.ssid;
1035                         chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
1036                         if (chp_desc)
1037                                 device->path[pos].chpid = chp_desc->chpid;
1038                         kfree(chp_desc);
1039                         /*
1040                          * build device UID so that other path data
1041                          * can be compared to it
1042                          */
1043                         dasd_eckd_generate_uid(device);
1044                         conf_data_saved++;
1045                 } else {
1046                         path_private.conf_data = conf_data;
1047                         path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1048                         if (dasd_eckd_identify_conf_parts(
1049                                     &path_private)) {
1050                                 path_private.conf_data = NULL;
1051                                 path_private.conf_len = 0;
1052                                 kfree(conf_data);
1053                                 continue;
1054                         }
1055                         if (dasd_eckd_compare_path_uid(
1056                                     device, &path_private)) {
1057                                 uid = &path_private.uid;
1058                                 if (strlen(uid->vduit) > 0)
1059                                         snprintf(print_path_uid,
1060                                                  sizeof(print_path_uid),
1061                                                  "%s.%s.%04x.%02x.%s",
1062                                                  uid->vendor, uid->serial,
1063                                                  uid->ssid, uid->real_unit_addr,
1064                                                  uid->vduit);
1065                                 else
1066                                         snprintf(print_path_uid,
1067                                                  sizeof(print_path_uid),
1068                                                  "%s.%s.%04x.%02x",
1069                                                  uid->vendor, uid->serial,
1070                                                  uid->ssid,
1071                                                  uid->real_unit_addr);
1072                                 uid = &private->uid;
1073                                 if (strlen(uid->vduit) > 0)
1074                                         snprintf(print_device_uid,
1075                                                  sizeof(print_device_uid),
1076                                                  "%s.%s.%04x.%02x.%s",
1077                                                  uid->vendor, uid->serial,
1078                                                  uid->ssid, uid->real_unit_addr,
1079                                                  uid->vduit);
1080                                 else
1081                                         snprintf(print_device_uid,
1082                                                  sizeof(print_device_uid),
1083                                                  "%s.%s.%04x.%02x",
1084                                                  uid->vendor, uid->serial,
1085                                                  uid->ssid,
1086                                                  uid->real_unit_addr);
1087                                 dev_err(&device->cdev->dev,
1088                                         "Not all channel paths lead to "
1089                                         "the same device, path %02X leads to "
1090                                         "device %s instead of %s\n", lpm,
1091                                         print_path_uid, print_device_uid);
1092                                 path_err = -EINVAL;
1093                                 dasd_path_add_cablepm(device, lpm);
1094                                 continue;
1095                         }
1096                         pos = pathmask_to_pos(lpm);
1097                         /* store per path conf_data */
1098                         device->path[pos].conf_data = conf_data;
1099                         device->path[pos].cssid = sch_id.cssid;
1100                         device->path[pos].ssid = sch_id.ssid;
1101                         chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
1102                         if (chp_desc)
1103                                 device->path[pos].chpid = chp_desc->chpid;
1104                         kfree(chp_desc);
1105                         path_private.conf_data = NULL;
1106                         path_private.conf_len = 0;
1107                 }
1108                 switch (dasd_eckd_path_access(conf_data, conf_len)) {
1109                 case 0x02:
1110                         dasd_path_add_nppm(device, lpm);
1111                         break;
1112                 case 0x03:
1113                         dasd_path_add_ppm(device, lpm);
1114                         break;
1115                 }
1116                 if (!dasd_path_get_opm(device)) {
1117                         dasd_path_set_opm(device, lpm);
1118                         dasd_generic_path_operational(device);
1119                 } else {
1120                         dasd_path_add_opm(device, lpm);
1121                 }
1122         }
1123
1124         return path_err;
1125 }
1126
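/*
 * Determine the maximum data size for transport mode (zHPF) requests:
 * 0 if transport mode is disabled or not supported, otherwise the value
 * returned by ccw_device_get_mdc() times FCX_MAX_DATA_FACTOR.
 */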
1127 static u32 get_fcx_max_data(struct dasd_device *device)
1128 {
1129         struct dasd_eckd_private *private = device->private;
1130         int fcx_in_css, fcx_in_gneq, fcx_in_features;
1131         unsigned int mdc;
1132         int tpm;
1133
1134         if (dasd_nofcx)
1135                 return 0;
1136         /* is transport mode supported? */
1137         fcx_in_css = css_general_characteristics.fcx;
1138         fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1139         fcx_in_features = private->features.feature[40] & 0x80;
1140         tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1141
1142         if (!tpm)
1143                 return 0;
1144
1145         mdc = ccw_device_get_mdc(device->cdev, 0);
1146         if (mdc == 0) {
1147                 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1148                 return 0;
1149         } else {
1150                 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1151         }
1152 }
1153
1154 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1155 {
1156         struct dasd_eckd_private *private = device->private;
1157         unsigned int mdc;
1158         u32 fcx_max_data;
1159
1160         if (private->fcx_max_data) {
1161                 mdc = ccw_device_get_mdc(device->cdev, lpm);
1162                 if (mdc == 0) {
1163                         dev_warn(&device->cdev->dev,
1164                                  "Detecting the maximum data size for zHPF "
1165                                  "requests failed (rc=%d) for a new path %x\n",
1166                                  mdc, lpm);
1167                         return mdc;
1168                 }
1169                 fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
1170                 if (fcx_max_data < private->fcx_max_data) {
1171                         dev_warn(&device->cdev->dev,
1172                                  "The maximum data size for zHPF requests %u "
1173                                  "on a new path %x is below the active maximum "
1174                                  "%u\n", fcx_max_data, lpm,
1175                                  private->fcx_max_data);
1176                         return -EACCES;
1177                 }
1178         }
1179         return 0;
1180 }
1181
1182 static int rebuild_device_uid(struct dasd_device *device,
1183                               struct path_verification_work_data *data)
1184 {
1185         struct dasd_eckd_private *private = device->private;
1186         __u8 lpm, opm = dasd_path_get_opm(device);
1187         int rc = -ENODEV;
1188
1189         for (lpm = 0x80; lpm; lpm >>= 1) {
1190                 if (!(lpm & opm))
1191                         continue;
1192                 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1193                 memset(&data->cqr, 0, sizeof(data->cqr));
1194                 data->cqr.cpaddr = &data->ccw;
1195                 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1196                                                      data->rcd_buffer,
1197                                                      lpm);
1198
1199                 if (rc) {
1200                         if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
1201                                 continue;
1202                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1203                                         "Read configuration data "
1204                                         "returned error %d", rc);
1205                         break;
1206                 }
1207                 memcpy(private->conf_data, data->rcd_buffer,
1208                        DASD_ECKD_RCD_DATA_SIZE);
1209                 if (dasd_eckd_identify_conf_parts(private)) {
1210                         rc = -ENODEV;
1211                 } else /* first valid path is enough */
1212                         break;
1213         }
1214
1215         if (!rc)
1216                 rc = dasd_eckd_generate_uid(device);
1217
1218         return rc;
1219 }
1220
1221 static void do_path_verification_work(struct work_struct *work)
1222 {
1223         struct path_verification_work_data *data;
1224         struct dasd_device *device;
1225         struct dasd_eckd_private path_private;
1226         struct dasd_uid *uid;
1227         __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
1228         __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
1229         unsigned long flags;
1230         char print_uid[60];
1231         int rc;
1232
1233         data = container_of(work, struct path_verification_work_data, worker);
1234         device = data->device;
1235
1236         /* delay path verification until the device has been resumed */
1237         if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1238                 schedule_work(work);
1239                 return;
1240         }
1241         /* check if path verification already running and delay if so */
1242         if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
1243                 schedule_work(work);
1244                 return;
1245         }
1246         opm = 0;
1247         npm = 0;
1248         ppm = 0;
1249         epm = 0;
1250         hpfpm = 0;
1251         cablepm = 0;
1252
1253         for (lpm = 0x80; lpm; lpm >>= 1) {
1254                 if (!(lpm & data->tbvpm))
1255                         continue;
1256                 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1257                 memset(&data->cqr, 0, sizeof(data->cqr));
1258                 data->cqr.cpaddr = &data->ccw;
1259                 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1260                                                      data->rcd_buffer,
1261                                                      lpm);
1262                 if (!rc) {
1263                         switch (dasd_eckd_path_access(data->rcd_buffer,
1264                                                       DASD_ECKD_RCD_DATA_SIZE)
1265                                 ) {
1266                         case 0x02:
1267                                 npm |= lpm;
1268                                 break;
1269                         case 0x03:
1270                                 ppm |= lpm;
1271                                 break;
1272                         }
1273                         opm |= lpm;
1274                 } else if (rc == -EOPNOTSUPP) {
1275                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1276                                         "path verification: No configuration "
1277                                         "data retrieved");
1278                         opm |= lpm;
1279                 } else if (rc == -EAGAIN) {
1280                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1281                                         "path verification: device is stopped,"
1282                                         " try again later");
1283                         epm |= lpm;
1284                 } else {
1285                         dev_warn(&device->cdev->dev,
1286                                  "Reading device feature codes failed "
1287                                  "(rc=%d) for new path %x\n", rc, lpm);
1288                         continue;
1289                 }
1290                 if (verify_fcx_max_data(device, lpm)) {
1291                         opm &= ~lpm;
1292                         npm &= ~lpm;
1293                         ppm &= ~lpm;
1294                         hpfpm |= lpm;
1295                         continue;
1296                 }
1297
1298                 /*
1299                  * save conf_data for the comparison after
1300                  * rebuild_device_uid, which may have changed
1301                  * the original data
1302                  */
1303                 memcpy(&path_rcd_buf, data->rcd_buffer,
1304                        DASD_ECKD_RCD_DATA_SIZE);
1305                 path_private.conf_data = (void *) &path_rcd_buf;
1306                 path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1307                 if (dasd_eckd_identify_conf_parts(&path_private)) {
1308                         path_private.conf_data = NULL;
1309                         path_private.conf_len = 0;
1310                         continue;
1311                 }
1312
1313                 /*
1314                  * compare path UID with device UID only if at least
1315                  * one valid path is left;
1316                  * otherwise the device UID may have changed and
1317                  * the first working path UID will be used as device UID
1318                  */
1319                 if (dasd_path_get_opm(device) &&
1320                     dasd_eckd_compare_path_uid(device, &path_private)) {
1321                         /*
1322                          * the comparison was not successful;
1323                          * rebuild the device UID with at least one
1324                          * known path in case a z/VM hyperswap command
1325                          * has changed the device,
1326                          *
1327                          * then compare again
1328                          *
1329                          * if either the rebuild or the recompare fails,
1330                          * the path cannot be used
1331                          */
1332                         if (rebuild_device_uid(device, data) ||
1333                             dasd_eckd_compare_path_uid(
1334                                     device, &path_private)) {
1335                                 uid = &path_private.uid;
1336                                 if (strlen(uid->vduit) > 0)
1337                                         snprintf(print_uid, sizeof(print_uid),
1338                                                  "%s.%s.%04x.%02x.%s",
1339                                                  uid->vendor, uid->serial,
1340                                                  uid->ssid, uid->real_unit_addr,
1341                                                  uid->vduit);
1342                                 else
1343                                         snprintf(print_uid, sizeof(print_uid),
1344                                                  "%s.%s.%04x.%02x",
1345                                                  uid->vendor, uid->serial,
1346                                                  uid->ssid,
1347                                                  uid->real_unit_addr);
1348                                 dev_err(&device->cdev->dev,
1349                                         "The newly added channel path %02X "
1350                                         "will not be used because it leads "
1351                                         "to a different device %s\n",
1352                                         lpm, print_uid);
1353                                 opm &= ~lpm;
1354                                 npm &= ~lpm;
1355                                 ppm &= ~lpm;
1356                                 cablepm |= lpm;
1357                                 continue;
1358                         }
1359                 }
1360
1361                 /*
1362                  * There is a small chance that a path is lost again between
1363                  * above path verification and the following modification of
1364                  * the device opm mask. We could avoid that race here by using
1365                  * yet another path mask, but we rather deal with this unlikely
1366                  * situation in dasd_start_IO.
1367                  */
1368                 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1369                 if (!dasd_path_get_opm(device) && opm) {
1370                         dasd_path_set_opm(device, opm);
1371                         dasd_generic_path_operational(device);
1372                 } else {
1373                         dasd_path_add_opm(device, opm);
1374                 }
1375                 dasd_path_add_nppm(device, npm);
1376                 dasd_path_add_ppm(device, ppm);
1377                 dasd_path_add_tbvpm(device, epm);
1378                 dasd_path_add_cablepm(device, cablepm);
1379                 dasd_path_add_nohpfpm(device, hpfpm);
1380                 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1381         }
1382         clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
1383         dasd_put_device(device);
1384         if (data->isglobal)
1385                 mutex_unlock(&dasd_path_verification_mutex);
1386         else
1387                 kfree(data);
1388 }
1389
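     /*
      * Schedule a deferred path verification for the logical path mask @lpm.
      * If no memory is available for a private work item, fall back to the
      * single global work buffer, serialized by dasd_path_verification_mutex.
      */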
1390 static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
1391 {
1392         struct path_verification_work_data *data;
1393
1394         data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1395         if (!data) {
1396                 if (mutex_trylock(&dasd_path_verification_mutex)) {
1397                         data = path_verification_worker;
1398                         data->isglobal = 1;
1399                 } else
1400                         return -ENOMEM;
1401         } else {
1402                 memset(data, 0, sizeof(*data));
1403                 data->isglobal = 0;
1404         }
1405         INIT_WORK(&data->worker, do_path_verification_work);
1406         dasd_get_device(device);
1407         data->device = device;
1408         data->tbvpm = lpm;
1409         schedule_work(&data->worker);
1410         return 0;
1411 }
1412
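     /*
      * Mark the given path mask (or all currently non-operational paths if
      * @pm is zero) as "to be verified" and trigger the device bottom half
      * to re-validate them.
      */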
1413 static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1414 {
1415         struct dasd_eckd_private *private = device->private;
1416         unsigned long flags;
1417
1418         if (!private->fcx_max_data)
1419                 private->fcx_max_data = get_fcx_max_data(device);
1420         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1421         dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1422         dasd_schedule_device_bh(device);
1423         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1424 }
1425
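     /*
      * Read the feature codes of the storage server via a PSF/RSSD request
      * and cache them in the device private data.
      */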
1426 static int dasd_eckd_read_features(struct dasd_device *device)
1427 {
1428         struct dasd_eckd_private *private = device->private;
1429         struct dasd_psf_prssd_data *prssdp;
1430         struct dasd_rssd_features *features;
1431         struct dasd_ccw_req *cqr;
1432         struct ccw1 *ccw;
1433         int rc;
1434
1435         memset(&private->features, 0, sizeof(struct dasd_rssd_features));
1436         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1437                                    (sizeof(struct dasd_psf_prssd_data) +
1438                                     sizeof(struct dasd_rssd_features)),
1439                                    device, NULL);
1440         if (IS_ERR(cqr)) {
1441                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
1442                                 "allocate initialization request");
1443                 return PTR_ERR(cqr);
1444         }
1445         cqr->startdev = device;
1446         cqr->memdev = device;
1447         cqr->block = NULL;
1448         cqr->retries = 256;
1449         cqr->expires = 10 * HZ;
1450
1451         /* Prepare for Read Subsystem Data */
1452         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1453         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1454         prssdp->order = PSF_ORDER_PRSSD;
1455         prssdp->suborder = 0x41;        /* Read Feature Codes */
1456         /* all other bytes of prssdp must be zero */
1457
1458         ccw = cqr->cpaddr;
1459         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1460         ccw->count = sizeof(struct dasd_psf_prssd_data);
1461         ccw->flags |= CCW_FLAG_CC;
1462         ccw->cda = (__u32)(addr_t) prssdp;
1463
1464         /* Read Subsystem Data - feature codes */
1465         features = (struct dasd_rssd_features *) (prssdp + 1);
1466         memset(features, 0, sizeof(struct dasd_rssd_features));
1467
1468         ccw++;
1469         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1470         ccw->count = sizeof(struct dasd_rssd_features);
1471         ccw->cda = (__u32)(addr_t) features;
1472
1473         cqr->buildclk = get_tod_clock();
1474         cqr->status = DASD_CQR_FILLED;
1475         rc = dasd_sleep_on(cqr);
1476         if (rc == 0) {
1477                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1478                 features = (struct dasd_rssd_features *) (prssdp + 1);
1479                 memcpy(&private->features, features,
1480                        sizeof(struct dasd_rssd_features));
1481         } else
1482                 dev_warn(&device->cdev->dev, "Reading device feature codes"
1483                          " failed with rc=%d\n", rc);
1484         dasd_sfree_request(cqr, cqr->memdev);
1485         return rc;
1486 }
1487
1488 /* Read Volume Information - Volume Storage Query */
1489 static int dasd_eckd_read_vol_info(struct dasd_device *device)
1490 {
1491         struct dasd_eckd_private *private = device->private;
1492         struct dasd_psf_prssd_data *prssdp;
1493         struct dasd_rssd_vsq *vsq;
1494         struct dasd_ccw_req *cqr;
1495         struct ccw1 *ccw;
1496         int useglobal;
1497         int rc;
1498
1499         /* This command cannot be executed on an alias device */
1500         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1501             private->uid.type == UA_HYPER_PAV_ALIAS)
1502                 return 0;
1503
1504         useglobal = 0;
1505         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1506                                    sizeof(*prssdp) + sizeof(*vsq), device, NULL);
1507         if (IS_ERR(cqr)) {
1508                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1509                                 "Could not allocate initialization request");
1510                 mutex_lock(&dasd_vol_info_mutex);
1511                 useglobal = 1;
1512                 cqr = &dasd_vol_info_req->cqr;
1513                 memset(cqr, 0, sizeof(*cqr));
1514                 memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
1515                 cqr->cpaddr = &dasd_vol_info_req->ccw;
1516                 cqr->data = &dasd_vol_info_req->data;
1517                 cqr->magic = DASD_ECKD_MAGIC;
1518         }
1519
1520         /* Prepare for Read Subsystem Data */
1521         prssdp = cqr->data;
1522         prssdp->order = PSF_ORDER_PRSSD;
1523         prssdp->suborder = PSF_SUBORDER_VSQ;    /* Volume Storage Query */
1524         prssdp->lss = private->ned->ID;
1525         prssdp->volume = private->ned->unit_addr;
1526
1527         ccw = cqr->cpaddr;
1528         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1529         ccw->count = sizeof(*prssdp);
1530         ccw->flags |= CCW_FLAG_CC;
1531         ccw->cda = (__u32)(addr_t)prssdp;
1532
1533         /* Read Subsystem Data - Volume Storage Query */
1534         vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
1535         memset(vsq, 0, sizeof(*vsq));
1536
1537         ccw++;
1538         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1539         ccw->count = sizeof(*vsq);
1540         ccw->flags |= CCW_FLAG_SLI;
1541         ccw->cda = (__u32)(addr_t)vsq;
1542
1543         cqr->buildclk = get_tod_clock();
1544         cqr->status = DASD_CQR_FILLED;
1545         cqr->startdev = device;
1546         cqr->memdev = device;
1547         cqr->block = NULL;
1548         cqr->retries = 256;
1549         cqr->expires = device->default_expires * HZ;
1550         /* The command might not be supported. Suppress the error output */
1551         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1552
1553         rc = dasd_sleep_on_interruptible(cqr);
1554         if (rc == 0) {
1555                 memcpy(&private->vsq, vsq, sizeof(*vsq));
1556         } else {
1557                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1558                                 "Reading the volume storage information failed with rc=%d", rc);
1559         }
1560
1561         if (useglobal)
1562                 mutex_unlock(&dasd_vol_info_mutex);
1563         else
1564                 dasd_sfree_request(cqr, cqr->memdev);
1565
1566         return rc;
1567 }
1568
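     /* Return whether the volume is Extent Space Efficient (ESE). */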
1569 static int dasd_eckd_is_ese(struct dasd_device *device)
1570 {
1571         struct dasd_eckd_private *private = device->private;
1572
1573         return private->vsq.vol_info.ese;
1574 }
1575
1576 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1577 {
1578         struct dasd_eckd_private *private = device->private;
1579
1580         return private->vsq.extent_pool_id;
1581 }
1582
1583 /*
1584  * This value represents the total amount of available space. As more space is
1585  * allocated by ESE volumes, this value will decrease.
1586  * The data for this value is therefore updated on each call.
1587  */
1588 static int dasd_eckd_space_configured(struct dasd_device *device)
1589 {
1590         struct dasd_eckd_private *private = device->private;
1591         int rc;
1592
1593         rc = dasd_eckd_read_vol_info(device);
1594
1595         return rc ? : private->vsq.space_configured;
1596 }
1597
1598 /*
1599  * The value of space allocated by an ESE volume may have changed and is
1600  * therefore updated on each call.
1601  */
1602 static int dasd_eckd_space_allocated(struct dasd_device *device)
1603 {
1604         struct dasd_eckd_private *private = device->private;
1605         int rc;
1606
1607         rc = dasd_eckd_read_vol_info(device);
1608
1609         return rc ? : private->vsq.space_allocated;
1610 }
1611
1612 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1613 {
1614         struct dasd_eckd_private *private = device->private;
1615
1616         return private->vsq.logical_capacity;
1617 }
1618
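     /*
      * Worker that re-reads the volume storage information after an
      * out-of-space condition; if the query reports configured space
      * again, the generic layer is notified, otherwise a warning is
      * issued.
      */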
1619 static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
1620 {
1621         struct ext_pool_exhaust_work_data *data;
1622         struct dasd_device *device;
1623         struct dasd_device *base;
1624
1625         data = container_of(work, struct ext_pool_exhaust_work_data, worker);
1626         device = data->device;
1627         base = data->base;
1628
1629         if (!base)
1630                 base = device;
1631         if (dasd_eckd_space_configured(base) != 0) {
1632                 dasd_generic_space_avail(device);
1633         } else {
1634                 dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
1635                 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
1636         }
1637
1638         dasd_put_device(device);
1639         kfree(data);
1640 }
1641
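     /*
      * Schedule the extent pool exhaustion worker, passing along the base
      * device associated with the request @cqr.
      */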
1642 static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
1643                                       struct dasd_ccw_req *cqr)
1644 {
1645         struct ext_pool_exhaust_work_data *data;
1646
1647         data = kzalloc(sizeof(*data), GFP_ATOMIC);
1648         if (!data)
1649                 return -ENOMEM;
1650         INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
1651         dasd_get_device(device);
1652         data->device = device;
1653
1654         if (cqr->block)
1655                 data->base = cqr->block->base;
1656         else if (cqr->basedev)
1657                 data->base = cqr->basedev;
1658         else
1659                 data->base = NULL;
1660
1661         schedule_work(&data->worker);
1662
1663         return 0;
1664 }
1665
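     /*
      * Copy the extent pool summary matching the device's extent pool ID
      * from the logical configuration query data into the private data.
      */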
1666 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1667                                         struct dasd_rssd_lcq *lcq)
1668 {
1669         struct dasd_eckd_private *private = device->private;
1670         int pool_id = dasd_eckd_ext_pool_id(device);
1671         struct dasd_ext_pool_sum eps;
1672         int i;
1673
1674         for (i = 0; i < lcq->pool_count; i++) {
1675                 eps = lcq->ext_pool_sum[i];
1676                 if (eps.pool_id == pool_id) {
1677                         memcpy(&private->eps, &eps,
1678                                sizeof(struct dasd_ext_pool_sum));
1679                 }
1680         }
1681 }
1682
1683 /* Read Extent Pool Information - Logical Configuration Query */
1684 static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
1685 {
1686         struct dasd_eckd_private *private = device->private;
1687         struct dasd_psf_prssd_data *prssdp;
1688         struct dasd_rssd_lcq *lcq;
1689         struct dasd_ccw_req *cqr;
1690         struct ccw1 *ccw;
1691         int rc;
1692
1693         /* This command cannot be executed on an alias device */
1694         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1695             private->uid.type == UA_HYPER_PAV_ALIAS)
1696                 return 0;
1697
1698         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
1699                                    sizeof(*prssdp) + sizeof(*lcq), device, NULL);
1700         if (IS_ERR(cqr)) {
1701                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1702                                 "Could not allocate initialization request");
1703                 return PTR_ERR(cqr);
1704         }
1705
1706         /* Prepare for Read Subsystem Data */
1707         prssdp = cqr->data;
1708         memset(prssdp, 0, sizeof(*prssdp));
1709         prssdp->order = PSF_ORDER_PRSSD;
1710         prssdp->suborder = PSF_SUBORDER_LCQ;    /* Logical Configuration Query */
1711
1712         ccw = cqr->cpaddr;
1713         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1714         ccw->count = sizeof(*prssdp);
1715         ccw->flags |= CCW_FLAG_CC;
1716         ccw->cda = (__u32)(addr_t)prssdp;
1717
1718         lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
1719         memset(lcq, 0, sizeof(*lcq));
1720
1721         ccw++;
1722         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1723         ccw->count = sizeof(*lcq);
1724         ccw->flags |= CCW_FLAG_SLI;
1725         ccw->cda = (__u32)(addr_t)lcq;
1726
1727         cqr->buildclk = get_tod_clock();
1728         cqr->status = DASD_CQR_FILLED;
1729         cqr->startdev = device;
1730         cqr->memdev = device;
1731         cqr->block = NULL;
1732         cqr->retries = 256;
1733         cqr->expires = device->default_expires * HZ;
1734         /* The command might not be supported. Suppress the error output */
1735         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
1736
1737         rc = dasd_sleep_on_interruptible(cqr);
1738         if (rc == 0) {
1739                 dasd_eckd_cpy_ext_pool_data(device, lcq);
1740         } else {
1741                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1742                                 "Reading the logical configuration failed with rc=%d", rc);
1743         }
1744
1745         dasd_sfree_request(cqr, cqr->memdev);
1746
1747         return rc;
1748 }
1749
1750 /*
1751  * Depending on the device type, the extent size is specified either as
1752  * cylinders per extent (CKD) or size per extent (FBA).
1753  * An extent size of 1 GB corresponds to 1113 cylinders, and 16 MB to 21 cylinders.
1754  */
1755 static int dasd_eckd_ext_size(struct dasd_device *device)
1756 {
1757         struct dasd_eckd_private *private = device->private;
1758         struct dasd_ext_pool_sum eps = private->eps;
1759
1760         if (!eps.flags.extent_size_valid)
1761                 return 0;
1762         if (eps.extent_size.size_1G)
1763                 return 1113;
1764         if (eps.extent_size.size_16M)
1765                 return 21;
1766
1767         return 0;
1768 }
1769
1770 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1771 {
1772         struct dasd_eckd_private *private = device->private;
1773
1774         return private->eps.warn_thrshld;
1775 }
1776
1777 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1778 {
1779         struct dasd_eckd_private *private = device->private;
1780
1781         return private->eps.flags.capacity_at_warnlevel;
1782 }
1783
1784 /*
1785  * Extent Pool out of space
1786  */
1787 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1788 {
1789         struct dasd_eckd_private *private = device->private;
1790
1791         return private->eps.flags.pool_oos;
1792 }
1793
1794 /*
1795  * Build CP for Perform Subsystem Function - SSC.
1796  */
1797 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1798                                                     int enable_pav)
1799 {
1800         struct dasd_ccw_req *cqr;
1801         struct dasd_psf_ssc_data *psf_ssc_data;
1802         struct ccw1 *ccw;
1803
1804         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
1805                                   sizeof(struct dasd_psf_ssc_data),
1806                                    device, NULL);
1807
1808         if (IS_ERR(cqr)) {
1809                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1810                            "Could not allocate PSF-SSC request");
1811                 return cqr;
1812         }
1813         psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1814         psf_ssc_data->order = PSF_ORDER_SSC;
1815         psf_ssc_data->suborder = 0xc0;
1816         if (enable_pav) {
1817                 psf_ssc_data->suborder |= 0x08;
1818                 psf_ssc_data->reserved[0] = 0x88;
1819         }
1820         ccw = cqr->cpaddr;
1821         ccw->cmd_code = DASD_ECKD_CCW_PSF;
1822         ccw->cda = (__u32)(addr_t)psf_ssc_data;
1823         ccw->count = 66;
1824
1825         cqr->startdev = device;
1826         cqr->memdev = device;
1827         cqr->block = NULL;
1828         cqr->retries = 256;
1829         cqr->expires = 10*HZ;
1830         cqr->buildclk = get_tod_clock();
1831         cqr->status = DASD_CQR_FILLED;
1832         return cqr;
1833 }
1834
1835 /*
1836  * Perform Subsystem Function.
1837  * It is necessary to trigger CIO for channel revalidation since this
1838  * call might change the behaviour of DASD devices.
1839  */
1840 static int
1841 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1842                   unsigned long flags)
1843 {
1844         struct dasd_ccw_req *cqr;
1845         int rc;
1846
1847         cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1848         if (IS_ERR(cqr))
1849                 return PTR_ERR(cqr);
1850
1851         /*
1852          * Set flags, e.g. turn on failfast to prevent blocking.
1853          * The calling function should handle failed requests.
1854          */
1855         cqr->flags |= flags;
1856
1857         rc = dasd_sleep_on(cqr);
1858         if (!rc)
1859                 /* trigger CIO to reprobe devices */
1860                 css_schedule_reprobe();
1861         else if (cqr->intrc == -EAGAIN)
1862                 rc = -EAGAIN;
1863
1864         dasd_sfree_request(cqr, cqr->memdev);
1865         return rc;
1866 }
1867
1868 /*
1869  * Validate the storage server of the current device.
1870  */
1871 static int dasd_eckd_validate_server(struct dasd_device *device,
1872                                      unsigned long flags)
1873 {
1874         struct dasd_eckd_private *private = device->private;
1875         int enable_pav, rc;
1876
1877         if (private->uid.type == UA_BASE_PAV_ALIAS ||
1878             private->uid.type == UA_HYPER_PAV_ALIAS)
1879                 return 0;
1880         if (dasd_nopav || MACHINE_IS_VM)
1881                 enable_pav = 0;
1882         else
1883                 enable_pav = 1;
1884         rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1885
1886         /* maybe the requested feature is not available on the server,
1887          * therefore just report the error and go ahead */
1888         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1889                         "returned rc=%d", private->uid.ssid, rc);
1890         return rc;
1891 }
1892
1893 /*
1894  * worker to perform a validate server in case of a lost path group
1895  */
1896 static void dasd_eckd_do_validate_server(struct work_struct *work)
1897 {
1898         struct dasd_device *device = container_of(work, struct dasd_device,
1899                                                   kick_validate);
1900         unsigned long flags = 0;
1901
1902         set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1903         if (dasd_eckd_validate_server(device, flags)
1904             == -EAGAIN) {
1905                 /* schedule worker again if failed */
1906                 schedule_work(&device->kick_validate);
1907                 return;
1908         }
1909
1910         dasd_put_device(device);
1911 }
1912
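     /*
      * Schedule the validate server worker unless the device is offline
      * or not yet online.
      */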
1913 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
1914 {
1915         dasd_get_device(device);
1916         /* exit if device not online or in offline processing */
1917         if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1918            device->state < DASD_STATE_ONLINE) {
1919                 dasd_put_device(device);
1920                 return;
1921         }
1922         /* queue call to do_validate_server to the kernel event daemon. */
1923         if (!schedule_work(&device->kick_validate))
1924                 dasd_put_device(device);
1925 }
1926
1927 /*
1928  * Check device characteristics.
1929  * If the device is accessible using the ECKD discipline, the device is enabled.
1930  */
1931 static int
1932 dasd_eckd_check_characteristics(struct dasd_device *device)
1933 {
1934         struct dasd_eckd_private *private = device->private;
1935         struct dasd_block *block;
1936         struct dasd_uid temp_uid;
1937         int rc, i;
1938         int readonly;
1939         unsigned long value;
1940
1941         /* setup work queue for validate server */
1942         INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
1943         /* setup work queue for summary unit check */
1944         INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
1945
1946         if (!ccw_device_is_pathgroup(device->cdev)) {
1947                 dev_warn(&device->cdev->dev,
1948                          "A channel path group could not be established\n");
1949                 return -EIO;
1950         }
1951         if (!ccw_device_is_multipath(device->cdev)) {
1952                 dev_info(&device->cdev->dev,
1953                          "The DASD is not operating in multipath mode\n");
1954         }
1955         if (!private) {
1956                 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1957                 if (!private) {
1958                         dev_warn(&device->cdev->dev,
1959                                  "Allocating memory for private DASD data "
1960                                  "failed\n");
1961                         return -ENOMEM;
1962                 }
1963                 device->private = private;
1964         } else {
1965                 memset(private, 0, sizeof(*private));
1966         }
1967         /* Invalidate status of initial analysis. */
1968         private->init_cqr_status = -1;
1969         /* Set default cache operations. */
1970         private->attrib.operation = DASD_NORMAL_CACHE;
1971         private->attrib.nr_cyl = 0;
1972
1973         /* Read Configuration Data */
1974         rc = dasd_eckd_read_conf(device);
1975         if (rc)
1976                 goto out_err1;
1977
1978         /* set some default values */
1979         device->default_expires = DASD_EXPIRES;
1980         device->default_retries = DASD_RETRIES;
1981         device->path_thrhld = DASD_ECKD_PATH_THRHLD;
1982         device->path_interval = DASD_ECKD_PATH_INTERVAL;
1983
1984         if (private->gneq) {
1985                 value = 1;
1986                 for (i = 0; i < private->gneq->timeout.value; i++)
1987                         value = 10 * value;
1988                 value = value * private->gneq->timeout.number;
1989                 /* do not accept useless values */
1990                 if (value != 0 && value <= DASD_EXPIRES_MAX)
1991                         device->default_expires = value;
1992         }
1993
1994         dasd_eckd_get_uid(device, &temp_uid);
1995         if (temp_uid.type == UA_BASE_DEVICE) {
1996                 block = dasd_alloc_block();
1997                 if (IS_ERR(block)) {
1998                         DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1999                                         "could not allocate dasd "
2000                                         "block structure");
2001                         rc = PTR_ERR(block);
2002                         goto out_err1;
2003                 }
2004                 device->block = block;
2005                 block->base = device;
2006         }
2007
2008         /* register lcu with alias handling, enable PAV */
2009         rc = dasd_alias_make_device_known_to_lcu(device);
2010         if (rc)
2011                 goto out_err2;
2012
2013         dasd_eckd_validate_server(device, 0);
2014
2015         /* device may report different configuration data after LCU setup */
2016         rc = dasd_eckd_read_conf(device);
2017         if (rc)
2018                 goto out_err3;
2019
2020         /* Read Feature Codes */
2021         dasd_eckd_read_features(device);
2022
2023         /* Read Volume Information */
2024         dasd_eckd_read_vol_info(device);
2025
2026         /* Read Extent Pool Information */
2027         dasd_eckd_read_ext_pool_info(device);
2028
2029         /* Read Device Characteristics */
2030         rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
2031                                          &private->rdc_data, 64);
2032         if (rc) {
2033                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
2034                                 "Read device characteristic failed, rc=%d", rc);
2035                 goto out_err3;
2036         }
2037
2038         if ((device->features & DASD_FEATURE_USERAW) &&
2039             !(private->rdc_data.facilities.RT_in_LR)) {
2040                 dev_err(&device->cdev->dev, "The storage server does not "
2041                         "support raw-track access\n");
2042                 rc = -EINVAL;
2043                 goto out_err3;
2044         }
2045
2046         /* find the valid cylinder size */
2047         if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
2048             private->rdc_data.long_no_cyl)
2049                 private->real_cyl = private->rdc_data.long_no_cyl;
2050         else
2051                 private->real_cyl = private->rdc_data.no_cyl;
2052
2053         private->fcx_max_data = get_fcx_max_data(device);
2054
2055         readonly = dasd_device_is_ro(device);
2056         if (readonly)
2057                 set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
2058
2059         dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
2060                  "with %d cylinders, %d heads, %d sectors%s\n",
2061                  private->rdc_data.dev_type,
2062                  private->rdc_data.dev_model,
2063                  private->rdc_data.cu_type,
2064                  private->rdc_data.cu_model.model,
2065                  private->real_cyl,
2066                  private->rdc_data.trk_per_cyl,
2067                  private->rdc_data.sec_per_trk,
2068                  readonly ? ", read-only device" : "");
2069         return 0;
2070
2071 out_err3:
2072         dasd_alias_disconnect_device_from_lcu(device);
2073 out_err2:
2074         dasd_free_block(device->block);
2075         device->block = NULL;
2076 out_err1:
2077         dasd_eckd_clear_conf_data(device);
2078         kfree(device->private);
2079         device->private = NULL;
2080         return rc;
2081 }
2082
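     /*
      * Undo dasd_eckd_check_characteristics: disconnect the device from
      * its LCU and discard the cached configuration data.
      */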
2083 static void dasd_eckd_uncheck_device(struct dasd_device *device)
2084 {
2085         struct dasd_eckd_private *private = device->private;
2086
2087         if (!private)
2088                 return;
2089
2090         dasd_alias_disconnect_device_from_lcu(device);
2091         private->ned = NULL;
2092         private->sneq = NULL;
2093         private->vdsneq = NULL;
2094         private->gneq = NULL;
2095         dasd_eckd_clear_conf_data(device);
2096 }
2097
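     /*
      * Build the initial analysis request: read the count fields of the
      * first four records on track 0 and the first record on track 1 so
      * that the disk layout can be determined later.
      */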
2098 static struct dasd_ccw_req *
2099 dasd_eckd_analysis_ccw(struct dasd_device *device)
2100 {
2101         struct dasd_eckd_private *private = device->private;
2102         struct eckd_count *count_data;
2103         struct LO_eckd_data *LO_data;
2104         struct dasd_ccw_req *cqr;
2105         struct ccw1 *ccw;
2106         int cplength, datasize;
2107         int i;
2108
2109         cplength = 8;
2110         datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
2111         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
2112                                    NULL);
2113         if (IS_ERR(cqr))
2114                 return cqr;
2115         ccw = cqr->cpaddr;
2116         /* Define extent for the first 2 tracks. */
2117         define_extent(ccw++, cqr->data, 0, 1,
2118                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2119         LO_data = cqr->data + sizeof(struct DE_eckd_data);
2120         /* Locate record for the first 4 records on track 0. */
2121         ccw[-1].flags |= CCW_FLAG_CC;
2122         locate_record(ccw++, LO_data++, 0, 0, 4,
2123                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2124
2125         count_data = private->count_area;
2126         for (i = 0; i < 4; i++) {
2127                 ccw[-1].flags |= CCW_FLAG_CC;
2128                 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2129                 ccw->flags = 0;
2130                 ccw->count = 8;
2131                 ccw->cda = (__u32)(addr_t) count_data;
2132                 ccw++;
2133                 count_data++;
2134         }
2135
2136         /* Locate record for the first record on track 1. */
2137         ccw[-1].flags |= CCW_FLAG_CC;
2138         locate_record(ccw++, LO_data++, 1, 0, 1,
2139                       DASD_ECKD_CCW_READ_COUNT, device, 0);
2140         /* Read count ccw. */
2141         ccw[-1].flags |= CCW_FLAG_CC;
2142         ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2143         ccw->flags = 0;
2144         ccw->count = 8;
2145         ccw->cda = (__u32)(addr_t) count_data;
2146
2147         cqr->block = NULL;
2148         cqr->startdev = device;
2149         cqr->memdev = device;
2150         cqr->retries = 255;
2151         cqr->buildclk = get_tod_clock();
2152         cqr->status = DASD_CQR_FILLED;
2153         /* Set flags to suppress output for expected errors */
2154         set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2155
2156         return cqr;
2157 }
2158
2159 /* differentiate between 'no record found' and any other error */
2160 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2161 {
2162         char *sense;
2163         if (init_cqr->status == DASD_CQR_DONE)
2164                 return INIT_CQR_OK;
2165         else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2166                  init_cqr->status == DASD_CQR_FAILED) {
2167                 sense = dasd_get_sense(&init_cqr->irb);
2168                 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2169                         return INIT_CQR_UNFORMATTED;
2170                 else
2171                         return INIT_CQR_ERROR;
2172         } else
2173                 return INIT_CQR_ERROR;
2174 }
2175
2176 /*
2177  * This is the callback function for the init_analysis cqr. It saves
2178  * the status of the initial analysis ccw before it frees it and kicks
2179  * the device to continue the startup sequence. This will call
2180  * dasd_eckd_do_analysis again (if the device has not been marked
2181  * for deletion in the meantime).
2182  */
2183 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2184                                         void *data)
2185 {
2186         struct dasd_device *device = init_cqr->startdev;
2187         struct dasd_eckd_private *private = device->private;
2188
2189         private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2190         dasd_sfree_request(init_cqr, device);
2191         dasd_kick_device(device);
2192 }
2193
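     /*
      * Queue the initial analysis request; the result is evaluated
      * asynchronously in dasd_eckd_analysis_callback. Returning -EAGAIN
      * signals that the analysis is still in progress.
      */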
2194 static int dasd_eckd_start_analysis(struct dasd_block *block)
2195 {
2196         struct dasd_ccw_req *init_cqr;
2197
2198         init_cqr = dasd_eckd_analysis_ccw(block->base);
2199         if (IS_ERR(init_cqr))
2200                 return PTR_ERR(init_cqr);
2201         init_cqr->callback = dasd_eckd_analysis_callback;
2202         init_cqr->callback_data = NULL;
2203         init_cqr->expires = 5*HZ;
2204         /* first try without ERP, so we can later handle unformatted
2205          * devices as a special case
2206          */
2207         clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2208         init_cqr->retries = 0;
2209         dasd_add_request_head(init_cqr);
2210         return -EAGAIN;
2211 }
2212
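     /*
      * Evaluate the result of the initial analysis: detect whether the
      * device uses the compatible disk layout (CDL) or the Linux disk
      * layout, derive the block size and compute the total number of
      * blocks.
      */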
2213 static int dasd_eckd_end_analysis(struct dasd_block *block)
2214 {
2215         struct dasd_device *device = block->base;
2216         struct dasd_eckd_private *private = device->private;
2217         struct eckd_count *count_area;
2218         unsigned int sb, blk_per_trk;
2219         int status, i;
2220         struct dasd_ccw_req *init_cqr;
2221
2222         status = private->init_cqr_status;
2223         private->init_cqr_status = -1;
2224         if (status == INIT_CQR_ERROR) {
2225                 /* try again, this time with full ERP */
2226                 init_cqr = dasd_eckd_analysis_ccw(device);
2227                 dasd_sleep_on(init_cqr);
2228                 status = dasd_eckd_analysis_evaluation(init_cqr);
2229                 dasd_sfree_request(init_cqr, device);
2230         }
2231
2232         if (device->features & DASD_FEATURE_USERAW) {
2233                 block->bp_block = DASD_RAW_BLOCKSIZE;
2234                 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2235                 block->s2b_shift = 3;
2236                 goto raw;
2237         }
2238
2239         if (status == INIT_CQR_UNFORMATTED) {
2240                 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2241                 return -EMEDIUMTYPE;
2242         } else if (status == INIT_CQR_ERROR) {
2243                 dev_err(&device->cdev->dev,
2244                         "Detecting the DASD disk layout failed because "
2245                         "of an I/O error\n");
2246                 return -EIO;
2247         }
2248
2249         private->uses_cdl = 1;
2250         /* Check Track 0 for Compatible Disk Layout */
2251         count_area = NULL;
2252         for (i = 0; i < 3; i++) {
2253                 if (private->count_area[i].kl != 4 ||
2254                     private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2255                     private->count_area[i].cyl != 0 ||
2256                     private->count_area[i].head != count_area_head[i] ||
2257                     private->count_area[i].record != count_area_rec[i]) {
2258                         private->uses_cdl = 0;
2259                         break;
2260                 }
2261         }
2262         if (i == 3)
2263                 count_area = &private->count_area[3];
2264
2265         if (private->uses_cdl == 0) {
2266                 for (i = 0; i < 5; i++) {
2267                         if ((private->count_area[i].kl != 0) ||
2268                             (private->count_area[i].dl !=
2269                              private->count_area[0].dl) ||
2270                             private->count_area[i].cyl !=  0 ||
2271                             private->count_area[i].head != count_area_head[i] ||
2272                             private->count_area[i].record != count_area_rec[i])
2273                                 break;
2274                 }
2275                 if (i == 5)
2276                         count_area = &private->count_area[0];
2277         } else {
2278                 if (private->count_area[3].record == 1)
2279                         dev_warn(&device->cdev->dev,
2280                                  "Track 0 has no records following the VTOC\n");
2281         }
2282
2283         if (count_area != NULL && count_area->kl == 0) {
2284                 /* we found nothing violating our disk layout */
2285                 if (dasd_check_blocksize(count_area->dl) == 0)
2286                         block->bp_block = count_area->dl;
2287         }
2288         if (block->bp_block == 0) {
2289                 dev_warn(&device->cdev->dev,
2290                          "The disk layout of the DASD is not supported\n");
2291                 return -EMEDIUMTYPE;
2292         }
2293         block->s2b_shift = 0;   /* bits to shift 512 to get a block */
2294         for (sb = 512; sb < block->bp_block; sb = sb << 1)
2295                 block->s2b_shift++;
2296
2297         blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2298
2299 raw:
2300         block->blocks = ((unsigned long) private->real_cyl *
2301                           private->rdc_data.trk_per_cyl *
2302                           blk_per_trk);
2303
2304         dev_info(&device->cdev->dev,
2305                  "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2306                  "%s\n", (block->bp_block >> 10),
2307                  (((unsigned long) private->real_cyl *
2308                    private->rdc_data.trk_per_cyl *
2309                    blk_per_trk * (block->bp_block >> 9)) >> 1),
2310                  ((blk_per_trk * block->bp_block) >> 10),
2311                  private->uses_cdl ?
2312                  "compatible disk layout" : "linux disk layout");
2313
2314         return 0;
2315 }
2316
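     /*
      * Either start the initial analysis or, if its result is already
      * available, evaluate it.
      */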
2317 static int dasd_eckd_do_analysis(struct dasd_block *block)
2318 {
2319         struct dasd_eckd_private *private = block->base->private;
2320
2321         if (private->init_cqr_status < 0)
2322                 return dasd_eckd_start_analysis(block);
2323         else
2324                 return dasd_eckd_end_analysis(block);
2325 }
2326
2327 static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2328 {
2329         return dasd_alias_add_device(device);
2330 }
2331
2332 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2333 {
2334         if (cancel_work_sync(&device->reload_device))
2335                 dasd_put_device(device);
2336         if (cancel_work_sync(&device->kick_validate))
2337                 dasd_put_device(device);
2338
2339         return 0;
2340 }
2341
2342 static int dasd_eckd_basic_to_known(struct dasd_device *device)
2343 {
2344         return dasd_alias_remove_device(device);
2345 }
2346
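     /*
      * Fill in the disk geometry (cylinders, heads, sectors per track)
      * from the device characteristics.
      */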
2347 static int
2348 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2349 {
2350         struct dasd_eckd_private *private = block->base->private;
2351
2352         if (dasd_check_blocksize(block->bp_block) == 0) {
2353                 geo->sectors = recs_per_track(&private->rdc_data,
2354                                               0, block->bp_block);
2355         }
2356         geo->cylinders = private->rdc_data.no_cyl;
2357         geo->heads = private->rdc_data.trk_per_cyl;
2358         return 0;
2359 }
2360
2361 /*
2362  * Build the TCW request for the format check
2363  */
2364 static struct dasd_ccw_req *
2365 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2366                           int enable_pav, struct eckd_count *fmt_buffer,
2367                           int rpt)
2368 {
2369         struct dasd_eckd_private *start_priv;
2370         struct dasd_device *startdev = NULL;
2371         struct tidaw *last_tidaw = NULL;
2372         struct dasd_ccw_req *cqr;
2373         struct itcw *itcw;
2374         int itcw_size;
2375         int count;
2376         int rc;
2377         int i;
2378
2379         if (enable_pav)
2380                 startdev = dasd_alias_get_start_dev(base);
2381
2382         if (!startdev)
2383                 startdev = base;
2384
2385         start_priv = startdev->private;
2386
2387         count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2388
2389         /*
2390          * We're adding 'count' tidaws to the itcw.
2391          * Calculate the corresponding itcw_size.
2392          */
2393         itcw_size = itcw_calc_size(0, count, 0);
2394
2395         cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2396         if (IS_ERR(cqr))
2397                 return cqr;
2398
2399         start_priv->count++;
2400
2401         itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2402         if (IS_ERR(itcw)) {
2403                 rc = -EINVAL;
2404                 goto out_err;
2405         }
2406
2407         cqr->cpaddr = itcw_get_tcw(itcw);
2408         rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2409                           DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2410                           sizeof(struct eckd_count),
2411                           count * sizeof(struct eckd_count), 0, rpt);
2412         if (rc)
2413                 goto out_err;
2414
2415         for (i = 0; i < count; i++) {
2416                 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2417                                             sizeof(struct eckd_count));
2418                 if (IS_ERR(last_tidaw)) {
2419                         rc = -EINVAL;
2420                         goto out_err;
2421                 }
2422         }
2423
2424         last_tidaw->flags |= TIDAW_FLAGS_LAST;
2425         itcw_finalize(itcw);
2426
2427         cqr->cpmode = 1;
2428         cqr->startdev = startdev;
2429         cqr->memdev = startdev;
2430         cqr->basedev = base;
2431         cqr->retries = startdev->default_retries;
2432         cqr->expires = startdev->default_expires * HZ;
2433         cqr->buildclk = get_tod_clock();
2434         cqr->status = DASD_CQR_FILLED;
2435         /* Set flags to suppress output for expected errors */
2436         set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2437         set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2438
2439         return cqr;
2440
2441 out_err:
2442         dasd_sfree_request(cqr, startdev);
2443
2444         return ERR_PTR(rc);
2445 }
2446
2447 /*
2448  * Build the CCW request for the format check
2449  */
2450 static struct dasd_ccw_req *
2451 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2452                       int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2453 {
2454         struct dasd_eckd_private *start_priv;
2455         struct dasd_eckd_private *base_priv;
2456         struct dasd_device *startdev = NULL;
2457         struct dasd_ccw_req *cqr;
2458         struct ccw1 *ccw;
2459         void *data;
2460         int cplength, datasize;
2461         int use_prefix;
2462         int count;
2463         int i;
2464
2465         if (enable_pav)
2466                 startdev = dasd_alias_get_start_dev(base);
2467
2468         if (!startdev)
2469                 startdev = base;
2470
2471         start_priv = startdev->private;
2472         base_priv = base->private;
2473
2474         count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2475
2476         use_prefix = base_priv->features.feature[8] & 0x01;
2477
2478         if (use_prefix) {
2479                 cplength = 1;
2480                 datasize = sizeof(struct PFX_eckd_data);
2481         } else {
2482                 cplength = 2;
2483                 datasize = sizeof(struct DE_eckd_data) +
2484                         sizeof(struct LO_eckd_data);
2485         }
2486         cplength += count;
2487
2488         cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2489         if (IS_ERR(cqr))
2490                 return cqr;
2491
2492         start_priv->count++;
2493         data = cqr->data;
2494         ccw = cqr->cpaddr;
2495
2496         if (use_prefix) {
2497                 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2498                            DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2499                            count, 0, 0);
2500         } else {
2501                 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2502                               DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2503
2504                 data += sizeof(struct DE_eckd_data);
2505                 ccw[-1].flags |= CCW_FLAG_CC;
2506
2507                 locate_record(ccw++, data, fdata->start_unit, 0, count,
2508                               DASD_ECKD_CCW_READ_COUNT, base, 0);
2509         }
2510
2511         for (i = 0; i < count; i++) {
2512                 ccw[-1].flags |= CCW_FLAG_CC;
2513                 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2514                 ccw->flags = CCW_FLAG_SLI;
2515                 ccw->count = 8;
2516                 ccw->cda = (__u32)(addr_t) fmt_buffer;
2517                 ccw++;
2518                 fmt_buffer++;
2519         }
2520
2521         cqr->startdev = startdev;
2522         cqr->memdev = startdev;
2523         cqr->basedev = base;
2524         cqr->retries = DASD_RETRIES;
2525         cqr->expires = startdev->default_expires * HZ;
2526         cqr->buildclk = get_tod_clock();
2527         cqr->status = DASD_CQR_FILLED;
2528         /* Set flags to suppress output for expected errors */
2529         set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2530
2531         return cqr;
2532 }
2533
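     /*
      * Build the CCW chain for formatting a range of tracks. Depending on
      * fdata->intensity this writes record zero, formats the tracks with
      * the requested block size, and/or invalidates them.
      */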
2534 static struct dasd_ccw_req *
2535 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2536                        struct format_data_t *fdata, int enable_pav)
2537 {
2538         struct dasd_eckd_private *base_priv;
2539         struct dasd_eckd_private *start_priv;
2540         struct dasd_ccw_req *fcp;
2541         struct eckd_count *ect;
2542         struct ch_t address;
2543         struct ccw1 *ccw;
2544         void *data;
2545         int rpt;
2546         int cplength, datasize;
2547         int i, j;
2548         int intensity = 0;
2549         int r0_perm;
2550         int nr_tracks;
2551         int use_prefix;
2552
2553         if (enable_pav)
2554                 startdev = dasd_alias_get_start_dev(base);
2555
2556         if (!startdev)
2557                 startdev = base;
2558
2559         start_priv = startdev->private;
2560         base_priv = base->private;
2561
2562         rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2563
2564         nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2565
2566         /*
2567          * fdata->intensity is a bit string that tells us what to do:
2568          *   Bit 0: write record zero
2569          *   Bit 1: write home address, currently not supported
2570          *   Bit 2: invalidate tracks
2571          *   Bit 3: use OS/390 compatible disk layout (cdl)
2572          *   Bit 4: do not allow storage subsystem to modify record zero
2573          * Only some bit combinations make sense.
2574          */
2575         if (fdata->intensity & 0x10) {
2576                 r0_perm = 0;
2577                 intensity = fdata->intensity & ~0x10;
2578         } else {
2579                 r0_perm = 1;
2580                 intensity = fdata->intensity;
2581         }
2582
2583         use_prefix = base_priv->features.feature[8] & 0x01;
2584
2585         switch (intensity) {
2586         case 0x00:      /* Normal format */
2587         case 0x08:      /* Normal format, use cdl. */
2588                 cplength = 2 + (rpt*nr_tracks);
2589                 if (use_prefix)
2590                         datasize = sizeof(struct PFX_eckd_data) +
2591                                 sizeof(struct LO_eckd_data) +
2592                                 rpt * nr_tracks * sizeof(struct eckd_count);
2593                 else
2594                         datasize = sizeof(struct DE_eckd_data) +
2595                                 sizeof(struct LO_eckd_data) +
2596                                 rpt * nr_tracks * sizeof(struct eckd_count);
2597                 break;
2598         case 0x01:      /* Write record zero and format track. */
2599         case 0x09:      /* Write record zero and format track, use cdl. */
2600                 cplength = 2 + rpt * nr_tracks;
2601                 if (use_prefix)
2602                         datasize = sizeof(struct PFX_eckd_data) +
2603                                 sizeof(struct LO_eckd_data) +
2604                                 sizeof(struct eckd_count) +
2605                                 rpt * nr_tracks * sizeof(struct eckd_count);
2606                 else
2607                         datasize = sizeof(struct DE_eckd_data) +
2608                                 sizeof(struct LO_eckd_data) +
2609                                 sizeof(struct eckd_count) +
2610                                 rpt * nr_tracks * sizeof(struct eckd_count);
2611                 break;
2612         case 0x04:      /* Invalidate track. */
2613         case 0x0c:      /* Invalidate track, use cdl. */
2614                 cplength = 3;
2615                 if (use_prefix)
2616                         datasize = sizeof(struct PFX_eckd_data) +
2617                                 sizeof(struct LO_eckd_data) +
2618                                 sizeof(struct eckd_count);
2619                 else
2620                         datasize = sizeof(struct DE_eckd_data) +
2621                                 sizeof(struct LO_eckd_data) +
2622                                 sizeof(struct eckd_count);
2623                 break;
2624         default:
2625                 dev_warn(&startdev->cdev->dev,
2626                          "An I/O control call used incorrect flags 0x%x\n",
2627                          fdata->intensity);
2628                 return ERR_PTR(-EINVAL);
2629         }
2630
2631         fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2632         if (IS_ERR(fcp))
2633                 return fcp;
2634
2635         start_priv->count++;
2636         data = fcp->data;
2637         ccw = fcp->cpaddr;
2638
2639         switch (intensity & ~0x08) {
2640         case 0x00: /* Normal format. */
2641                 if (use_prefix) {
2642                         prefix(ccw++, (struct PFX_eckd_data *) data,
2643                                fdata->start_unit, fdata->stop_unit,
2644                                DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2645                         /* grant subsystem permission to format R0 */
2646                         if (r0_perm)
2647                                 ((struct PFX_eckd_data *)data)
2648                                         ->define_extent.ga_extended |= 0x04;
2649                         data += sizeof(struct PFX_eckd_data);
2650                 } else {
2651                         define_extent(ccw++, (struct DE_eckd_data *) data,
2652                                       fdata->start_unit, fdata->stop_unit,
2653                                       DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2654                         /* grant subsystem permission to format R0 */
2655                         if (r0_perm)
2656                                 ((struct DE_eckd_data *) data)
2657                                         ->ga_extended |= 0x04;
2658                         data += sizeof(struct DE_eckd_data);
2659                 }
2660                 ccw[-1].flags |= CCW_FLAG_CC;
2661                 locate_record(ccw++, (struct LO_eckd_data *) data,
2662                               fdata->start_unit, 0, rpt*nr_tracks,
2663                               DASD_ECKD_CCW_WRITE_CKD, base,
2664                               fdata->blksize);
2665                 data += sizeof(struct LO_eckd_data);
2666                 break;
2667         case 0x01: /* Write record zero + format track. */
2668                 if (use_prefix) {
2669                         prefix(ccw++, (struct PFX_eckd_data *) data,
2670                                fdata->start_unit, fdata->stop_unit,
2671                                DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2672                                base, startdev);
2673                         data += sizeof(struct PFX_eckd_data);
2674                 } else {
2675                         define_extent(ccw++, (struct DE_eckd_data *) data,
2676                                fdata->start_unit, fdata->stop_unit,
2677                                DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2678                         data += sizeof(struct DE_eckd_data);
2679                 }
2680                 ccw[-1].flags |= CCW_FLAG_CC;
2681                 locate_record(ccw++, (struct LO_eckd_data *) data,
2682                               fdata->start_unit, 0, rpt * nr_tracks + 1,
2683                               DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2684                               base->block->bp_block);
2685                 data += sizeof(struct LO_eckd_data);
2686                 break;
2687         case 0x04: /* Invalidate track. */
2688                 if (use_prefix) {
2689                         prefix(ccw++, (struct PFX_eckd_data *) data,
2690                                fdata->start_unit, fdata->stop_unit,
2691                                DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2692                         data += sizeof(struct PFX_eckd_data);
2693                 } else {
2694                         define_extent(ccw++, (struct DE_eckd_data *) data,
2695                                fdata->start_unit, fdata->stop_unit,
2696                                DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2697                         data += sizeof(struct DE_eckd_data);
2698                 }
2699                 ccw[-1].flags |= CCW_FLAG_CC;
2700                 locate_record(ccw++, (struct LO_eckd_data *) data,
2701                               fdata->start_unit, 0, 1,
2702                               DASD_ECKD_CCW_WRITE_CKD, base, 8);
2703                 data += sizeof(struct LO_eckd_data);
2704                 break;
2705         }
2706
2707         for (j = 0; j < nr_tracks; j++) {
2708                 /* calculate cylinder and head for the current track */
2709                 set_ch_t(&address,
2710                          (fdata->start_unit + j) /
2711                          base_priv->rdc_data.trk_per_cyl,
2712                          (fdata->start_unit + j) %
2713                          base_priv->rdc_data.trk_per_cyl);
2714                 if (intensity & 0x01) { /* write record zero */
2715                         ect = (struct eckd_count *) data;
2716                         data += sizeof(struct eckd_count);
2717                         ect->cyl = address.cyl;
2718                         ect->head = address.head;
2719                         ect->record = 0;
2720                         ect->kl = 0;
2721                         ect->dl = 8;
2722                         ccw[-1].flags |= CCW_FLAG_CC;
2723                         ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2724                         ccw->flags = CCW_FLAG_SLI;
2725                         ccw->count = 8;
2726                         ccw->cda = (__u32)(addr_t) ect;
2727                         ccw++;
2728                 }
2729                 if ((intensity & ~0x08) & 0x04) {       /* erase track */
2730                         ect = (struct eckd_count *) data;
2731                         data += sizeof(struct eckd_count);
2732                         ect->cyl = address.cyl;
2733                         ect->head = address.head;
2734                         ect->record = 1;
2735                         ect->kl = 0;
2736                         ect->dl = 0;
2737                         ccw[-1].flags |= CCW_FLAG_CC;
2738                         ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2739                         ccw->flags = CCW_FLAG_SLI;
2740                         ccw->count = 8;
2741                         ccw->cda = (__u32)(addr_t) ect;
2742                 } else {                /* write remaining records */
2743                         for (i = 0; i < rpt; i++) {
2744                                 ect = (struct eckd_count *) data;
2745                                 data += sizeof(struct eckd_count);
2746                                 ect->cyl = address.cyl;
2747                                 ect->head = address.head;
2748                                 ect->record = i + 1;
2749                                 ect->kl = 0;
2750                                 ect->dl = fdata->blksize;
2751                                 /*
2752                                  * Check for special tracks 0-1
2753                                  * when formatting CDL
2754                                  */
2755                                 if ((intensity & 0x08) &&
2756                                     address.cyl == 0 && address.head == 0) {
2757                                         if (i < 3) {
2758                                                 ect->kl = 4;
2759                                                 ect->dl = sizes_trk0[i] - 4;
2760                                         }
2761                                 }
2762                                 if ((intensity & 0x08) &&
2763                                     address.cyl == 0 && address.head == 1) {
2764                                         ect->kl = 44;
2765                                         ect->dl = LABEL_SIZE - 44;
2766                                 }
2767                                 ccw[-1].flags |= CCW_FLAG_CC;
2768                                 if (i != 0 || j == 0)
2769                                         ccw->cmd_code =
2770                                                 DASD_ECKD_CCW_WRITE_CKD;
2771                                 else
2772                                         ccw->cmd_code =
2773                                                 DASD_ECKD_CCW_WRITE_CKD_MT;
2774                                 ccw->flags = CCW_FLAG_SLI;
2775                                 ccw->count = 8;
2776                                 ccw->cda = (__u32)(addr_t) ect;
2777                                 ccw++;
2778                         }
2779                 }
2780         }
2781
2782         fcp->startdev = startdev;
2783         fcp->memdev = startdev;
2784         fcp->basedev = base;
2785         fcp->retries = 256;
2786         fcp->expires = startdev->default_expires * HZ;
2787         fcp->buildclk = get_tod_clock();
2788         fcp->status = DASD_CQR_FILLED;
2789
2790         return fcp;
2791 }
2792
2793 /*
2794  * Wrapper function to build a CCW request depending on input data
2795  */
2796 static struct dasd_ccw_req *
2797 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2798                                struct format_data_t *fdata, int enable_pav,
2799                                int tpm, struct eckd_count *fmt_buffer, int rpt)
2800 {
2801         struct dasd_ccw_req *ccw_req;
2802
2803         if (!fmt_buffer) {
2804                 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2805         } else {
2806                 if (tpm)
2807                         ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2808                                                             enable_pav,
2809                                                             fmt_buffer, rpt);
2810                 else
2811                         ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2812                                                         fmt_buffer, rpt);
2813         }
2814
2815         return ccw_req;
2816 }
2817
2818 /*
2819  * Sanity checks on format_data
2820  */
2821 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2822                                           struct format_data_t *fdata)
2823 {
2824         struct dasd_eckd_private *private = base->private;
2825
2826         if (fdata->start_unit >=
2827             (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2828                 dev_warn(&base->cdev->dev,
2829                          "Start track number %u used in formatting is too big\n",
2830                          fdata->start_unit);
2831                 return -EINVAL;
2832         }
2833         if (fdata->stop_unit >=
2834             (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2835                 dev_warn(&base->cdev->dev,
2836                          "Stop track number %u used in formatting is too big\n",
2837                          fdata->stop_unit);
2838                 return -EINVAL;
2839         }
2840         if (fdata->start_unit > fdata->stop_unit) {
2841                 dev_warn(&base->cdev->dev,
2842                          "Start track %u used in formatting exceeds end track\n",
2843                          fdata->start_unit);
2844                 return -EINVAL;
2845         }
2846         if (dasd_check_blocksize(fdata->blksize) != 0) {
2847                 dev_warn(&base->cdev->dev,
2848                          "The DASD cannot be formatted with block size %u\n",
2849                          fdata->blksize);
2850                 return -EINVAL;
2851         }
2852         return 0;
2853 }
2854
2855 /*
2856  * This function will process format_data originally coming from an IOCTL
2857  */
2858 static int dasd_eckd_format_process_data(struct dasd_device *base,
2859                                          struct format_data_t *fdata,
2860                                          int enable_pav, int tpm,
2861                                          struct eckd_count *fmt_buffer, int rpt,
2862                                          struct irb *irb)
2863 {
2864         struct dasd_eckd_private *private = base->private;
2865         struct dasd_ccw_req *cqr, *n;
2866         struct list_head format_queue;
2867         struct dasd_device *device;
2868         char *sense = NULL;
2869         int old_start, old_stop, format_step;
2870         int step, retry;
2871         int rc;
2872
2873         rc = dasd_eckd_format_sanity_checks(base, fdata);
2874         if (rc)
2875                 return rc;
2876
2877         INIT_LIST_HEAD(&format_queue);
2878
2879         old_start = fdata->start_unit;
2880         old_stop = fdata->stop_unit;
2881
2882         if (!tpm && fmt_buffer != NULL) {
2883                 /* Command Mode / Format Check */
2884                 format_step = 1;
2885         } else if (tpm && fmt_buffer != NULL) {
2886                 /* Transport Mode / Format Check */
2887                 format_step = DASD_CQR_MAX_CCW / rpt;
2888         } else {
2889                 /* Normal Formatting */
2890                 format_step = DASD_CQR_MAX_CCW /
2891                         recs_per_track(&private->rdc_data, 0, fdata->blksize);
2892         }
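	/*
	 * Illustrative sizing (assumes a 3390 with 4KB records, where a track
	 * holds 12 records): plain formatting would then use roughly
	 *
	 *	format_step = DASD_CQR_MAX_CCW / 12;
	 *
	 * tracks per request; the exact value depends on rdc_data and the
	 * requested block size.
	 */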
2893
2894         do {
2895                 retry = 0;
2896                 while (fdata->start_unit <= old_stop) {
2897                         step = fdata->stop_unit - fdata->start_unit + 1;
2898                         if (step > format_step) {
2899                                 fdata->stop_unit =
2900                                         fdata->start_unit + format_step - 1;
2901                         }
2902
2903                         cqr = dasd_eckd_format_build_ccw_req(base, fdata,
2904                                                              enable_pav, tpm,
2905                                                              fmt_buffer, rpt);
2906                         if (IS_ERR(cqr)) {
2907                                 rc = PTR_ERR(cqr);
2908                                 if (rc == -ENOMEM) {
2909                                         if (list_empty(&format_queue))
2910                                                 goto out;
2911                                         /*
2912                                          * Not enough memory available; start
2913                                          * the queued requests and retry once
2914                                          * they have finished.
2915                                          */
2916                                         retry = 1;
2917                                         break;
2918                                 }
2919                                 goto out_err;
2920                         }
2921                         list_add_tail(&cqr->blocklist, &format_queue);
2922
2923                         if (fmt_buffer) {
2924                                 step = fdata->stop_unit - fdata->start_unit + 1;
2925                                 fmt_buffer += rpt * step;
2926                         }
2927                         fdata->start_unit = fdata->stop_unit + 1;
2928                         fdata->stop_unit = old_stop;
2929                 }
2930
2931                 rc = dasd_sleep_on_queue(&format_queue);
2932
2933 out_err:
2934                 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
2935                         device = cqr->startdev;
2936                         private = device->private;
2937
2938                         if (cqr->status == DASD_CQR_FAILED) {
2939                                 /*
2940                                  * Only get sense data if called by format
2941                                  * check
2942                                  */
2943                                 if (fmt_buffer && irb) {
2944                                         sense = dasd_get_sense(&cqr->irb);
2945                                         memcpy(irb, &cqr->irb, sizeof(*irb));
2946                                 }
2947                                 rc = -EIO;
2948                         }
2949                         list_del_init(&cqr->blocklist);
2950                         dasd_ffree_request(cqr, device);
2951                         private->count--;
2952                 }
2953
2954                 if (rc && rc != -EIO)
2955                         goto out;
2956                 if (rc == -EIO) {
2957                         /*
2958                          * In case fewer than the expected records are on the
2959                          * track, we will most likely get a 'No Record Found'
2960                          * error (in command mode) or a 'File Protected' error
2961                          * (in transport mode). Those particular cases shouldn't
2962                          * pass the -EIO to the IOCTL, therefore reset the rc
2963                          * and continue.
2964                          */
2965                         if (sense &&
2966                             (sense[1] & SNS1_NO_REC_FOUND ||
2967                              sense[1] & SNS1_FILE_PROTECTED))
2968                                 retry = 1;
2969                         else
2970                                 goto out;
2971                 }
2972
2973         } while (retry);
2974
2975 out:
2976         fdata->start_unit = old_start;
2977         fdata->stop_unit = old_stop;
2978
2979         return rc;
2980 }
2981
2982 static int dasd_eckd_format_device(struct dasd_device *base,
2983                                    struct format_data_t *fdata, int enable_pav)
2984 {
2985         return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
2986                                              0, NULL);
2987 }
2988
2989 /*
2990  * Callback function to free ESE format requests.
2991  */
2992 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
2993 {
2994         struct dasd_device *device = cqr->startdev;
2995         struct dasd_eckd_private *private = device->private;
2996
2997         private->count--;
2998         dasd_ffree_request(cqr, device);
2999 }
3000
3001 static struct dasd_ccw_req *
3002 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
3003 {
3004         struct dasd_eckd_private *private;
3005         struct format_data_t fdata;
3006         unsigned int recs_per_trk;
3007         struct dasd_ccw_req *fcqr;
3008         struct dasd_device *base;
3009         struct dasd_block *block;
3010         unsigned int blksize;
3011         struct request *req;
3012         sector_t first_trk;
3013         sector_t last_trk;
3014         int rc;
3015
3016         req = cqr->callback_data;
3017         base = cqr->block->base;
3018         private = base->private;
3019         block = base->block;
3020         blksize = block->bp_block;
3021         recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3022
3023         first_trk = blk_rq_pos(req) >> block->s2b_shift;
3024         sector_div(first_trk, recs_per_trk);
3025         last_trk =
3026                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3027         sector_div(last_trk, recs_per_trk);
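	/*
	 * Worked example (illustrative, 4KB blocks): blksize = 4096 gives
	 * s2b_shift = 3 (eight 512-byte sectors per block) and, on a 3390,
	 * recs_per_trk = 12; a request starting at sector 960 therefore maps
	 * to block 120 and to track 10.
	 */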
3028
3029         fdata.start_unit = first_trk;
3030         fdata.stop_unit = last_trk;
3031         fdata.blksize = blksize;
3032         fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3033
3034         rc = dasd_eckd_format_sanity_checks(base, &fdata);
3035         if (rc)
3036                 return ERR_PTR(-EINVAL);
3037
3038         /*
3039          * We're building the request with PAV disabled as we're reusing
3040          * the former startdev.
3041          */
3042         fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3043         if (IS_ERR(fcqr))
3044                 return fcqr;
3045
3046         fcqr->callback = dasd_eckd_ese_format_cb;
3047
3048         return fcqr;
3049 }
3050
3051 /*
3052  * When data is read from an unformatted area of an ESE volume, this function
3053  * returns zeroed data and thereby mimics a read of zero data.
3054  */
3055 static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
3056 {
3057         unsigned int blksize, off;
3058         struct dasd_device *base;
3059         struct req_iterator iter;
3060         struct request *req;
3061         struct bio_vec bv;
3062         char *dst;
3063
3064         req = (struct request *) cqr->callback_data;
3065         base = cqr->block->base;
3066         blksize = base->block->bp_block;
3067
3068         rq_for_each_segment(bv, req, iter) {
3069                 dst = page_address(bv.bv_page) + bv.bv_offset;
3070                 for (off = 0; off < bv.bv_len; off += blksize) {
3071                         if (dst && rq_data_dir(req) == READ) {
3072                                 /* zero this block; do not advance dst itself */
3073                                 memset(dst + off, 0, blksize);
3074                         }
3075                 }
3076         }
3077 }
3078
3079 /*
3080  * Helper function to count consecutive records of a single track.
3081  */
3082 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3083                                    int max)
3084 {
3085         int head;
3086         int i;
3087
3088         head = fmt_buffer[start].head;
3089
3090         /*
3091          * There are 3 conditions where we stop counting:
3092          * - if record 1 reoccurs on the same head (the data wraps around),
3093          *   which may happen due to the way DASD_ECKD_CCW_READ_COUNT works
3094          * - when the head changes, because we're iterating over several tracks
3095          *   then (DASD_ECKD_CCW_READ_COUNT_MT)
3096          * - when we've reached the end of sensible data in the buffer (the
3097          *   record will be 0 then)
3098          */
3099         for (i = start; i < max; i++) {
3100                 if (i > start) {
3101                         if ((fmt_buffer[i].head == head &&
3102                             fmt_buffer[i].record == 1) ||
3103                             fmt_buffer[i].head != head ||
3104                             fmt_buffer[i].record == 0)
3105                                 break;
3106                 }
3107         }
3108
3109         return i - start;
3110 }
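/*
 * Illustrative example: for a buffer holding the count fields
 *
 *	head 0: records 1, 2, ..., 12
 *	head 1: records 1, 2, ...
 *
 * dasd_eckd_count_records(fmt_buffer, 0, 24) returns 12; counting stops at
 * the head change, at a reoccurring record 1 on the same head, or at a
 * zeroed entry that marks the end of valid data.
 */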
3111
3112 /*
3113  * Evaluate a given range of tracks. Data like number of records, blocksize,
3114  * record ids, and key length are compared with expected data.
3115  *
3116  * If a mismatch occurs, the corresponding error bit is set and additional
3117  * information is recorded, depending on the error.
3118  */
3119 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3120                                              struct format_check_t *cdata,
3121                                              int rpt_max, int rpt_exp,
3122                                              int trk_per_cyl, int tpm)
3123 {
3124         struct ch_t geo;
3125         int max_entries;
3126         int count = 0;
3127         int trkcount;
3128         int blksize;
3129         int pos = 0;
3130         int i, j;
3131         int kl;
3132
3133         trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3134         max_entries = trkcount * rpt_max;
3135
3136         for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3137                 /* Calculate the correct next starting position in the buffer */
3138                 if (tpm) {
3139                         while (fmt_buffer[pos].record == 0 &&
3140                                fmt_buffer[pos].dl == 0) {
3141                                 if (pos++ > max_entries)
3142                                         break;
3143                         }
3144                 } else {
3145                         if (i != cdata->expect.start_unit)
3146                                 pos += rpt_max - count;
3147                 }
3148
3149                 /* Calculate the expected geo values for the current track */
3150                 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3151
3152                 /* Count and check number of records */
3153                 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3154
3155                 if (count < rpt_exp) {
3156                         cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3157                         break;
3158                 }
3159                 if (count > rpt_exp) {
3160                         cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3161                         break;
3162                 }
3163
3164                 for (j = 0; j < count; j++, pos++) {
3165                         blksize = cdata->expect.blksize;
3166                         kl = 0;
3167
3168                         /*
3169                          * Set special values when checking CDL formatted
3170                          * devices.
3171                          */
3172                         if ((cdata->expect.intensity & 0x08) &&
3173                             geo.cyl == 0 && geo.head == 0) {
3174                                 if (j < 3) {
3175                                         blksize = sizes_trk0[j] - 4;
3176                                         kl = 4;
3177                                 }
3178                         }
3179                         if ((cdata->expect.intensity & 0x08) &&
3180                             geo.cyl == 0 && geo.head == 1) {
3181                                 blksize = LABEL_SIZE - 44;
3182                                 kl = 44;
3183                         }
3184
3185                         /* Check blocksize */
3186                         if (fmt_buffer[pos].dl != blksize) {
3187                                 cdata->result = DASD_FMT_ERR_BLKSIZE;
3188                                 goto out;
3189                         }
3190                         /* Check the key length against the expected value */
3191                         if (fmt_buffer[pos].kl != kl) {
3192                                 cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3193                                 goto out;
3194                         }
3195                         /* Check if record_id is correct */
3196                         if (fmt_buffer[pos].cyl != geo.cyl ||
3197                             fmt_buffer[pos].head != geo.head ||
3198                             fmt_buffer[pos].record != (j + 1)) {
3199                                 cdata->result = DASD_FMT_ERR_RECORD_ID;
3200                                 goto out;
3201                         }
3202                 }
3203         }
3204
3205 out:
3206         /*
3207          * In case of no errors, we need to decrease by one
3208          * to get the correct positions.
3209          */
3210         if (!cdata->result) {
3211                 i--;
3212                 pos--;
3213         }
3214
3215         cdata->unit = i;
3216         cdata->num_records = count;
3217         cdata->rec = fmt_buffer[pos].record;
3218         cdata->blksize = fmt_buffer[pos].dl;
3219         cdata->key_length = fmt_buffer[pos].kl;
3220 }
3221
3222 /*
3223  * Check the format of a range of tracks of a DASD.
3224  */
3225 static int dasd_eckd_check_device_format(struct dasd_device *base,
3226                                          struct format_check_t *cdata,
3227                                          int enable_pav)
3228 {
3229         struct dasd_eckd_private *private = base->private;
3230         struct eckd_count *fmt_buffer;
3231         struct irb irb;
3232         int rpt_max, rpt_exp;
3233         int fmt_buffer_size;
3234         int trk_per_cyl;
3235         int trkcount;
3236         int tpm = 0;
3237         int rc;
3238
3239         trk_per_cyl = private->rdc_data.trk_per_cyl;
3240
3241         /* Get the maximum and expected number of records per track */
3242         rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3243         rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3244
3245         trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3246         fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3247
3248         fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3249         if (!fmt_buffer)
3250                 return -ENOMEM;
3251
3252         /*
3253          * A certain FICON feature subset is needed to operate in transport
3254          * mode. Additionally, the support for transport mode is implicitly
3255          * checked by comparing the buffer size with fcx_max_data. As long as
3256          * the buffer size is smaller we can operate in transport mode and
3257          * process multiple tracks. If not, only one track at a time is
3258          * processed using command mode.
3259          */
3260         if ((private->features.feature[40] & 0x04) &&
3261             fmt_buffer_size <= private->fcx_max_data)
3262                 tpm = 1;
3263
3264         rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3265                                            tpm, fmt_buffer, rpt_max, &irb);
3266         if (rc && rc != -EIO)
3267                 goto out;
3268         if (rc == -EIO) {
3269                 /*
3270                  * If our first attempt with transport mode enabled comes back
3271                  * with an incorrect length error, we're going to retry the
3272                  * check with command mode.
3273                  */
3274                 if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3275                         tpm = 0;
3276                         rc = dasd_eckd_format_process_data(base, &cdata->expect,
3277                                                            enable_pav, tpm,
3278                                                            fmt_buffer, rpt_max,
3279                                                            &irb);
3280                         if (rc)
3281                                 goto out;
3282                 } else {
3283                         goto out;
3284                 }
3285         }
3286
3287         dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3288                                          trk_per_cyl, tpm);
3289
3290 out:
3291         kfree(fmt_buffer);
3292
3293         return rc;
3294 }
3295
3296 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3297 {
3298         if (cqr->retries < 0) {
3299                 cqr->status = DASD_CQR_FAILED;
3300                 return;
3301         }
3302         cqr->status = DASD_CQR_FILLED;
3303         if (cqr->block && (cqr->startdev != cqr->block->base)) {
3304                 dasd_eckd_reset_ccw_to_base_io(cqr);
3305                 cqr->startdev = cqr->block->base;
3306                 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3307         }
3308 }
3309
3310 static dasd_erp_fn_t
3311 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3312 {
3313         struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3314         struct ccw_device *cdev = device->cdev;
3315
3316         switch (cdev->id.cu_type) {
3317         case 0x3990:
3318         case 0x2105:
3319         case 0x2107:
3320         case 0x1750:
3321                 return dasd_3990_erp_action;
3322         case 0x9343:
3323         case 0x3880:
3324         default:
3325                 return dasd_default_erp_action;
3326         }
3327 }
3328
3329 static dasd_erp_fn_t
3330 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3331 {
3332         return dasd_default_erp_postaction;
3333 }
3334
3335 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3336                                               struct dasd_ccw_req *cqr,
3337                                               struct irb *irb)
3338 {
3339         char mask;
3340         char *sense = NULL;
3341         struct dasd_eckd_private *private = device->private;
3342
3343         /* first of all check for state change pending interrupt */
3344         mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3345         if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3346                 /*
3347                  * for alias only, not in offline processing
3348                  * and only if not suspended
3349                  */
3350                 if (!device->block && private->lcu &&
3351                     device->state == DASD_STATE_ONLINE &&
3352                     !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3353                     !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3354                         /* schedule worker to reload device */
3355                         dasd_reload_device(device);
3356                 }
3357                 dasd_generic_handle_state_change(device);
3358                 return;
3359         }
3360
3361         sense = dasd_get_sense(irb);
3362         if (!sense)
3363                 return;
3364
3365         /* summary unit check */
3366         if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3367             (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3368                 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3369                         DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3370                                       "eckd suc: device already notified");
3371                         return;
3372                 }
3373                 sense = dasd_get_sense(irb);
3374                 if (!sense) {
3375                         DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3376                                       "eckd suc: no reason code available");
3377                         clear_bit(DASD_FLAG_SUC, &device->flags);
3378                         return;
3379
3380                 }
3381                 private->suc_reason = sense[8];
3382                 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3383                               "eckd handle summary unit check: reason",
3384                               private->suc_reason);
3385                 dasd_get_device(device);
3386                 if (!schedule_work(&device->suc_work))
3387                         dasd_put_device(device);
3388
3389                 return;
3390         }
3391
3392         /* service information message SIM */
3393         if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3394             ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3395                 dasd_3990_erp_handle_sim(device, sense);
3396                 return;
3397         }
3398
3399         /* loss of device reservation is handled via base devices only
3400          * as alias devices may be used with several bases
3401          */
3402         if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3403             (sense[7] == 0x3F) &&
3404             (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3405             test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3406                 if (device->features & DASD_FEATURE_FAILONSLCK)
3407                         set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3408                 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3409                 dev_err(&device->cdev->dev,
3410                         "The device reservation was lost\n");
3411         }
3412 }
3413
3414 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3415                                        unsigned int first_trk,
3416                                        unsigned int last_trk)
3417 {
3418         struct dasd_eckd_private *private = device->private;
3419         unsigned int trks_per_vol;
3420         int rc = 0;
3421
3422         trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3423
3424         if (first_trk >= trks_per_vol) {
3425                 dev_warn(&device->cdev->dev,
3426                          "Start track number %u used in the space release command is too big\n",
3427                          first_trk);
3428                 rc = -EINVAL;
3429         } else if (last_trk >= trks_per_vol) {
3430                 dev_warn(&device->cdev->dev,
3431                          "Stop track number %u used in the space release command is too big\n",
3432                          last_trk);
3433                 rc = -EINVAL;
3434         } else if (first_trk > last_trk) {
3435                 dev_warn(&device->cdev->dev,
3436                          "Start track %u used in the space release command exceeds the end track\n",
3437                          first_trk);
3438                 rc = -EINVAL;
3439         }
3440         return rc;
3441 }
3442
3443 /*
3444  * Helper function to count the number of extents involved in a given range,
3445  * taking extent alignment into account.
3446  */
3447 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
3448 {
3449         int cur_pos = 0;
3450         int count = 0;
3451         int tmp;
3452
3453         if (from == to)
3454                 return 1;
3455
3456         /* Count first partial extent */
3457         if (from % trks_per_ext != 0) {
3458                 tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
3459                 if (tmp > to)
3460                         tmp = to;
3461                 cur_pos = tmp - from + 1;
3462                 count++;
3463         }
3464         /* Count full extents */
3465         if (to - (from + cur_pos) + 1 >= trks_per_ext) {
3466                 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
3467                 count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
3468                 cur_pos = tmp;
3469         }
3470         /* Count last partial extent */
3471         if (cur_pos < to)
3472                 count++;
3473
3474         return count;
3475 }
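/*
 * Worked example (hypothetical extent size): with trks_per_ext = 90, the
 * range 100..400 touches four extents, so count_exts(100, 400, 90) == 4:
 *
 *	 90..179  partial, starting at track 100
 *	180..269  full extent
 *	270..359  full extent
 *	360..449  partial, ending at track 400
 */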
3476
3477 /*
3478  * Release allocated space for a given range or an entire volume.
3479  */
3480 static struct dasd_ccw_req *
3481 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3482                   struct request *req, unsigned int first_trk,
3483                   unsigned int last_trk, int by_extent)
3484 {
3485         struct dasd_eckd_private *private = device->private;
3486         struct dasd_dso_ras_ext_range *ras_range;
3487         struct dasd_rssd_features *features;
3488         struct dasd_dso_ras_data *ras_data;
3489         u16 heads, beg_head, end_head;
3490         int cur_to_trk, cur_from_trk;
3491         struct dasd_ccw_req *cqr;
3492         u32 beg_cyl, end_cyl;
3493         struct ccw1 *ccw;
3494         int trks_per_ext;
3495         size_t ras_size;
3496         size_t size;
3497         int nr_exts;
3498         void *rq;
3499         int i;
3500
3501         if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3502                 return ERR_PTR(-EINVAL);
3503
3504         rq = req ? blk_mq_rq_to_pdu(req) : NULL;
3505
3506         features = &private->features;
3507
3508         trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3509         nr_exts = 0;
3510         if (by_extent)
3511                 nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
3512         ras_size = sizeof(*ras_data);
3513         size = ras_size + (nr_exts * sizeof(*ras_range));
3514
3515         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
3516         if (IS_ERR(cqr)) {
3517                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3518                                 "Could not allocate RAS request");
3519                 return cqr;
3520         }
3521
3522         ras_data = cqr->data;
3523         memset(ras_data, 0, size);
3524
3525         ras_data->order = DSO_ORDER_RAS;
3526         ras_data->flags.vol_type = 0; /* CKD volume */
3527         /* Release specified extents or entire volume */
3528         ras_data->op_flags.by_extent = by_extent;
3529         /*
3530          * This bit guarantees initialisation of tracks within an extent that is
3531          * not fully specified, but is only supported with a certain feature
3532          * subset.
3533          */
3534         ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
3535         ras_data->lss = private->ned->ID;
3536         ras_data->dev_addr = private->ned->unit_addr;
3537         ras_data->nr_exts = nr_exts;
3538
3539         if (by_extent) {
3540                 heads = private->rdc_data.trk_per_cyl;
3541                 cur_from_trk = first_trk;
3542                 cur_to_trk = first_trk + trks_per_ext -
3543                         (first_trk % trks_per_ext) - 1;
3544                 if (cur_to_trk > last_trk)
3545                         cur_to_trk = last_trk;
3546                 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
3547
3548                 for (i = 0; i < nr_exts; i++) {
3549                         beg_cyl = cur_from_trk / heads;
3550                         beg_head = cur_from_trk % heads;
3551                         end_cyl = cur_to_trk / heads;
3552                         end_head = cur_to_trk % heads;
3553
3554                         set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
3555                         set_ch_t(&ras_range->end_ext, end_cyl, end_head);
3556
3557                         cur_from_trk = cur_to_trk + 1;
3558                         cur_to_trk = cur_from_trk + trks_per_ext - 1;
3559                         if (cur_to_trk > last_trk)
3560                                 cur_to_trk = last_trk;
3561                         ras_range++;
3562                 }
3563         }
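	/*
	 * Illustrative mapping (typical 15 tracks per cylinder): an extent
	 * range covering tracks 180..269 is stored as
	 *
	 *	beg_ext = (cyl 12, head 0)	 180 / 15, 180 % 15
	 *	end_ext = (cyl 17, head 14)	 269 / 15, 269 % 15
	 */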
3564
3565         ccw = cqr->cpaddr;
3566         ccw->cda = (__u32)(addr_t)cqr->data;
3567         ccw->cmd_code = DASD_ECKD_CCW_DSO;
3568         ccw->count = size;
3569
3570         cqr->startdev = device;
3571         cqr->memdev = device;
3572         cqr->block = block;
3573         cqr->retries = 256;
3574         cqr->expires = device->default_expires * HZ;
3575         cqr->buildclk = get_tod_clock();
3576         cqr->status = DASD_CQR_FILLED;
3577
3578         return cqr;
3579 }
3580
3581 static int dasd_eckd_release_space_full(struct dasd_device *device)
3582 {
3583         struct dasd_ccw_req *cqr;
3584         int rc;
3585
3586         cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3587         if (IS_ERR(cqr))
3588                 return PTR_ERR(cqr);
3589
3590         rc = dasd_sleep_on_interruptible(cqr);
3591
3592         dasd_sfree_request(cqr, cqr->memdev);
3593
3594         return rc;
3595 }
3596
3597 static int dasd_eckd_release_space_trks(struct dasd_device *device,
3598                                         unsigned int from, unsigned int to)
3599 {
3600         struct dasd_eckd_private *private = device->private;
3601         struct dasd_block *block = device->block;
3602         struct dasd_ccw_req *cqr, *n;
3603         struct list_head ras_queue;
3604         unsigned int device_exts;
3605         int trks_per_ext;
3606         int stop, step;
3607         int cur_pos;
3608         int rc = 0;
3609         int retry;
3610
3611         INIT_LIST_HEAD(&ras_queue);
3612
3613         device_exts = private->real_cyl / dasd_eckd_ext_size(device);
3614         trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
3615
3616         /* Make sure device limits are not exceeded */
3617         step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
3618         cur_pos = from;
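	/*
	 * Illustrative sketch (hypothetical values): with trks_per_ext = 90
	 * and step = 180 (two extents per request), releasing tracks 100..400
	 * issues RAS requests for tracks 100..269 and 270..400; the loop below
	 * always cuts the range at an extent boundary or at 'to'.
	 */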
3619
3620         do {
3621                 retry = 0;
3622                 while (cur_pos < to) {
3623                         stop = cur_pos + step -
3624                                 ((cur_pos + step) % trks_per_ext) - 1;
3625                         if (stop > to)
3626                                 stop = to;
3627
3628                         cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
3629                         if (IS_ERR(cqr)) {
3630                                 rc = PTR_ERR(cqr);
3631                                 if (rc == -ENOMEM) {
3632                                         if (list_empty(&ras_queue))
3633                                                 goto out;
3634                                         retry = 1;
3635                                         break;
3636                                 }
3637                                 goto err_out;
3638                         }
3639
3640                         spin_lock_irq(&block->queue_lock);
3641                         list_add_tail(&cqr->blocklist, &ras_queue);
3642                         spin_unlock_irq(&block->queue_lock);
3643                         cur_pos = stop + 1;
3644                 }
3645
3646                 rc = dasd_sleep_on_queue_interruptible(&ras_queue);
3647
3648 err_out:
3649                 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
3650                         device = cqr->startdev;
3651                         private = device->private;
3652
3653                         spin_lock_irq(&block->queue_lock);
3654                         list_del_init(&cqr->blocklist);
3655                         spin_unlock_irq(&block->queue_lock);
3656                         dasd_sfree_request(cqr, device);
3657                         private->count--;
3658                 }
3659         } while (retry);
3660
3661 out:
3662         return rc;
3663 }
3664
3665 static int dasd_eckd_release_space(struct dasd_device *device,
3666                                    struct format_data_t *rdata)
3667 {
3668         if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3669                 return dasd_eckd_release_space_full(device);
3670         else if (rdata->intensity == 0)
3671                 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3672                                                     rdata->stop_unit);
3673         else
3674                 return -EINVAL;
3675 }
3676
3677 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3678                                                struct dasd_device *startdev,
3679                                                struct dasd_block *block,
3680                                                struct request *req,
3681                                                sector_t first_rec,
3682                                                sector_t last_rec,
3683                                                sector_t first_trk,
3684                                                sector_t last_trk,
3685                                                unsigned int first_offs,
3686                                                unsigned int last_offs,
3687                                                unsigned int blk_per_trk,
3688                                                unsigned int blksize)
3689 {
3690         struct dasd_eckd_private *private;
3691         unsigned long *idaws;
3692         struct LO_eckd_data *LO_data;
3693         struct dasd_ccw_req *cqr;
3694         struct ccw1 *ccw;
3695         struct req_iterator iter;
3696         struct bio_vec bv;
3697         char *dst;
3698         unsigned int off;
3699         int count, cidaw, cplength, datasize;
3700         sector_t recid;
3701         unsigned char cmd, rcmd;
3702         int use_prefix;
3703         struct dasd_device *basedev;
3704
3705         basedev = block->base;
3706         private = basedev->private;
3707         if (rq_data_dir(req) == READ)
3708                 cmd = DASD_ECKD_CCW_READ_MT;
3709         else if (rq_data_dir(req) == WRITE)
3710                 cmd = DASD_ECKD_CCW_WRITE_MT;
3711         else
3712                 return ERR_PTR(-EINVAL);
3713
3714         /* Check struct bio and count the number of blocks for the request. */
3715         count = 0;
3716         cidaw = 0;
3717         rq_for_each_segment(bv, req, iter) {
3718                 if (bv.bv_len & (blksize - 1))
3719                         /* Eckd can only do full blocks. */
3720                         return ERR_PTR(-EINVAL);
3721                 count += bv.bv_len >> (block->s2b_shift + 9);
3722                 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
3723                         cidaw += bv.bv_len >> (block->s2b_shift + 9);
3724         }
3725         /* Paranoia. */
3726         if (count != last_rec - first_rec + 1)
3727                 return ERR_PTR(-EINVAL);
3728
3729         /* use the prefix command if available */
3730         use_prefix = private->features.feature[8] & 0x01;
3731         if (use_prefix) {
3732                 /* 1x prefix + 1x locate record + number of blocks */
3733                 cplength = 2 + count;
3734                 /* 1x prefix + 1x locate record + cidaws*sizeof(long) */
3735                 datasize = sizeof(struct PFX_eckd_data) +
3736                         sizeof(struct LO_eckd_data) +
3737                         cidaw * sizeof(unsigned long);
3738         } else {
3739                 /* 1x define extent + 1x locate record + number of blocks */
3740                 cplength = 2 + count;
3741                 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
3742                 datasize = sizeof(struct DE_eckd_data) +
3743                         sizeof(struct LO_eckd_data) +
3744                         cidaw * sizeof(unsigned long);
3745         }
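	/*
	 * Illustrative sizing (hypothetical request): 16 blocks of which 2
	 * need indirect addressing give
	 *
	 *	cplength = 2 + 16;
	 *	datasize = sizeof(struct PFX_eckd_data) +
	 *		   sizeof(struct LO_eckd_data) +
	 *		   2 * sizeof(unsigned long);
	 *
	 * (with DE_eckd_data instead of PFX_eckd_data when the prefix command
	 * is not supported).
	 */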
3746         /* Find out the number of additional locate record ccws for cdl. */
3747         if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3748                 if (last_rec >= 2*blk_per_trk)
3749                         count = 2*blk_per_trk - first_rec;
3750                 cplength += count;
3751                 datasize += count*sizeof(struct LO_eckd_data);
3752         }
3753         /* Allocate the ccw request. */
3754         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3755                                    startdev, blk_mq_rq_to_pdu(req));
3756         if (IS_ERR(cqr))
3757                 return cqr;
3758         ccw = cqr->cpaddr;
3759         /* First ccw is define extent or prefix. */
3760         if (use_prefix) {
3761                 if (prefix(ccw++, cqr->data, first_trk,
3762                            last_trk, cmd, basedev, startdev) == -EAGAIN) {
3763                         /* Clock not in sync and XRC is enabled.
3764                          * Try again later.
3765                          */
3766                         dasd_sfree_request(cqr, startdev);
3767                         return ERR_PTR(-EAGAIN);
3768                 }
3769                 idaws = (unsigned long *) (cqr->data +
3770                                            sizeof(struct PFX_eckd_data));
3771         } else {
3772                 if (define_extent(ccw++, cqr->data, first_trk,
3773                                   last_trk, cmd, basedev, 0) == -EAGAIN) {
3774                         /* Clock not in sync and XRC is enabled.
3775                          * Try again later.
3776                          */
3777                         dasd_sfree_request(cqr, startdev);
3778                         return ERR_PTR(-EAGAIN);
3779                 }
3780                 idaws = (unsigned long *) (cqr->data +
3781                                            sizeof(struct DE_eckd_data));
3782         }
3783         /* Build locate_record+read/write/ccws. */
3784         LO_data = (struct LO_eckd_data *) (idaws + cidaw);
3785         recid = first_rec;
3786         if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
3787                 /* Only standard blocks so there is just one locate record. */
3788                 ccw[-1].flags |= CCW_FLAG_CC;
3789                 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
3790                               last_rec - recid + 1, cmd, basedev, blksize);
3791         }
3792         rq_for_each_segment(bv, req, iter) {
3793                 dst = page_address(bv.bv_page) + bv.bv_offset;
3794                 if (dasd_page_cache) {
3795                         char *copy = kmem_cache_alloc(dasd_page_cache,
3796                                                       GFP_DMA | __GFP_NOWARN);
3797                         if (copy && rq_data_dir(req) == WRITE)
3798                                 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
3799                         if (copy)
3800                                 dst = copy + bv.bv_offset;
3801                 }
3802                 for (off = 0; off < bv.bv_len; off += blksize) {
3803                         sector_t trkid = recid;
3804                         unsigned int recoffs = sector_div(trkid, blk_per_trk);
3805                         rcmd = cmd;
3806                         count = blksize;
3807                         /* Locate record for cdl special block ? */
3808                         if (private->uses_cdl && recid < 2*blk_per_trk) {
3809                                 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
3810                                         rcmd |= 0x8;
3811                                         count = dasd_eckd_cdl_reclen(recid);
3812                                         if (count < blksize &&
3813                                             rq_data_dir(req) == READ)
3814                                                 memset(dst + count, 0xe5,
3815                                                        blksize - count);
3816                                 }
3817                                 ccw[-1].flags |= CCW_FLAG_CC;
3818                                 locate_record(ccw++, LO_data++,
3819                                               trkid, recoffs + 1,
3820                                               1, rcmd, basedev, count);
3821                         }
3822                         /* Locate record for standard blocks ? */
3823                         if (private->uses_cdl && recid == 2*blk_per_trk) {
3824                                 ccw[-1].flags |= CCW_FLAG_CC;
3825                                 locate_record(ccw++, LO_data++,
3826                                               trkid, recoffs + 1,
3827                                               last_rec - recid + 1,
3828                                               cmd, basedev, count);
3829                         }
3830                         /* Read/write ccw. */
3831                         ccw[-1].flags |= CCW_FLAG_CC;
3832                         ccw->cmd_code = rcmd;
3833                         ccw->count = count;
3834                         if (idal_is_needed(dst, blksize)) {
3835                                 ccw->cda = (__u32)(addr_t) idaws;
3836                                 ccw->flags = CCW_FLAG_IDA;
3837                                 idaws = idal_create_words(idaws, dst, blksize);
3838                         } else {
3839                                 ccw->cda = (__u32)(addr_t) dst;
3840                                 ccw->flags = 0;
3841                         }
3842                         ccw++;
3843                         dst += blksize;
3844                         recid++;
3845                 }
3846         }
3847         if (blk_noretry_request(req) ||
3848             block->base->features & DASD_FEATURE_FAILFAST)
3849                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
3850         cqr->startdev = startdev;
3851         cqr->memdev = startdev;
3852         cqr->block = block;
3853         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
3854         cqr->lpm = dasd_path_get_ppm(startdev);
3855         cqr->retries = startdev->default_retries;
3856         cqr->buildclk = get_tod_clock();
3857         cqr->status = DASD_CQR_FILLED;
3858
3859         /* Set flags to suppress output for expected errors */
3860         if (dasd_eckd_is_ese(basedev)) {
3861                 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
3862                 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
3863                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
3864         }
3865
3866         return cqr;
3867 }
3868
3869 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
3870                                                struct dasd_device *startdev,
3871                                                struct dasd_block *block,
3872                                                struct request *req,
3873                                                sector_t first_rec,
3874                                                sector_t last_rec,
3875                                                sector_t first_trk,
3876                                                sector_t last_trk,
3877                                                unsigned int first_offs,
3878                                                unsigned int last_offs,
3879                                                unsigned int blk_per_trk,
3880                                                unsigned int blksize)
3881 {
3882         unsigned long *idaws;
3883         struct dasd_ccw_req *cqr;
3884         struct ccw1 *ccw;
3885         struct req_iterator iter;
3886         struct bio_vec bv;
3887         char *dst, *idaw_dst;
3888         unsigned int cidaw, cplength, datasize;
3889         unsigned int tlf;
3890         sector_t recid;
3891         unsigned char cmd;
3892         struct dasd_device *basedev;
3893         unsigned int trkcount, count, count_to_trk_end;
3894         unsigned int idaw_len, seg_len, part_len, len_to_track_end;
3895         unsigned char new_track, end_idaw;
3896         sector_t trkid;
3897         unsigned int recoffs;
3898
3899         basedev = block->base;
3900         if (rq_data_dir(req) == READ)
3901                 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
3902         else if (rq_data_dir(req) == WRITE)
3903                 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
3904         else
3905                 return ERR_PTR(-EINVAL);
3906
3907         /* Track based I/O needs IDAWs for each page, and not just for
3908          * 64 bit addresses. We need additional idals for pages
3909          * that get filled from two tracks, so we use the number
3910          * of records as upper limit.
3911          */
3912         cidaw = last_rec - first_rec + 1;
3913         trkcount = last_trk - first_trk + 1;
3914
3915         /* 1x prefix + one read/write ccw per track */
3916         cplength = 1 + trkcount;
3917
3918         datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
3919
3920         /* Allocate the ccw request. */
3921         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3922                                    startdev, blk_mq_rq_to_pdu(req));
3923         if (IS_ERR(cqr))
3924                 return cqr;
3925         ccw = cqr->cpaddr;
3926         /* transfer length factor: how many bytes to read from the last track */
3927         if (first_trk == last_trk)
3928                 tlf = last_offs - first_offs + 1;
3929         else
3930                 tlf = last_offs + 1;
3931         tlf *= blksize;
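	/*
	 * Example (illustrative): for a single-track request covering the
	 * blocks at offsets 2..5 of that track, tlf = (5 - 2 + 1) * blksize,
	 * i.e. the number of bytes transferred from the last (and here only)
	 * track.
	 */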
3932
3933         if (prefix_LRE(ccw++, cqr->data, first_trk,
3934                        last_trk, cmd, basedev, startdev,
3935                        1 /* format */, first_offs + 1,
3936                        trkcount, blksize,
3937                        tlf) == -EAGAIN) {
3938                 /* Clock not in sync and XRC is enabled.
3939                  * Try again later.
3940                  */
3941                 dasd_sfree_request(cqr, startdev);
3942                 return ERR_PTR(-EAGAIN);
3943         }
3944
3945         /*
3946          * The translation of the request into ccw programs must meet the
3947          * following conditions:
3948          * - all idaws but the first and the last must address full pages
3949          *   (or 2K blocks on 31-bit)
3950          * - the scope of a ccw and its idal ends at the track boundaries
3951          */
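	/*
	 * Illustrative consequence (hypothetical buffer): a segment whose
	 * physical address is not IDA_BLOCK_SIZE aligned, i.e. one for which
	 *
	 *	__pa(dst) & (IDA_BLOCK_SIZE - 1)
	 *
	 * is non-zero, may only continue the idaw accumulated so far; if it
	 * would have to start a new idaw, the loop below returns -ERANGE.
	 */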
3952         idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
3953         recid = first_rec;
3954         new_track = 1;
3955         end_idaw = 0;
3956         len_to_track_end = 0;
3957         idaw_dst = NULL;
3958         idaw_len = 0;
3959         rq_for_each_segment(bv, req, iter) {
3960                 dst = page_address(bv.bv_page) + bv.bv_offset;
3961                 seg_len = bv.bv_len;
3962                 while (seg_len) {
3963                         if (new_track) {
3964                                 trkid = recid;
3965                                 recoffs = sector_div(trkid, blk_per_trk);
3966                                 count_to_trk_end = blk_per_trk - recoffs;
3967                                 count = min((last_rec - recid + 1),
3968                                             (sector_t)count_to_trk_end);
3969                                 len_to_track_end = count * blksize;
3970                                 ccw[-1].flags |= CCW_FLAG_CC;
3971                                 ccw->cmd_code = cmd;
3972                                 ccw->count = len_to_track_end;
3973                                 ccw->cda = (__u32)(addr_t)idaws;
3974                                 ccw->flags = CCW_FLAG_IDA;
3975                                 ccw++;
3976                                 recid += count;
3977                                 new_track = 0;
3978                                 /* first idaw for a ccw may start anywhere */
3979                                 if (!idaw_dst)
3980                                         idaw_dst = dst;
3981                         }
3982                         /* If we start a new idaw, we must make sure that it
3983                          * starts on an IDA_BLOCK_SIZE boundary.
3984                          * If we continue an idaw, we must make sure that the
3985                          * current segment begins where the idaw accumulated
3986                          * so far ends.
3987                          */
3988                         if (!idaw_dst) {
3989                                 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
3990                                         dasd_sfree_request(cqr, startdev);
3991                                         return ERR_PTR(-ERANGE);
3992                                 } else
3993                                         idaw_dst = dst;
3994                         }
3995                         if ((idaw_dst + idaw_len) != dst) {
3996                                 dasd_sfree_request(cqr, startdev);
3997                                 return ERR_PTR(-ERANGE);
3998                         }
3999                         part_len = min(seg_len, len_to_track_end);
4000                         seg_len -= part_len;
4001                         dst += part_len;
4002                         idaw_len += part_len;
4003                         len_to_track_end -= part_len;
4004                         /* If the collected memory area ends on an IDA_BLOCK
4005                          * boundary, create an idaw.
4006                          * idal_create_words will handle cases where idaw_len
4007                          * is larger than IDA_BLOCK_SIZE
4008                          */
4009                         if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
4010                                 end_idaw = 1;
4011                         /* We also need to end the idaw at track end */
4012                         if (!len_to_track_end) {
4013                                 new_track = 1;
4014                                 end_idaw = 1;
4015                         }
4016                         if (end_idaw) {
4017                                 idaws = idal_create_words(idaws, idaw_dst,
4018                                                           idaw_len);
4019                                 idaw_dst = NULL;
4020                                 idaw_len = 0;
4021                                 end_idaw = 0;
4022                         }
4023                 }
4024         }
4025
4026         if (blk_noretry_request(req) ||
4027             block->base->features & DASD_FEATURE_FAILFAST)
4028                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4029         cqr->startdev = startdev;
4030         cqr->memdev = startdev;
4031         cqr->block = block;
4032         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4033         cqr->lpm = dasd_path_get_ppm(startdev);
4034         cqr->retries = startdev->default_retries;
4035         cqr->buildclk = get_tod_clock();
4036         cqr->status = DASD_CQR_FILLED;
4037
4038         /* Set flags to suppress output for expected errors */
4039         if (dasd_eckd_is_ese(basedev))
4040                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4041
4042         return cqr;
4043 }
4044
4045 static int prepare_itcw(struct itcw *itcw,
4046                         unsigned int trk, unsigned int totrk, int cmd,
4047                         struct dasd_device *basedev,
4048                         struct dasd_device *startdev,
4049                         unsigned int rec_on_trk, int count,
4050                         unsigned int blksize,
4051                         unsigned int total_data_size,
4052                         unsigned int tlf,
4053                         unsigned int blk_per_trk)
4054 {
4055         struct PFX_eckd_data pfxdata;
4056         struct dasd_eckd_private *basepriv, *startpriv;
4057         struct DE_eckd_data *dedata;
4058         struct LRE_eckd_data *lredata;
4059         struct dcw *dcw;
4060
4061         u32 begcyl, endcyl;
4062         u16 heads, beghead, endhead;
4063         u8 pfx_cmd;
4064
4065         int rc = 0;
4066         int sector = 0;
4067         int dn, d;
4068
4069
4070         /* setup prefix data */
4071         basepriv = basedev->private;
4072         startpriv = startdev->private;
4073         dedata = &pfxdata.define_extent;
4074         lredata = &pfxdata.locate_record;
4075
4076         memset(&pfxdata, 0, sizeof(pfxdata));
4077         pfxdata.format = 1; /* PFX with LRE */
4078         pfxdata.base_address = basepriv->ned->unit_addr;
4079         pfxdata.base_lss = basepriv->ned->ID;
4080         pfxdata.validity.define_extent = 1;
4081
4082         /* private uid is kept up to date, conf_data may be outdated */
4083         if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4084                 pfxdata.validity.verify_base = 1;
4085
4086         if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
4087                 pfxdata.validity.verify_base = 1;
4088                 pfxdata.validity.hyper_pav = 1;
4089         }
4090
4091         switch (cmd) {
4092         case DASD_ECKD_CCW_READ_TRACK_DATA:
4093                 dedata->mask.perm = 0x1;
4094                 dedata->attributes.operation = basepriv->attrib.operation;
4095                 dedata->blk_size = blksize;
4096                 dedata->ga_extended |= 0x42;
4097                 lredata->operation.orientation = 0x0;
4098                 lredata->operation.operation = 0x0C;
4099                 lredata->auxiliary.check_bytes = 0x01;
4100                 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4101                 break;
4102         case DASD_ECKD_CCW_WRITE_TRACK_DATA:
4103                 dedata->mask.perm = 0x02;
4104                 dedata->attributes.operation = basepriv->attrib.operation;
4105                 dedata->blk_size = blksize;
4106                 rc = set_timestamp(NULL, dedata, basedev);
4107                 dedata->ga_extended |= 0x42;
4108                 lredata->operation.orientation = 0x0;
4109                 lredata->operation.operation = 0x3F;
4110                 lredata->extended_operation = 0x23;
4111                 lredata->auxiliary.check_bytes = 0x2;
4112                 /*
4113                  * If XRC is supported the System Time Stamp is set. The
4114                  * validity of the time stamp must be reflected in the prefix
4115                  * data as well.
4116                  */
4117                 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
4118                         pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
4119                 pfx_cmd = DASD_ECKD_CCW_PFX;
4120                 break;
4121         case DASD_ECKD_CCW_READ_COUNT_MT:
4122                 dedata->mask.perm = 0x1;
4123                 dedata->attributes.operation = DASD_BYPASS_CACHE;
4124                 dedata->ga_extended |= 0x42;
4125                 dedata->blk_size = blksize;
4126                 lredata->operation.orientation = 0x2;
4127                 lredata->operation.operation = 0x16;
4128                 lredata->auxiliary.check_bytes = 0x01;
4129                 pfx_cmd = DASD_ECKD_CCW_PFX_READ;
4130                 break;
4131         default:
4132                 DBF_DEV_EVENT(DBF_ERR, basedev,
4133                               "prepare itcw, unknown opcode 0x%x", cmd);
4134                 BUG();
4135                 break;
4136         }
4137         if (rc)
4138                 return rc;
4139
4140         dedata->attributes.mode = 0x3;  /* ECKD */
4141
4142         heads = basepriv->rdc_data.trk_per_cyl;
4143         begcyl = trk / heads;
4144         beghead = trk % heads;
4145         endcyl = totrk / heads;
4146         endhead = totrk % heads;
4147
4148         /* check for sequential prestage - enhance cylinder range */
4149         if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
4150             dedata->attributes.operation == DASD_SEQ_ACCESS) {
4151
4152                 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
4153                         endcyl += basepriv->attrib.nr_cyl;
4154                 else
4155                         endcyl = (basepriv->real_cyl - 1);
4156         }
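        /* e.g. with attrib.nr_cyl = 2 on a 10017 cylinder device (example
         * numbers only), an extent ending on cylinder 100 is extended to
         * cylinder 102 so sequential prestaging can stay ahead of the access */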
4157
4158         set_ch_t(&dedata->beg_ext, begcyl, beghead);
4159         set_ch_t(&dedata->end_ext, endcyl, endhead);
4160
4161         dedata->ep_format = 0x20; /* records per track is valid */
4162         dedata->ep_rec_per_track = blk_per_trk;
4163
4164         if (rec_on_trk) {
4165                 switch (basepriv->rdc_data.dev_type) {
4166                 case 0x3390:
4167                         dn = ceil_quot(blksize + 6, 232);
4168                         d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
4169                         sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
4170                         break;
4171                 case 0x3380:
4172                         d = 7 + ceil_quot(blksize + 12, 32);
4173                         sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
4174                         break;
4175                 }
4176         }
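        /*
         * Worked example (a sketch, assuming a 3390 with blksize 4096):
         * dn = ceil(4102 / 232) = 18, d = 9 + ceil(4210 / 34) = 133,
         * so record 1 maps to sector (49 + 0 * 143) / 8 = 6.
         */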
4177
4178         if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
4179                 lredata->auxiliary.length_valid = 0;
4180                 lredata->auxiliary.length_scope = 0;
4181                 lredata->sector = 0xff;
4182         } else {
4183                 lredata->auxiliary.length_valid = 1;
4184                 lredata->auxiliary.length_scope = 1;
4185                 lredata->sector = sector;
4186         }
4187         lredata->auxiliary.imbedded_ccw_valid = 1;
4188         lredata->length = tlf;
4189         lredata->imbedded_ccw = cmd;
4190         lredata->count = count;
4191         set_ch_t(&lredata->seek_addr, begcyl, beghead);
4192         lredata->search_arg.cyl = lredata->seek_addr.cyl;
4193         lredata->search_arg.head = lredata->seek_addr.head;
4194         lredata->search_arg.record = rec_on_trk;
4195
4196         dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
4197                      &pfxdata, sizeof(pfxdata), total_data_size);
4198         return PTR_ERR_OR_ZERO(dcw);
4199 }
4200
4201 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
4202                                                struct dasd_device *startdev,
4203                                                struct dasd_block *block,
4204                                                struct request *req,
4205                                                sector_t first_rec,
4206                                                sector_t last_rec,
4207                                                sector_t first_trk,
4208                                                sector_t last_trk,
4209                                                unsigned int first_offs,
4210                                                unsigned int last_offs,
4211                                                unsigned int blk_per_trk,
4212                                                unsigned int blksize)
4213 {
4214         struct dasd_ccw_req *cqr;
4215         struct req_iterator iter;
4216         struct bio_vec bv;
4217         char *dst;
4218         unsigned int trkcount, ctidaw;
4219         unsigned char cmd;
4220         struct dasd_device *basedev;
4221         unsigned int tlf;
4222         struct itcw *itcw;
4223         struct tidaw *last_tidaw = NULL;
4224         int itcw_op;
4225         size_t itcw_size;
4226         u8 tidaw_flags;
4227         unsigned int seg_len, part_len, len_to_track_end;
4228         unsigned char new_track;
4229         sector_t recid, trkid;
4230         unsigned int offs;
4231         unsigned int count, count_to_trk_end;
4232         int ret;
4233
4234         basedev = block->base;
4235         if (rq_data_dir(req) == READ) {
4236                 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4237                 itcw_op = ITCW_OP_READ;
4238         } else if (rq_data_dir(req) == WRITE) {
4239                 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4240                 itcw_op = ITCW_OP_WRITE;
4241         } else
4242                 return ERR_PTR(-EINVAL);
4243
4244         /* track-based I/O needs to address all memory via TIDAWs,
4245          * not just for 64 bit addresses. This allows us to map
4246          * each segment directly to one tidaw.
4247          * In the case of write requests, additional tidaws may
4248          * be needed when a segment crosses a track boundary.
4249          */
4250         trkcount = last_trk - first_trk + 1;
4251         ctidaw = 0;
4252         rq_for_each_segment(bv, req, iter) {
4253                 ++ctidaw;
4254         }
4255         if (rq_data_dir(req) == WRITE)
4256                 ctidaw += (last_trk - first_trk);
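        /*
         * For instance (example numbers only): a write with 10 bio segments
         * spanning tracks 5..7 needs at most 10 + 2 tidaws, because each
         * segment maps to one tidaw and each of the two track crossings may
         * force an extra split; reads never need the extra tidaws.
         */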
4257
4258         /* Allocate the ccw request. */
4259         itcw_size = itcw_calc_size(0, ctidaw, 0);
4260         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4261                                    blk_mq_rq_to_pdu(req));
4262         if (IS_ERR(cqr))
4263                 return cqr;
4264
4265         /* transfer length factor: how many bytes to read from the last track */
4266         if (first_trk == last_trk)
4267                 tlf = last_offs - first_offs + 1;
4268         else
4269                 tlf = last_offs + 1;
4270         tlf *= blksize;
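        /* For example, with blksize 4096 and a request ending at block
         * offset 11 on its last track (last_offs = 11), tlf = 12 * 4096
         * bytes are transferred on that last track; a request confined to
         * a single track subtracts first_offs as well. */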
4271
4272         itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4273         if (IS_ERR(itcw)) {
4274                 ret = -EINVAL;
4275                 goto out_error;
4276         }
4277         cqr->cpaddr = itcw_get_tcw(itcw);
4278         if (prepare_itcw(itcw, first_trk, last_trk,
4279                          cmd, basedev, startdev,
4280                          first_offs + 1,
4281                          trkcount, blksize,
4282                          (last_rec - first_rec + 1) * blksize,
4283                          tlf, blk_per_trk) == -EAGAIN) {
4284                 /* Clock not in sync and XRC is enabled.
4285                  * Try again later.
4286                  */
4287                 ret = -EAGAIN;
4288                 goto out_error;
4289         }
4290         len_to_track_end = 0;
4291         /*
4292          * A tidaw can address 4k of memory, but must not cross page boundaries.
4293          * We can let the block layer handle this by setting
4294          * blk_queue_segment_boundary to page boundaries and
4295          * blk_max_segment_size to page size when setting up the request queue.
4296          * For write requests, a TIDAW must not cross track boundaries, because
4297          * we have to set the CBC flag on the last tidaw for each track.
4298          */
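        /*
         * Sketch with example numbers (assuming 4k blocks, 12 blocks per
         * track, and a single contiguous 64k segment starting on a track
         * boundary): the write loop below emits a 48k tidaw that ends the
         * first track, flagged TIDAW_FLAGS_INSERT_CBC, followed by a 16k
         * tidaw for the remaining four blocks on the next track.
         */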
4299         if (rq_data_dir(req) == WRITE) {
4300                 new_track = 1;
4301                 recid = first_rec;
4302                 rq_for_each_segment(bv, req, iter) {
4303                         dst = page_address(bv.bv_page) + bv.bv_offset;
4304                         seg_len = bv.bv_len;
4305                         while (seg_len) {
4306                                 if (new_track) {
4307                                         trkid = recid;
4308                                         offs = sector_div(trkid, blk_per_trk);
4309                                         count_to_trk_end = blk_per_trk - offs;
4310                                         count = min((last_rec - recid + 1),
4311                                                     (sector_t)count_to_trk_end);
4312                                         len_to_track_end = count * blksize;
4313                                         recid += count;
4314                                         new_track = 0;
4315                                 }
4316                                 part_len = min(seg_len, len_to_track_end);
4317                                 seg_len -= part_len;
4318                                 len_to_track_end -= part_len;
4319                                 /* We need to end the tidaw at track end */
4320                                 if (!len_to_track_end) {
4321                                         new_track = 1;
4322                                         tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4323                                 } else
4324                                         tidaw_flags = 0;
4325                                 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4326                                                             dst, part_len);
4327                                 if (IS_ERR(last_tidaw)) {
4328                                         ret = -EINVAL;
4329                                         goto out_error;
4330                                 }
4331                                 dst += part_len;
4332                         }
4333                 }
4334         } else {
4335                 rq_for_each_segment(bv, req, iter) {
4336                         dst = page_address(bv.bv_page) + bv.bv_offset;
4337                         last_tidaw = itcw_add_tidaw(itcw, 0x00,
4338                                                     dst, bv.bv_len);
4339                         if (IS_ERR(last_tidaw)) {
4340                                 ret = -EINVAL;
4341                                 goto out_error;
4342                         }
4343                 }
4344         }
4345         last_tidaw->flags |= TIDAW_FLAGS_LAST;
4346         last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4347         itcw_finalize(itcw);
4348
4349         if (blk_noretry_request(req) ||
4350             block->base->features & DASD_FEATURE_FAILFAST)
4351                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4352         cqr->cpmode = 1;
4353         cqr->startdev = startdev;
4354         cqr->memdev = startdev;
4355         cqr->block = block;
4356         cqr->expires = startdev->default_expires * HZ;  /* default 5 minutes */
4357         cqr->lpm = dasd_path_get_ppm(startdev);
4358         cqr->retries = startdev->default_retries;
4359         cqr->buildclk = get_tod_clock();
4360         cqr->status = DASD_CQR_FILLED;
4361
4362         /* Set flags to suppress output for expected errors */
4363         if (dasd_eckd_is_ese(basedev)) {
4364                 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4365                 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4366                 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4367         }
4368
4369         return cqr;
4370 out_error:
4371         dasd_sfree_request(cqr, startdev);
4372         return ERR_PTR(ret);
4373 }
4374
4375 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4376                                                struct dasd_block *block,
4377                                                struct request *req)
4378 {
4379         int cmdrtd, cmdwtd;
4380         int use_prefix;
4381         int fcx_multitrack;
4382         struct dasd_eckd_private *private;
4383         struct dasd_device *basedev;
4384         sector_t first_rec, last_rec;
4385         sector_t first_trk, last_trk;
4386         unsigned int first_offs, last_offs;
4387         unsigned int blk_per_trk, blksize;
4388         int cdlspecial;
4389         unsigned int data_size;
4390         struct dasd_ccw_req *cqr;
4391
4392         basedev = block->base;
4393         private = basedev->private;
4394
4395         /* Calculate number of blocks/records per track. */
4396         blksize = block->bp_block;
4397         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4398         if (blk_per_trk == 0)
4399                 return ERR_PTR(-EINVAL);
4400         /* Calculate record id of first and last block. */
4401         first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4402         first_offs = sector_div(first_trk, blk_per_trk);
4403         last_rec = last_trk =
4404                 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4405         last_offs = sector_div(last_trk, blk_per_trk);
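        /* Example (assuming 4k blocks, i.e. s2b_shift = 3 and 12 records
         * per track on a 3390): a request starting at 512-byte sector 480
         * yields first_rec = 60, first_trk = 5 and first_offs = 0. */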
4406         cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4407
4408         fcx_multitrack = private->features.feature[40] & 0x20;
4409         data_size = blk_rq_bytes(req);
4410         if (data_size % blksize)
4411                 return ERR_PTR(-EINVAL);
4412         /* tpm write requests add CBC data on each track boundary */
4413         if (rq_data_dir(req) == WRITE)
4414                 data_size += (last_trk - first_trk) * 4;
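        /* e.g. a write spanning tracks 5..7 adds 2 * 4 = 8 bytes of CBC
         * data to data_size before it is compared with fcx_max_data */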
4415
4416         /* are read track data and write track data supported in command mode? */
4417         cmdrtd = private->features.feature[9] & 0x20;
4418         cmdwtd = private->features.feature[12] & 0x40;
4419         use_prefix = private->features.feature[8] & 0x01;
4420
4421         cqr = NULL;
4422         if (cdlspecial || dasd_page_cache) {
4423                 /* do nothing, just fall through to the cmd mode single case */
4424         } else if ((data_size <= private->fcx_max_data)
4425                    && (fcx_multitrack || (first_trk == last_trk))) {
4426                 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4427                                                     first_rec, last_rec,
4428                                                     first_trk, last_trk,
4429                                                     first_offs, last_offs,
4430                                                     blk_per_trk, blksize);
4431                 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4432                     (PTR_ERR(cqr) != -ENOMEM))
4433                         cqr = NULL;
4434         } else if (use_prefix &&
4435                    (((rq_data_dir(req) == READ) && cmdrtd) ||
4436                     ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4437                 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4438                                                    first_rec, last_rec,
4439                                                    first_trk, last_trk,
4440                                                    first_offs, last_offs,
4441                                                    blk_per_trk, blksize);
4442                 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4443                     (PTR_ERR(cqr) != -ENOMEM))
4444                         cqr = NULL;
4445         }
4446         if (!cqr)
4447                 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4448                                                     first_rec, last_rec,
4449                                                     first_trk, last_trk,
4450                                                     first_offs, last_offs,
4451                                                     blk_per_trk, blksize);
4452         return cqr;
4453 }
4454
4455 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4456                                                    struct dasd_block *block,
4457                                                    struct request *req)
4458 {
4459         sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4460         unsigned int seg_len, len_to_track_end;
4461         unsigned int cidaw, cplength, datasize;
4462         sector_t first_trk, last_trk, sectors;
4463         struct dasd_eckd_private *base_priv;
4464         struct dasd_device *basedev;
4465         struct req_iterator iter;
4466         struct dasd_ccw_req *cqr;
4467         unsigned int first_offs;
4468         unsigned int trkcount;
4469         unsigned long *idaws;
4470         unsigned int size;
4471         unsigned char cmd;
4472         struct bio_vec bv;
4473         struct ccw1 *ccw;
4474         int use_prefix;
4475         void *data;
4476         char *dst;
4477
4478         /*
4479          * raw track access needs to be a multiple of 64k and on a 64k boundary.
4480          * For read requests we can fix an incorrect alignment by padding
4481          * the request with dummy pages.
4482          */
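        /*
         * Example with 128 sectors (64k) per track: a read of 200 sectors
         * starting at sector 100 gives start_padding_sectors = 100,
         * end_sector_offset = 300 % 128 = 44 and end_padding_sectors = 84,
         * so both ends are padded with rawpadpage below; a write with the
         * same alignment is rejected with -EINVAL instead.
         */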
4483         start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
4484         end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
4485                 DASD_RAW_SECTORS_PER_TRACK;
4486         end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
4487                 DASD_RAW_SECTORS_PER_TRACK;
4488         basedev = block->base;
4489         if ((start_padding_sectors || end_padding_sectors) &&
4490             (rq_data_dir(req) == WRITE)) {
4491                 DBF_DEV_EVENT(DBF_ERR, basedev,
4492                               "raw write not track aligned (%llu,%llu) req %p",
4493                               start_padding_sectors, end_padding_sectors, req);
4494                 return ERR_PTR(-EINVAL);
4495         }
4496
4497         first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
4498         last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
4499                 DASD_RAW_SECTORS_PER_TRACK;
4500         trkcount = last_trk - first_trk + 1;
4501         first_offs = 0;
4502
4503         if (rq_data_dir(req) == READ)
4504                 cmd = DASD_ECKD_CCW_READ_TRACK;
4505         else if (rq_data_dir(req) == WRITE)
4506                 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
4507         else
4508                 return ERR_PTR(-EINVAL);
4509
4510         /*
4511          * Raw track-based I/O needs IDAWs for each page,
4512          * and not just for 64 bit addresses.
4513          */
4514         cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
4515
4516         /*
4517          * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
4518          * of extended parameter. This is needed for write full track.
4519          */
4520         base_priv = basedev->private;
4521         use_prefix = base_priv->features.feature[8] & 0x01;
4522         if (use_prefix) {
4523                 cplength = 1 + trkcount;
4524                 size = sizeof(struct PFX_eckd_data) + 2;
4525         } else {
4526                 cplength = 2 + trkcount;
4527                 size = sizeof(struct DE_eckd_data) +
4528                         sizeof(struct LRE_eckd_data) + 2;
4529         }
4530         size = ALIGN(size, 8);
4531
4532         datasize = size + cidaw * sizeof(unsigned long);
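        /*
         * Sketch (example values): a three-track raw request on a device
         * with prefix support uses cplength = 1 + 3 CCWs and reserves
         * cidaw = 3 * 16 = 48 idaw slots, so datasize is the aligned
         * prefix area plus 48 * sizeof(unsigned long) bytes.
         */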
4533
4534         /* Allocate the ccw request. */
4535         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
4536                                    datasize, startdev, blk_mq_rq_to_pdu(req));
4537         if (IS_ERR(cqr))
4538                 return cqr;
4539
4540         ccw = cqr->cpaddr;
4541         data = cqr->data;
4542
4543         if (use_prefix) {
4544                 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
4545                            startdev, 1, first_offs + 1, trkcount, 0, 0);
4546         } else {
4547                 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
4548                 ccw[-1].flags |= CCW_FLAG_CC;
4549
4550                 data += sizeof(struct DE_eckd_data);
4551                 locate_record_ext(ccw++, data, first_trk, first_offs + 1,
4552                                   trkcount, cmd, basedev, 0, 0);
4553         }
4554
4555         idaws = (unsigned long *)(cqr->data + size);
4556         len_to_track_end = 0;
4557         if (start_padding_sectors) {
4558                 ccw[-1].flags |= CCW_FLAG_CC;
4559                 ccw->cmd_code = cmd;
4560                 /* maximum 3390 track size */
4561                 ccw->count = 57326;
4562                 /* 64k maps to one track */
4563                 len_to_track_end = 65536 - start_padding_sectors * 512;
4564                 ccw->cda = (__u32)(addr_t)idaws;
4565                 ccw->flags |= CCW_FLAG_IDA;
4566                 ccw->flags |= CCW_FLAG_SLI;
4567                 ccw++;
4568                 for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
4569                         idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4570         }
4571         rq_for_each_segment(bv, req, iter) {
4572                 dst = page_address(bv.bv_page) + bv.bv_offset;
4573                 seg_len = bv.bv_len;
4574                 if (cmd == DASD_ECKD_CCW_READ_TRACK)
4575                         memset(dst, 0, seg_len);
4576                 if (!len_to_track_end) {
4577                         ccw[-1].flags |= CCW_FLAG_CC;
4578                         ccw->cmd_code = cmd;
4579                         /* maximum 3390 track size */
4580                         ccw->count = 57326;
4581                         /* 64k maps to one track */
4582                         len_to_track_end = 65536;
4583                         ccw->cda = (__u32)(addr_t)idaws;
4584                         ccw->flags |= CCW_FLAG_IDA;
4585                         ccw->flags |= CCW_FLAG_SLI;
4586                         ccw++;
4587                 }
4588                 len_to_track_end -= seg_len;
4589                 idaws = idal_create_words(idaws, dst, seg_len);
4590         }
4591         for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
4592                 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
4593         if (blk_noretry_request(req) ||
4594             block->base->features & DASD_FEATURE_FAILFAST)
4595                 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4596         cqr->startdev = startdev;
4597         cqr->memdev = startdev;
4598         cqr->block = block;
4599         cqr->expires = startdev->default_expires * HZ;
4600         cqr->lpm = dasd_path_get_ppm(startdev);
4601         cqr->retries = startdev->default_retries;
4602         cqr->buildclk = get_tod_clock();
4603         cqr->status = DASD_CQR_FILLED;
4604
4605         return cqr;
4606 }
4607
4608
4609 static int
4610 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
4611 {
4612         struct dasd_eckd_private *private;
4613         struct ccw1 *ccw;
4614         struct req_iterator iter;
4615         struct bio_vec bv;
4616         char *dst, *cda;
4617         unsigned int blksize, blk_per_trk, off;
4618         sector_t recid;
4619         int status;
4620
4621         if (!dasd_page_cache)
4622                 goto out;
4623         private = cqr->block->base->private;
4624         blksize = cqr->block->bp_block;
4625         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4626         recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4627         ccw = cqr->cpaddr;
4628         /* Skip over define extent & locate record. */
4629         ccw++;
4630         if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
4631                 ccw++;
4632         rq_for_each_segment(bv, req, iter) {
4633                 dst = page_address(bv.bv_page) + bv.bv_offset;
4634                 for (off = 0; off < bv.bv_len; off += blksize) {
4635                         /* Skip locate record. */
4636                         if (private->uses_cdl && recid <= 2*blk_per_trk)
4637                                 ccw++;
4638                         if (dst) {
4639                                 if (ccw->flags & CCW_FLAG_IDA)
4640                                         cda = *((char **)((addr_t) ccw->cda));
4641                                 else
4642                                         cda = (char *)((addr_t) ccw->cda);
4643                                 if (dst != cda) {
4644                                         if (rq_data_dir(req) == READ)
4645                                                 memcpy(dst, cda, bv.bv_len);
4646                                         kmem_cache_free(dasd_page_cache,
4647                                             (void *)((addr_t)cda & PAGE_MASK));
4648                                 }
4649                                 dst = NULL;
4650                         }
4651                         ccw++;
4652                         recid++;
4653                 }
4654         }
4655 out:
4656         status = cqr->status == DASD_CQR_DONE;
4657         dasd_sfree_request(cqr, cqr->memdev);
4658         return status;
4659 }
4660
4661 /*
4662  * Modify ccw/tcw in cqr so it can be started on a base device.
4663  *
4664  * Note that this is not enough to restart the cqr!
4665  * Either reset cqr->startdev as well (summary unit check handling)
4666  * or restart via separate cqr (as in ERP handling).
4667  */
4668 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4669 {
4670         struct ccw1 *ccw;
4671         struct PFX_eckd_data *pfxdata;
4672         struct tcw *tcw;
4673         struct tccb *tccb;
4674         struct dcw *dcw;
4675
4676         if (cqr->cpmode == 1) {
4677                 tcw = cqr->cpaddr;
4678                 tccb = tcw_get_tccb(tcw);
4679                 dcw = (struct dcw *)&tccb->tca[0];
4680                 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4681                 pfxdata->validity.verify_base = 0;
4682                 pfxdata->validity.hyper_pav = 0;
4683         } else {
4684                 ccw = cqr->cpaddr;
4685                 pfxdata = cqr->data;
4686                 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4687                         pfxdata->validity.verify_base = 0;
4688                         pfxdata->validity.hyper_pav = 0;
4689                 }
4690         }
4691 }
4692
4693 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4694
4695 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4696                                                      struct dasd_block *block,
4697                                                      struct request *req)
4698 {
4699         struct dasd_eckd_private *private;
4700         struct dasd_device *startdev;
4701         unsigned long flags;
4702         struct dasd_ccw_req *cqr;
4703
4704         startdev = dasd_alias_get_start_dev(base);
4705         if (!startdev)
4706                 startdev = base;
4707         private = startdev->private;
4708         if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4709                 return ERR_PTR(-EBUSY);
4710
4711         spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4712         private->count++;
4713         if ((base->features & DASD_FEATURE_USERAW))
4714                 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4715         else
4716                 cqr = dasd_eckd_build_cp(startdev, block, req);
4717         if (IS_ERR(cqr))
4718                 private->count--;
4719         spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4720         return cqr;
4721 }
4722
4723 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4724                                    struct request *req)
4725 {
4726         struct dasd_eckd_private *private;
4727         unsigned long flags;
4728
4729         spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4730         private = cqr->memdev->private;
4731         private->count--;
4732         spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4733         return dasd_eckd_free_cp(cqr, req);
4734 }
4735
4736 static int
4737 dasd_eckd_fill_info(struct dasd_device * device,
4738                     struct dasd_information2_t * info)
4739 {
4740         struct dasd_eckd_private *private = device->private;
4741
4742         info->label_block = 2;
4743         info->FBA_layout = private->uses_cdl ? 0 : 1;
4744         info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4745         info->characteristics_size = sizeof(private->rdc_data);
4746         memcpy(info->characteristics, &private->rdc_data,
4747                sizeof(private->rdc_data));
4748         info->confdata_size = min((unsigned long)private->conf_len,
4749                                   sizeof(info->configuration_data));
4750         memcpy(info->configuration_data, private->conf_data,
4751                info->confdata_size);
4752         return 0;
4753 }
4754
4755 /*
4756  * SECTION: ioctl functions for eckd devices.
4757  */
4758
4759 /*
4760  * Release device ioctl.
4761  * Builds a channel program to release a previously reserved
4762  * (see dasd_eckd_reserve) device.
4763  */
4764 static int
4765 dasd_eckd_release(struct dasd_device *device)
4766 {
4767         struct dasd_ccw_req *cqr;
4768         int rc;
4769         struct ccw1 *ccw;
4770         int useglobal;
4771
4772         if (!capable(CAP_SYS_ADMIN))
4773                 return -EACCES;
4774
4775         useglobal = 0;
4776         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4777         if (IS_ERR(cqr)) {
4778                 mutex_lock(&dasd_reserve_mutex);
4779                 useglobal = 1;
4780                 cqr = &dasd_reserve_req->cqr;
4781                 memset(cqr, 0, sizeof(*cqr));
4782                 memset(&dasd_reserve_req->ccw, 0,
4783                        sizeof(dasd_reserve_req->ccw));
4784                 cqr->cpaddr = &dasd_reserve_req->ccw;
4785                 cqr->data = &dasd_reserve_req->data;
4786                 cqr->magic = DASD_ECKD_MAGIC;
4787         }
4788         ccw = cqr->cpaddr;
4789         ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4790         ccw->flags |= CCW_FLAG_SLI;
4791         ccw->count = 32;
4792         ccw->cda = (__u32)(addr_t) cqr->data;
4793         cqr->startdev = device;
4794         cqr->memdev = device;
4795         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4796         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4797         cqr->retries = 2;       /* set retry counter to enable basic ERP */
4798         cqr->expires = 2 * HZ;
4799         cqr->buildclk = get_tod_clock();
4800         cqr->status = DASD_CQR_FILLED;
4801
4802         rc = dasd_sleep_on_immediatly(cqr);
4803         if (!rc)
4804                 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4805
4806         if (useglobal)
4807                 mutex_unlock(&dasd_reserve_mutex);
4808         else
4809                 dasd_sfree_request(cqr, cqr->memdev);
4810         return rc;
4811 }
4812
4813 /*
4814  * Reserve device ioctl.
4815  * Options are set to 'synchronous wait for interrupt' and
4816  * 'timeout the request'. This leads to terminating the I/O if
4817  * the interrupt is outstanding for a certain time.
4818  */
4819 static int
4820 dasd_eckd_reserve(struct dasd_device *device)
4821 {
4822         struct dasd_ccw_req *cqr;
4823         int rc;
4824         struct ccw1 *ccw;
4825         int useglobal;
4826
4827         if (!capable(CAP_SYS_ADMIN))
4828                 return -EACCES;
4829
4830         useglobal = 0;
4831         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4832         if (IS_ERR(cqr)) {
4833                 mutex_lock(&dasd_reserve_mutex);
4834                 useglobal = 1;
4835                 cqr = &dasd_reserve_req->cqr;
4836                 memset(cqr, 0, sizeof(*cqr));
4837                 memset(&dasd_reserve_req->ccw, 0,
4838                        sizeof(dasd_reserve_req->ccw));
4839                 cqr->cpaddr = &dasd_reserve_req->ccw;
4840                 cqr->data = &dasd_reserve_req->data;
4841                 cqr->magic = DASD_ECKD_MAGIC;
4842         }
4843         ccw = cqr->cpaddr;
4844         ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
4845         ccw->flags |= CCW_FLAG_SLI;
4846         ccw->count = 32;
4847         ccw->cda = (__u32)(addr_t) cqr->data;
4848         cqr->startdev = device;
4849         cqr->memdev = device;
4850         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4851         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4852         cqr->retries = 2;       /* set retry counter to enable basic ERP */
4853         cqr->expires = 2 * HZ;
4854         cqr->buildclk = get_tod_clock();
4855         cqr->status = DASD_CQR_FILLED;
4856
4857         rc = dasd_sleep_on_immediatly(cqr);
4858         if (!rc)
4859                 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4860
4861         if (useglobal)
4862                 mutex_unlock(&dasd_reserve_mutex);
4863         else
4864                 dasd_sfree_request(cqr, cqr->memdev);
4865         return rc;
4866 }
4867
4868 /*
4869  * Steal lock ioctl - unconditional reserve device.
4870  * Builds a channel program to break a device's reservation.
4871  * (unconditional reserve)
4872  */
4873 static int
4874 dasd_eckd_steal_lock(struct dasd_device *device)
4875 {
4876         struct dasd_ccw_req *cqr;
4877         int rc;
4878         struct ccw1 *ccw;
4879         int useglobal;
4880
4881         if (!capable(CAP_SYS_ADMIN))
4882                 return -EACCES;
4883
4884         useglobal = 0;
4885         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4886         if (IS_ERR(cqr)) {
4887                 mutex_lock(&dasd_reserve_mutex);
4888                 useglobal = 1;
4889                 cqr = &dasd_reserve_req->cqr;
4890                 memset(cqr, 0, sizeof(*cqr));
4891                 memset(&dasd_reserve_req->ccw, 0,
4892                        sizeof(dasd_reserve_req->ccw));
4893                 cqr->cpaddr = &dasd_reserve_req->ccw;
4894                 cqr->data = &dasd_reserve_req->data;
4895                 cqr->magic = DASD_ECKD_MAGIC;
4896         }
4897         ccw = cqr->cpaddr;
4898         ccw->cmd_code = DASD_ECKD_CCW_SLCK;
4899         ccw->flags |= CCW_FLAG_SLI;
4900         ccw->count = 32;
4901         ccw->cda = (__u32)(addr_t) cqr->data;
4902         cqr->startdev = device;
4903         cqr->memdev = device;
4904         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4905         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4906         cqr->retries = 2;       /* set retry counter to enable basic ERP */
4907         cqr->expires = 2 * HZ;
4908         cqr->buildclk = get_tod_clock();
4909         cqr->status = DASD_CQR_FILLED;
4910
4911         rc = dasd_sleep_on_immediatly(cqr);
4912         if (!rc)
4913                 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4914
4915         if (useglobal)
4916                 mutex_unlock(&dasd_reserve_mutex);
4917         else
4918                 dasd_sfree_request(cqr, cqr->memdev);
4919         return rc;
4920 }
4921
4922 /*
4923  * SNID - Sense Path Group ID
4924  * This ioctl may be used in situations where I/O is stalled due to
4925  * a reserve, so if the normal dasd_smalloc_request fails, we use the
4926  * preallocated dasd_reserve_req.
4927  */
4928 static int dasd_eckd_snid(struct dasd_device *device,
4929                           void __user *argp)
4930 {
4931         struct dasd_ccw_req *cqr;
4932         int rc;
4933         struct ccw1 *ccw;
4934         int useglobal;
4935         struct dasd_snid_ioctl_data usrparm;
4936
4937         if (!capable(CAP_SYS_ADMIN))
4938                 return -EACCES;
4939
4940         if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
4941                 return -EFAULT;
4942
4943         useglobal = 0;
4944         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
4945                                    sizeof(struct dasd_snid_data), device,
4946                                    NULL);
4947         if (IS_ERR(cqr)) {
4948                 mutex_lock(&dasd_reserve_mutex);
4949                 useglobal = 1;
4950                 cqr = &dasd_reserve_req->cqr;
4951                 memset(cqr, 0, sizeof(*cqr));
4952                 memset(&dasd_reserve_req->ccw, 0,
4953                        sizeof(dasd_reserve_req->ccw));
4954                 cqr->cpaddr = &dasd_reserve_req->ccw;
4955                 cqr->data = &dasd_reserve_req->data;
4956                 cqr->magic = DASD_ECKD_MAGIC;
4957         }
4958         ccw = cqr->cpaddr;
4959         ccw->cmd_code = DASD_ECKD_CCW_SNID;
4960         ccw->flags |= CCW_FLAG_SLI;
4961         ccw->count = 12;
4962         ccw->cda = (__u32)(addr_t) cqr->data;
4963         cqr->startdev = device;
4964         cqr->memdev = device;
4965         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4966         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4967         set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
4968         cqr->retries = 5;
4969         cqr->expires = 10 * HZ;
4970         cqr->buildclk = get_tod_clock();
4971         cqr->status = DASD_CQR_FILLED;
4972         cqr->lpm = usrparm.path_mask;
4973
4974         rc = dasd_sleep_on_immediatly(cqr);
4975         /* verify that I/O processing didn't modify the path mask */
4976         if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
4977                 rc = -EIO;
4978         if (!rc) {
4979                 usrparm.data = *((struct dasd_snid_data *)cqr->data);
4980                 if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
4981                         rc = -EFAULT;
4982         }
4983
4984         if (useglobal)
4985                 mutex_unlock(&dasd_reserve_mutex);
4986         else
4987                 dasd_sfree_request(cqr, cqr->memdev);
4988         return rc;
4989 }
4990
4991 /*
4992  * Read performance statistics
4993  */
4994 static int
4995 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
4996 {
4997         struct dasd_psf_prssd_data *prssdp;
4998         struct dasd_rssd_perf_stats_t *stats;
4999         struct dasd_ccw_req *cqr;
5000         struct ccw1 *ccw;
5001         int rc;
5002
5003         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
5004                                    (sizeof(struct dasd_psf_prssd_data) +
5005                                     sizeof(struct dasd_rssd_perf_stats_t)),
5006                                    device, NULL);
5007         if (IS_ERR(cqr)) {
5008                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5009                             "Could not allocate initialization request");
5010                 return PTR_ERR(cqr);
5011         }
5012         cqr->startdev = device;
5013         cqr->memdev = device;
5014         cqr->retries = 0;
5015         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5016         cqr->expires = 10 * HZ;
5017
5018         /* Prepare for Read Subsystem Data */
5019         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5020         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5021         prssdp->order = PSF_ORDER_PRSSD;
5022         prssdp->suborder = 0x01;        /* Performance Statistics */
5023         prssdp->varies[1] = 0x01;       /* Perf Statistics for the Subsystem */
5024
5025         ccw = cqr->cpaddr;
5026         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5027         ccw->count = sizeof(struct dasd_psf_prssd_data);
5028         ccw->flags |= CCW_FLAG_CC;
5029         ccw->cda = (__u32)(addr_t) prssdp;
5030
5031         /* Read Subsystem Data - Performance Statistics */
5032         stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5033         memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
5034
5035         ccw++;
5036         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5037         ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
5038         ccw->cda = (__u32)(addr_t) stats;
5039
5040         cqr->buildclk = get_tod_clock();
5041         cqr->status = DASD_CQR_FILLED;
5042         rc = dasd_sleep_on(cqr);
5043         if (rc == 0) {
5044                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5045                 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5046                 if (copy_to_user(argp, stats,
5047                                  sizeof(struct dasd_rssd_perf_stats_t)))
5048                         rc = -EFAULT;
5049         }
5050         dasd_sfree_request(cqr, cqr->memdev);
5051         return rc;
5052 }
5053
5054 /*
5055  * Get attributes (cache operations)
5056  * Returns the cache attributes used in Define Extent (DE).
5057  */
5058 static int
5059 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5060 {
5061         struct dasd_eckd_private *private = device->private;
5062         struct attrib_data_t attrib = private->attrib;
5063         int rc;
5064
5065         if (!capable(CAP_SYS_ADMIN))
5066                 return -EACCES;
5067         if (!argp)
5068                 return -EINVAL;
5069
5070         rc = 0;
5071         if (copy_to_user(argp, (long *) &attrib,
5072                          sizeof(struct attrib_data_t)))
5073                 rc = -EFAULT;
5074
5075         return rc;
5076 }
5077
5078 /*
5079  * Set attributes (cache operations)
5080  * Stores the attributes for cache operations to be used in Define Extent (DE).
5081  */
5082 static int
5083 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5084 {
5085         struct dasd_eckd_private *private = device->private;
5086         struct attrib_data_t attrib;
5087
5088         if (!capable(CAP_SYS_ADMIN))
5089                 return -EACCES;
5090         if (!argp)
5091                 return -EINVAL;
5092
5093         if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5094                 return -EFAULT;
5095         private->attrib = attrib;
5096
5097         dev_info(&device->cdev->dev,
5098                  "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5099                  private->attrib.operation, private->attrib.nr_cyl);
5100         return 0;
5101 }
5102
5103 /*
5104  * Issue syscall I/O to EMC Symmetrix array.
5105  * CCWs are PSF and RSSD
5106  */
5107 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
5108 {
5109         struct dasd_symmio_parms usrparm;
5110         char *psf_data, *rssd_result;
5111         struct dasd_ccw_req *cqr;
5112         struct ccw1 *ccw;
5113         char psf0, psf1;
5114         int rc;
5115
5116         if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
5117                 return -EACCES;
5118         psf0 = psf1 = 0;
5119
5120         /* Copy parms from caller */
5121         rc = -EFAULT;
5122         if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5123                 goto out;
5124         if (is_compat_task()) {
5125                 /* Make sure pointers are sane even on 31 bit. */
5126                 rc = -EINVAL;
5127                 if ((usrparm.psf_data >> 32) != 0)
5128                         goto out;
5129                 if ((usrparm.rssd_result >> 32) != 0)
5130                         goto out;
5131                 usrparm.psf_data &= 0x7fffffffULL;
5132                 usrparm.rssd_result &= 0x7fffffffULL;
5133         }
5134         /* at least 2 bytes are accessed and should be allocated */
5135         if (usrparm.psf_data_len < 2) {
5136                 DBF_DEV_EVENT(DBF_WARNING, device,
5137                               "Symmetrix ioctl invalid data length %d",
5138                               usrparm.psf_data_len);
5139                 rc = -EINVAL;
5140                 goto out;
5141         }
5142         /* alloc I/O data area */
5143         psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
5144         rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
5145         if (!psf_data || !rssd_result) {
5146                 rc = -ENOMEM;
5147                 goto out_free;
5148         }
5149
5150         /* get syscall header from user space */
5151         rc = -EFAULT;
5152         if (copy_from_user(psf_data,
5153                            (void __user *)(unsigned long) usrparm.psf_data,
5154                            usrparm.psf_data_len))
5155                 goto out_free;
5156         psf0 = psf_data[0];
5157         psf1 = psf_data[1];
5158
5159         /* setup CCWs for PSF + RSSD */
5160         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
5161         if (IS_ERR(cqr)) {
5162                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5163                         "Could not allocate initialization request");
5164                 rc = PTR_ERR(cqr);
5165                 goto out_free;
5166         }
5167
5168         cqr->startdev = device;
5169         cqr->memdev = device;
5170         cqr->retries = 3;
5171         cqr->expires = 10 * HZ;
5172         cqr->buildclk = get_tod_clock();
5173         cqr->status = DASD_CQR_FILLED;
5174
5175         /* Build the ccws */
5176         ccw = cqr->cpaddr;
5177
5178         /* PSF ccw */
5179         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5180         ccw->count = usrparm.psf_data_len;
5181         ccw->flags |= CCW_FLAG_CC;
5182         ccw->cda = (__u32)(addr_t) psf_data;
5183
5184         ccw++;
5185
5186         /* RSSD ccw  */
5187         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5188         ccw->count = usrparm.rssd_result_len;
5189         ccw->flags = CCW_FLAG_SLI ;
5190         ccw->cda = (__u32)(addr_t) rssd_result;
5191
5192         rc = dasd_sleep_on(cqr);
5193         if (rc)
5194                 goto out_sfree;
5195
5196         rc = -EFAULT;
5197         if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
5198                            rssd_result, usrparm.rssd_result_len))
5199                 goto out_sfree;
5200         rc = 0;
5201
5202 out_sfree:
5203         dasd_sfree_request(cqr, cqr->memdev);
5204 out_free:
5205         kfree(rssd_result);
5206         kfree(psf_data);
5207 out:
5208         DBF_DEV_EVENT(DBF_WARNING, device,
5209                       "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
5210                       (int) psf0, (int) psf1, rc);
5211         return rc;
5212 }
5213
5214 static int
5215 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5216 {
5217         struct dasd_device *device = block->base;
5218
5219         switch (cmd) {
5220         case BIODASDGATTR:
5221                 return dasd_eckd_get_attrib(device, argp);
5222         case BIODASDSATTR:
5223                 return dasd_eckd_set_attrib(device, argp);
5224         case BIODASDPSRD:
5225                 return dasd_eckd_performance(device, argp);
5226         case BIODASDRLSE:
5227                 return dasd_eckd_release(device);
5228         case BIODASDRSRV:
5229                 return dasd_eckd_reserve(device);
5230         case BIODASDSLCK:
5231                 return dasd_eckd_steal_lock(device);
5232         case BIODASDSNID:
5233                 return dasd_eckd_snid(device, argp);
5234         case BIODASDSYMMIO:
5235                 return dasd_symm_io(device, argp);
5236         default:
5237                 return -ENOTTY;
5238         }
5239 }
5240
5241 /*
5242  * Dump the range of CCWs into the 'page' buffer
5243  * and return the number of printed characters.
5244  */
5245 static int
5246 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5247 {
5248         int len, count;
5249         char *datap;
5250
5251         len = 0;
5252         while (from <= to) {
5253                 len += sprintf(page + len, PRINTK_HEADER
5254                                " CCW %p: %08X %08X DAT:",
5255                                from, ((int *) from)[0], ((int *) from)[1]);
5256
5257                 /* get pointer to data (consider IDALs) */
5258                 if (from->flags & CCW_FLAG_IDA)
5259                         datap = (char *) *((addr_t *) (addr_t) from->cda);
5260                 else
5261                         datap = (char *) ((addr_t) from->cda);
5262
5263                 /* dump data (max 32 bytes) */
5264                 for (count = 0; count < from->count && count < 32; count++) {
5265                         if (count % 8 == 0) len += sprintf(page + len, " ");
5266                         if (count % 4 == 0) len += sprintf(page + len, " ");
5267                         len += sprintf(page + len, "%02x", datap[count]);
5268                 }
5269                 len += sprintf(page + len, "\n");
5270                 from++;
5271         }
5272         return len;
5273 }
5274
5275 static void
5276 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
5277                          char *reason)
5278 {
5279         u64 *sense;
5280         u64 *stat;
5281
5282         sense = (u64 *) dasd_get_sense(irb);
5283         stat = (u64 *) &irb->scsw;
5284         if (sense) {
5285                 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
5286                               "%016llx %016llx %016llx %016llx",
5287                               reason, *stat, *((u32 *) (stat + 1)),
5288                               sense[0], sense[1], sense[2], sense[3]);
5289         } else {
5290                 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
5291                               reason, *stat, *((u32 *) (stat + 1)),
5292                               "NO VALID SENSE");
5293         }
5294 }
5295
5296 /*
5297  * Print sense data and related channel program.
5298  * Parts are printed because the printk buffer is only 1024 bytes.
5299  */
5300 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
5301                                  struct dasd_ccw_req *req, struct irb *irb)
5302 {
5303         char *page;
5304         struct ccw1 *first, *last, *fail, *from, *to;
5305         int len, sl, sct;
5306
5307         page = (char *) get_zeroed_page(GFP_ATOMIC);
5308         if (page == NULL) {
5309                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5310                               "No memory to dump sense data\n");
5311                 return;
5312         }
5313         /* dump the sense data */
5314         len = sprintf(page, PRINTK_HEADER
5315                       " I/O status report for device %s:\n",
5316                       dev_name(&device->cdev->dev));
5317         len += sprintf(page + len, PRINTK_HEADER
5318                        " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5319                        "CS:%02X RC:%d\n",
5320                        req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5321                        scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5322                        scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5323                        req ? req->intrc : 0);
5324         len += sprintf(page + len, PRINTK_HEADER
5325                        " device %s: Failing CCW: %p\n",
5326                        dev_name(&device->cdev->dev),
5327                        (void *) (addr_t) irb->scsw.cmd.cpa);
5328         if (irb->esw.esw0.erw.cons) {
5329                 for (sl = 0; sl < 4; sl++) {
5330                         len += sprintf(page + len, PRINTK_HEADER
5331                                        " Sense(hex) %2d-%2d:",
5332                                        (8 * sl), ((8 * sl) + 7));
5333
5334                         for (sct = 0; sct < 8; sct++) {
5335                                 len += sprintf(page + len, " %02x",
5336                                                irb->ecw[8 * sl + sct]);
5337                         }
5338                         len += sprintf(page + len, "\n");
5339                 }
5340
5341                 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
5342                         /* 24 Byte Sense Data */
5343                         sprintf(page + len, PRINTK_HEADER
5344                                 " 24 Byte: %x MSG %x, "
5345                                 "%s MSGb to SYSOP\n",
5346                                 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
5347                                 irb->ecw[1] & 0x10 ? "" : "no");
5348                 } else {
5349                         /* 32 Byte Sense Data */
5350                         sprintf(page + len, PRINTK_HEADER
5351                                 " 32 Byte: Format: %x "
5352                                 "Exception class %x\n",
5353                                 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
5354                 }
5355         } else {
5356                 sprintf(page + len, PRINTK_HEADER
5357                         " SORRY - NO VALID SENSE AVAILABLE\n");
5358         }
5359         printk(KERN_ERR "%s", page);
5360
5361         if (req) {
5362                 /* req == NULL for unsolicited interrupts */
5363                 /* dump the Channel Program (max 140 Bytes per line) */
5364                 /* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
5365                 first = req->cpaddr;
5366                 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
5367                 to = min(first + 6, last);
5368                 len = sprintf(page, PRINTK_HEADER
5369                               " Related CP in req: %p\n", req);
5370                 dasd_eckd_dump_ccw_range(first, to, page + len);
5371                 printk(KERN_ERR "%s", page);
5372
5373                 /* print failing CCW area (maximum 4) */
5374                 /* scsw->cda is either valid or zero  */
5375                 len = 0;
5376                 from = ++to;
5377                 fail = (struct ccw1 *)(addr_t)
5378                                 irb->scsw.cmd.cpa; /* failing CCW */
5379                 if (from < fail - 2) {
5380                         from = fail - 2;     /* there is a gap - print header */
5381                         len += sprintf(page, PRINTK_HEADER "......\n");
5382                 }
5383                 to = min(fail + 1, last);
5384                 len += dasd_eckd_dump_ccw_range(from, to, page + len);
5385
5386                 /* print last CCWs (maximum 2) */
5387                 from = max(from, ++to);
5388                 if (from < last - 1) {
5389                         from = last - 1;     /* there is a gap - print header */
5390                         len += sprintf(page + len, PRINTK_HEADER "......\n");
5391                 }
5392                 len += dasd_eckd_dump_ccw_range(from, last, page + len);
5393                 if (len > 0)
5394                         printk(KERN_ERR "%s", page);
5395         }
5396         free_page((unsigned long) page);
5397 }
5398
5399
5400 /*
5401  * Print sense data from a tcw.
5402  */
5403 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
5404                                  struct dasd_ccw_req *req, struct irb *irb)
5405 {
5406         char *page;
5407         int len, sl, sct, residual;
5408         struct tsb *tsb;
5409         u8 *sense, *rcq;
5410
5411         page = (char *) get_zeroed_page(GFP_ATOMIC);
5412         if (page == NULL) {
5413                 DBF_DEV_EVENT(DBF_WARNING, device, " %s",
5414                             "No memory to dump sense data");
5415                 return;
5416         }
5417         /* dump the sense data */
5418         len = sprintf(page, PRINTK_HEADER
5419                       " I/O status report for device %s:\n",
5420                       dev_name(&device->cdev->dev));
5421         len += sprintf(page + len, PRINTK_HEADER
5422                        " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
5423                        "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
5424                        req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
5425                        scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
5426                        scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
5427                        irb->scsw.tm.fcxs,
5428                        (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
5429                        req ? req->intrc : 0);
5430         len += sprintf(page + len, PRINTK_HEADER
5431                        " device %s: Failing TCW: %p\n",
5432                        dev_name(&device->cdev->dev),
5433                        (void *) (addr_t) irb->scsw.tm.tcw);
5434
5435         tsb = NULL;
5436         sense = NULL;
5437         if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
5438                 tsb = tcw_get_tsb(
5439                         (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
5440
5441         if (tsb) {
5442                 len += sprintf(page + len, PRINTK_HEADER
5443                                " tsb->length %d\n", tsb->length);
5444                 len += sprintf(page + len, PRINTK_HEADER
5445                                " tsb->flags %x\n", tsb->flags);
5446                 len += sprintf(page + len, PRINTK_HEADER
5447                                " tsb->dcw_offset %d\n", tsb->dcw_offset);
5448                 len += sprintf(page + len, PRINTK_HEADER
5449                                " tsb->count %d\n", tsb->count);
5450                 residual = tsb->count - 28;
5451                 len += sprintf(page + len, PRINTK_HEADER
5452                                " residual %d\n", residual);
5453
5454                 switch (tsb->flags & 0x07) {
5455                 case 1: /* tsa_iostat */
5456                         len += sprintf(page + len, PRINTK_HEADER
5457                                " tsb->tsa.iostat.dev_time %d\n",
5458                                        tsb->tsa.iostat.dev_time);
5459                         len += sprintf(page + len, PRINTK_HEADER
5460                                " tsb->tsa.iostat.def_time %d\n",
5461                                        tsb->tsa.iostat.def_time);
5462                         len += sprintf(page + len, PRINTK_HEADER
5463                                " tsb->tsa.iostat.queue_time %d\n",
5464                                        tsb->tsa.iostat.queue_time);
5465                         len += sprintf(page + len, PRINTK_HEADER
5466                                " tsb->tsa.iostat.dev_busy_time %d\n",
5467                                        tsb->tsa.iostat.dev_busy_time);
5468                         len += sprintf(page + len, PRINTK_HEADER
5469                                " tsb->tsa.iostat.dev_act_time %d\n",
5470                                        tsb->tsa.iostat.dev_act_time);
5471                         sense = tsb->tsa.iostat.sense;
5472                         break;
5473                 case 2: /* ts_ddpc */
5474                         len += sprintf(page + len, PRINTK_HEADER
5475                                " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
5476                         for (sl = 0; sl < 2; sl++) {
5477                                 len += sprintf(page + len, PRINTK_HEADER
5478                                                " tsb->tsa.ddpc.rcq %2d-%2d: ",
5479                                                (8 * sl), ((8 * sl) + 7));
5480                                 rcq = tsb->tsa.ddpc.rcq;
5481                                 for (sct = 0; sct < 8; sct++) {
5482                                         len += sprintf(page + len, " %02x",
5483                                                        rcq[8 * sl + sct]);
5484                                 }
5485                                 len += sprintf(page + len, "\n");
5486                         }
5487                         sense = tsb->tsa.ddpc.sense;
5488                         break;
5489                 case 3: /* tsa_intrg */
5490                         len += sprintf(page + len, PRINTK_HEADER
5491                                       " tsb->tsa.intrg.: not supported yet\n");
5492                         break;
5493                 }
5494
5495                 if (sense) {
5496                         for (sl = 0; sl < 4; sl++) {
5497                                 len += sprintf(page + len, PRINTK_HEADER
5498                                                " Sense(hex) %2d-%2d:",
5499                                                (8 * sl), ((8 * sl) + 7));
5500                                 for (sct = 0; sct < 8; sct++) {
5501                                         len += sprintf(page + len, " %02x",
5502                                                        sense[8 * sl + sct]);
5503                                 }
5504                                 len += sprintf(page + len, "\n");
5505                         }
5506
5507                         if (sense[27] & DASD_SENSE_BIT_0) {
5508                                 /* 24 Byte Sense Data */
5509                                 sprintf(page + len, PRINTK_HEADER
5510                                         " 24 Byte: %x MSG %x, "
5511                                         "%s MSGb to SYSOP\n",
5512                                         sense[7] >> 4, sense[7] & 0x0f,
5513                                         sense[1] & 0x10 ? "" : "no");
5514                         } else {
5515                                 /* 32 Byte Sense Data */
5516                                 sprintf(page + len, PRINTK_HEADER
5517                                         " 32 Byte: Format: %x "
5518                                         "Exception class %x\n",
5519                                         sense[6] & 0x0f, sense[22] >> 4);
5520                         }
5521                 } else {
5522                         sprintf(page + len, PRINTK_HEADER
5523                                 " SORRY - NO VALID SENSE AVAILABLE\n");
5524                 }
5525         } else {
5526                 sprintf(page + len, PRINTK_HEADER
5527                         " SORRY - NO TSB DATA AVAILABLE\n");
5528         }
5529         printk(KERN_ERR "%s", page);
5530         free_page((unsigned long) page);
5531 }
5532
5533 static void dasd_eckd_dump_sense(struct dasd_device *device,
5534                                  struct dasd_ccw_req *req, struct irb *irb)
5535 {
5536         u8 *sense = dasd_get_sense(irb);
5537
5538         if (scsw_is_tm(&irb->scsw)) {
5539                 /*
5540                  * In some cases the 'File Protected' or 'Incorrect Length'
5541                  * error might be expected and log messages shouldn't be written
5542                  * then. Check if the corresponding suppress bit is set.
5543                  */
5544                 if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5545                     test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5546                         return;
5547                 if (scsw_cstat(&irb->scsw) == 0x40 &&
5548                     test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5549                         return;
5550
5551                 dasd_eckd_dump_sense_tcw(device, req, irb);
5552         } else {
5553                 /*
5554                  * In some cases the 'Command Reject' or 'No Record Found'
5555                  * error might be expected and log messages shouldn't be
5556                  * written then. Check if the corresponding suppress bit is set.
5557                  */
5558                 if (sense && sense[0] & SNS0_CMD_REJECT &&
5559                     test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5560                         return;
5561
5562                 if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5563                     test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5564                         return;
5565
5566                 dasd_eckd_dump_sense_ccw(device, req, irb);
5567         }
5568 }
5569
5570 static int dasd_eckd_pm_freeze(struct dasd_device *device)
5571 {
5572         /*
5573          * the device should be disconnected from our LCU structure;
5574          * on restore we will reconnect it and reread LCU-specific
5575          * information like PAV support that might have changed
5576          */
5577         dasd_alias_remove_device(device);
5578         dasd_alias_disconnect_device_from_lcu(device);
5579
5580         return 0;
5581 }
5582
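/*
 * Re-initialize the device on PM restore: reread the configuration
 * data, verify that the UID is unchanged, register the device with the
 * alias handling again and reread features, volume information, extent
 * pool information and the device characteristics.
 */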
5583 static int dasd_eckd_restore_device(struct dasd_device *device)
5584 {
5585         struct dasd_eckd_private *private = device->private;
5586         struct dasd_eckd_characteristics temp_rdc_data;
5587         int rc;
5588         struct dasd_uid temp_uid;
5589         unsigned long flags;
5590         unsigned long cqr_flags = 0;
5591
5592         /* Read Configuration Data */
5593         rc = dasd_eckd_read_conf(device);
5594         if (rc) {
5595                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5596                                 "Read configuration data failed, rc=%d", rc);
5597                 goto out_err;
5598         }
5599
5600         dasd_eckd_get_uid(device, &temp_uid);
5601         /* Generate device unique id */
5602         rc = dasd_eckd_generate_uid(device);
5603         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5604         if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
5605                 dev_err(&device->cdev->dev, "The UID of the DASD has "
5606                         "changed\n");
5607         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5608         if (rc)
5609                 goto out_err;
5610
5611         /* register lcu with alias handling, enable PAV if this is a new lcu */
5612         rc = dasd_alias_make_device_known_to_lcu(device);
5613         if (rc)
5614                 goto out_err;
5615
5616         set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
5617         dasd_eckd_validate_server(device, cqr_flags);
5618
5619         /* RE-Read Configuration Data */
5620         rc = dasd_eckd_read_conf(device);
5621         if (rc) {
5622                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5623                         "Read configuration data failed, rc=%d", rc);
5624                 goto out_err2;
5625         }
5626
5627         /* Read Feature Codes */
5628         dasd_eckd_read_features(device);
5629
5630         /* Read Volume Information */
5631         dasd_eckd_read_vol_info(device);
5632
5633         /* Read Extent Pool Information */
5634         dasd_eckd_read_ext_pool_info(device);
5635
5636         /* Read Device Characteristics */
5637         rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
5638                                          &temp_rdc_data, 64);
5639         if (rc) {
5640                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5641                                 "Read device characteristic failed, rc=%d", rc);
5642                 goto out_err2;
5643         }
5644         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5645         memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
5646         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5647
5648         /* add device to alias management */
5649         dasd_alias_add_device(device);
5650
5651         return 0;
5652
5653 out_err2:
5654         dasd_alias_disconnect_device_from_lcu(device);
5655 out_err:
5656         return -1;
5657 }
5658
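/*
 * Reread the configuration data, regenerate the UID and update the
 * alias handling; log a message if the alias was reassigned to a new
 * base device (i.e. the base unit address has changed).
 */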
5659 static int dasd_eckd_reload_device(struct dasd_device *device)
5660 {
5661         struct dasd_eckd_private *private = device->private;
5662         int rc, old_base;
5663         char print_uid[60];
5664         struct dasd_uid uid;
5665         unsigned long flags;
5666
5667         /*
5668          * remove device from alias handling to prevent new requests
5669          * from being scheduled on the wrong alias device
5670          */
5671         dasd_alias_remove_device(device);
5672
5673         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5674         old_base = private->uid.base_unit_addr;
5675         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5676
5677         /* Read Configuration Data */
5678         rc = dasd_eckd_read_conf(device);
5679         if (rc)
5680                 goto out_err;
5681
5682         rc = dasd_eckd_generate_uid(device);
5683         if (rc)
5684                 goto out_err;
5685         /*
5686          * update unit address configuration and
5687          * add device to alias management
5688          */
5689         dasd_alias_update_add_device(device);
5690
5691         dasd_eckd_get_uid(device, &uid);
5692
5693         if (old_base != uid.base_unit_addr) {
5694                 if (strlen(uid.vduit) > 0)
5695                         snprintf(print_uid, sizeof(print_uid),
5696                                  "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
5697                                  uid.ssid, uid.base_unit_addr, uid.vduit);
5698                 else
5699                         snprintf(print_uid, sizeof(print_uid),
5700                                  "%s.%s.%04x.%02x", uid.vendor, uid.serial,
5701                                  uid.ssid, uid.base_unit_addr);
5702
5703                 dev_info(&device->cdev->dev,
5704                          "An Alias device was reassigned to a new base device "
5705                          "with UID: %s\n", print_uid);
5706         }
5707         return 0;
5708
5709 out_err:
5710         return -1;
5711 }
5712
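/*
 * Read the attention message buffer from the storage server using a
 * PSF/RSSD (Read Subsystem Data) channel program on the path given by
 * lpum; if I/O on that path fails, retry with an open path mask.
 */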
5713 static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5714                                          struct dasd_rssd_messages *messages,
5715                                          __u8 lpum)
5716 {
5717         struct dasd_rssd_messages *message_buf;
5718         struct dasd_psf_prssd_data *prssdp;
5719         struct dasd_ccw_req *cqr;
5720         struct ccw1 *ccw;
5721         int rc;
5722
5723         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5724                                    (sizeof(struct dasd_psf_prssd_data) +
5725                                     sizeof(struct dasd_rssd_messages)),
5726                                    device, NULL);
5727         if (IS_ERR(cqr)) {
5728                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5729                                 "Could not allocate read message buffer request");
5730                 return PTR_ERR(cqr);
5731         }
5732
5733         cqr->lpm = lpum;
5734 retry:
5735         cqr->startdev = device;
5736         cqr->memdev = device;
5737         cqr->block = NULL;
5738         cqr->expires = 10 * HZ;
5739         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
5740         /* dasd_sleep_on_immediatly does not do complex error
5741          * recovery, so clear the erp flag and set the retry counter
5742          * to do basic erp */
5743         clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5744         cqr->retries = 256;
5745
5746         /* Prepare for Read Subsystem Data */
5747         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5748         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5749         prssdp->order = PSF_ORDER_PRSSD;
5750         prssdp->suborder = 0x03;        /* Message Buffer */
5751         /* all other bytes of prssdp must be zero */
5752
5753         ccw = cqr->cpaddr;
5754         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5755         ccw->count = sizeof(struct dasd_psf_prssd_data);
5756         ccw->flags |= CCW_FLAG_CC;
5757         ccw->flags |= CCW_FLAG_SLI;
5758         ccw->cda = (__u32)(addr_t) prssdp;
5759
5760         /* Read Subsystem Data - message buffer */
5761         message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
5762         memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
5763
5764         ccw++;
5765         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5766         ccw->count = sizeof(struct dasd_rssd_messages);
5767         ccw->flags |= CCW_FLAG_SLI;
5768         ccw->cda = (__u32)(addr_t) message_buf;
5769
5770         cqr->buildclk = get_tod_clock();
5771         cqr->status = DASD_CQR_FILLED;
5772         rc = dasd_sleep_on_immediatly(cqr);
5773         if (rc == 0) {
5774                 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5775                 message_buf = (struct dasd_rssd_messages *)
5776                         (prssdp + 1);
5777                 memcpy(messages, message_buf,
5778                        sizeof(struct dasd_rssd_messages));
5779         } else if (cqr->lpm) {
5780                 /*
5781                  * on z/VM we might not be able to do I/O on the requested path
5782                  * but instead we get the required information on any path
5783                  * so retry with open path mask
5784                  */
5785                 cqr->lpm = 0;
5786                 goto retry;
5787         } else
5788                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5789                                 "Reading messages failed with rc=%d\n"
5790                                 , rc);
5791         dasd_sfree_request(cqr, cqr->memdev);
5792         return rc;
5793 }
5794
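/*
 * Query the host access information for this volume from the storage
 * server using a PSF/RSSD channel program. Not available for HyperPAV
 * alias devices and only usable if the storage server supports it.
 */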
5795 static int dasd_eckd_query_host_access(struct dasd_device *device,
5796                                        struct dasd_psf_query_host_access *data)
5797 {
5798         struct dasd_eckd_private *private = device->private;
5799         struct dasd_psf_query_host_access *host_access;
5800         struct dasd_psf_prssd_data *prssdp;
5801         struct dasd_ccw_req *cqr;
5802         struct ccw1 *ccw;
5803         int rc;
5804
5805         /* not available for HYPER PAV alias devices */
5806         if (!device->block && private->lcu->pav == HYPER_PAV)
5807                 return -EOPNOTSUPP;
5808
5809         /* may not be supported by the storage server */
5810         if (!(private->features.feature[14] & 0x80))
5811                 return -EOPNOTSUPP;
5812
5813         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5814                                    sizeof(struct dasd_psf_prssd_data) + 1,
5815                                    device, NULL);
5816         if (IS_ERR(cqr)) {
5817                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5818                                 "Could not allocate read message buffer request");
5819                 return PTR_ERR(cqr);
5820         }
5821         host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
5822         if (!host_access) {
5823                 dasd_sfree_request(cqr, device);
5824                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5825                                 "Could not allocate host_access buffer");
5826                 return -ENOMEM;
5827         }
5828         cqr->startdev = device;
5829         cqr->memdev = device;
5830         cqr->block = NULL;
5831         cqr->retries = 256;
5832         cqr->expires = 10 * HZ;
5833
5834         /* Prepare for Read Subsystem Data */
5835         prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5836         memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
5837         prssdp->order = PSF_ORDER_PRSSD;
5838         prssdp->suborder = PSF_SUBORDER_QHA;    /* query host access */
5839         /* LSS and Volume that will be queried */
5840         prssdp->lss = private->ned->ID;
5841         prssdp->volume = private->ned->unit_addr;
5842         /* all other bytes of prssdp must be zero */
5843
5844         ccw = cqr->cpaddr;
5845         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5846         ccw->count = sizeof(struct dasd_psf_prssd_data);
5847         ccw->flags |= CCW_FLAG_CC;
5848         ccw->flags |= CCW_FLAG_SLI;
5849         ccw->cda = (__u32)(addr_t) prssdp;
5850
5851         /* Read Subsystem Data - query host access */
5852         ccw++;
5853         ccw->cmd_code = DASD_ECKD_CCW_RSSD;
5854         ccw->count = sizeof(struct dasd_psf_query_host_access);
5855         ccw->flags |= CCW_FLAG_SLI;
5856         ccw->cda = (__u32)(addr_t) host_access;
5857
5858         cqr->buildclk = get_tod_clock();
5859         cqr->status = DASD_CQR_FILLED;
5860         /* the command might not be supported, suppress error message */
5861         __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
5862         rc = dasd_sleep_on_interruptible(cqr);
5863         if (rc == 0) {
5864                 *data = *host_access;
5865         } else {
5866                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
5867                                 "Reading host access data failed with rc=%d\n",
5868                                 rc);
5869                 rc = -EOPNOTSUPP;
5870         }
5871
5872         dasd_sfree_request(cqr, cqr->memdev);
5873         kfree(host_access);
5874         return rc;
5875 }
5876 /*
5877  * Return the number of grouped devices.
5878  */
5879 static int dasd_eckd_host_access_count(struct dasd_device *device)
5880 {
5881         struct dasd_psf_query_host_access *access;
5882         struct dasd_ckd_path_group_entry *entry;
5883         struct dasd_ckd_host_information *info;
5884         int count = 0;
5885         int rc, i;
5886
5887         access = kzalloc(sizeof(*access), GFP_NOIO);
5888         if (!access) {
5889                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5890                                 "Could not allocate access buffer");
5891                 return -ENOMEM;
5892         }
5893         rc = dasd_eckd_query_host_access(device, access);
5894         if (rc) {
5895                 kfree(access);
5896                 return rc;
5897         }
5898
5899         info = (struct dasd_ckd_host_information *)
5900                 access->host_access_information;
5901         for (i = 0; i < info->entry_count; i++) {
5902                 entry = (struct dasd_ckd_path_group_entry *)
5903                         (info->entry + i * info->entry_size);
5904                 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
5905                         count++;
5906         }
5907
5908         kfree(access);
5909         return count;
5910 }
5911
5912 /*
5913  * write host access information to a sequential file
5914  */
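/*
 * Illustrative example of the output generated below for one path group
 * entry (all values are made up):
 *
 *   pgid 0002c90300a8d41e000102
 *   status_flags 82
 *   sysplex_name SYSPLEX1
 *   supported_cylinder 1182006
 *   timestamp 1529497993
 */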
5915 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
5916 {
5917         struct dasd_psf_query_host_access *access;
5918         struct dasd_ckd_path_group_entry *entry;
5919         struct dasd_ckd_host_information *info;
5920         char sysplex[9] = "";
5921         int rc, i;
5922
5923         access = kzalloc(sizeof(*access), GFP_NOIO);
5924         if (!access) {
5925                 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5926                                 "Could not allocate access buffer");
5927                 return -ENOMEM;
5928         }
5929         rc = dasd_eckd_query_host_access(device, access);
5930         if (rc) {
5931                 kfree(access);
5932                 return rc;
5933         }
5934
5935         info = (struct dasd_ckd_host_information *)
5936                 access->host_access_information;
5937         for (i = 0; i < info->entry_count; i++) {
5938                 entry = (struct dasd_ckd_path_group_entry *)
5939                         (info->entry + i * info->entry_size);
5940                 /* PGID */
5941                 seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
5942                 /* FLAGS */
5943                 seq_printf(m, "status_flags %02x\n", entry->status_flags);
5944                 /* SYSPLEX NAME */
5945                 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
5946                 EBCASC(sysplex, sizeof(sysplex));
5947                 seq_printf(m, "sysplex_name %8s\n", sysplex);
5948                 /* SUPPORTED CYLINDER */
5949                 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
5950                 /* TIMESTAMP */
5951                 seq_printf(m, "timestamp %lu\n", (unsigned long)
5952                            entry->timestamp);
5953         }
5954         kfree(access);
5955
5956         return 0;
5957 }
5958
5959 /*
5960  * Perform Subsystem Function - CUIR response
5961  */
5962 static int
5963 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
5964                             __u32 message_id, __u8 lpum)
5965 {
5966         struct dasd_psf_cuir_response *psf_cuir;
5967         int pos = pathmask_to_pos(lpum);
5968         struct dasd_ccw_req *cqr;
5969         struct ccw1 *ccw;
5970         int rc;
5971
5972         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
5973                                    sizeof(struct dasd_psf_cuir_response),
5974                                    device, NULL);
5975
5976         if (IS_ERR(cqr)) {
5977                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
5978                            "Could not allocate PSF-CUIR request");
5979                 return PTR_ERR(cqr);
5980         }
5981
5982         psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
5983         psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
5984         psf_cuir->cc = response;
5985         psf_cuir->chpid = device->path[pos].chpid;
5986         psf_cuir->message_id = message_id;
5987         psf_cuir->cssid = device->path[pos].cssid;
5988         psf_cuir->ssid = device->path[pos].ssid;
5989         ccw = cqr->cpaddr;
5990         ccw->cmd_code = DASD_ECKD_CCW_PSF;
5991         ccw->cda = (__u32)(addr_t)psf_cuir;
5992         ccw->flags = CCW_FLAG_SLI;
5993         ccw->count = sizeof(struct dasd_psf_cuir_response);
5994
5995         cqr->startdev = device;
5996         cqr->memdev = device;
5997         cqr->block = NULL;
5998         cqr->retries = 256;
5999         cqr->expires = 10*HZ;
6000         cqr->buildclk = get_tod_clock();
6001         cqr->status = DASD_CQR_FILLED;
6002         set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
6003
6004         rc = dasd_sleep_on(cqr);
6005
6006         dasd_sfree_request(cqr, cqr->memdev);
6007         return rc;
6008 }
6009
6010 /*
6011  * Return the configuration data that is referenced by the record selector
6012  * if a record selector is specified; otherwise return the conf_data
6013  * pointer for the path specified by lpum.
6014  */
6015 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6016                                                      __u8 lpum,
6017                                                      struct dasd_cuir_message *cuir)
6018 {
6019         struct dasd_conf_data *conf_data;
6020         int path, pos;
6021
6022         if (cuir->record_selector == 0)
6023                 goto out;
6024         for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6025                 conf_data = device->path[pos].conf_data;
6026                 if (conf_data->gneq.record_selector ==
6027                     cuir->record_selector)
6028                         return conf_data;
6029         }
6030 out:
6031         return device->path[pathmask_to_pos(lpum)].conf_data;
6032 }
6033
6034 /*
6035  * This function determines the scope of a reconfiguration request by
6036  * analysing the path and device selection data provided in the CUIR request.
6037  * Returns a path mask containing CUIR affected paths for the given device.
6038  *
6039  * If the CUIR request does not contain the required information, return the
6040  * path mask of the path the attention message for the CUIR request was received
6041  * on.
6042  */
6043 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
6044                                 struct dasd_cuir_message *cuir)
6045 {
6046         struct dasd_conf_data *ref_conf_data;
6047         unsigned long bitmask = 0, mask = 0;
6048         struct dasd_conf_data *conf_data;
6049         unsigned int pos, path;
6050         char *ref_gneq, *gneq;
6051         char *ref_ned, *ned;
6052         int tbcpm = 0;
6053
6054         /* if the CUIR request does not specify the scope, use the path
6055            the attention message was presented on */
6056         if (!cuir->ned_map ||
6057             !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
6058                 return lpum;
6059
6060         /* get reference conf data */
6061         ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
6062         /* reference ned is determined by ned_map field */
6063         pos = 8 - ffs(cuir->ned_map);
6064         ref_ned = (char *)&ref_conf_data->neds[pos];
6065         ref_gneq = (char *)&ref_conf_data->gneq;
6066         /* transfer 24 bit neq_map to mask */
6067         mask = cuir->neq_map[2];
6068         mask |= cuir->neq_map[1] << 8;
6069         mask |= cuir->neq_map[0] << 16;
6070
6071         for (path = 0; path < 8; path++) {
6072                 /* initialise data per path */
6073                 bitmask = mask;
6074                 conf_data = device->path[path].conf_data;
6075                 pos = 8 - ffs(cuir->ned_map);
6076                 ned = (char *) &conf_data->neds[pos];
6077                 /* compare reference ned and per path ned */
6078                 if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
6079                         continue;
6080                 gneq = (char *)&conf_data->gneq;
6081                 /* compare reference gneq and per_path gneq under
6082                    24 bit mask where mask bit 0 equals byte 7 of
6083                    the gneq and mask bit 24 equals byte 31 */
6084                 while (bitmask) {
6085                         pos = ffs(bitmask) - 1;
6086                         if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
6087                             != 0)
6088                                 break;
6089                         clear_bit(pos, &bitmask);
6090                 }
6091                 if (bitmask)
6092                         continue;
6093                 /* device and path match the reference values
6094                    add path to CUIR scope */
6095                 tbcpm |= 0x80 >> path;
6096         }
6097         return tbcpm;
6098 }
6099
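/*
 * Log a message for each path in the given path mask that is affected
 * by the CUIR action (quiesce or resume).
 */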
6100 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6101                                        unsigned long paths, int action)
6102 {
6103         int pos;
6104
6105         while (paths) {
6106                 /* get position of bit in mask */
6107                 pos = 8 - ffs(paths);
6108                 /* get channel path descriptor from this position */
6109                 if (action == CUIR_QUIESCE)
6110                         pr_warn("Service on the storage server caused path %x.%02x to go offline",
6111                                 device->path[pos].cssid,
6112                                 device->path[pos].chpid);
6113                 else if (action == CUIR_RESUME)
6114                         pr_info("Path %x.%02x is back online after service on the storage server",
6115                                 device->path[pos].cssid,
6116                                 device->path[pos].chpid);
6117                 clear_bit(7 - pos, &paths);
6118         }
6119 }
6120
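/*
 * Remove the CUIR affected paths from the operational path mask of the
 * device. Returns the path mask of the removed paths, 0 if none of the
 * paths is in use, or -EINVAL if the last operational path would be
 * removed.
 */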
6121 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6122                                       struct dasd_cuir_message *cuir)
6123 {
6124         unsigned long tbcpm;
6125
6126         tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6127         /* nothing to do if path is not in use */
6128         if (!(dasd_path_get_opm(device) & tbcpm))
6129                 return 0;
6130         if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6131                 /* no path would be left if the CUIR action is taken,
6132                    return an error */
6133                 return -EINVAL;
6134         }
6135         /* remove device from operational path mask */
6136         dasd_path_remove_opm(device, tbcpm);
6137         dasd_path_add_cuirpm(device, tbcpm);
6138         return tbcpm;
6139 }
6140
6141 /*
6142  * Walk through all devices and build a path mask to quiesce them.
6143  * Return an error if the last path to a device would be removed.
6144  *
6145  * If only part of the devices are quiesced when an error
6146  * occurs, no onlining is necessary; the storage server will
6147  * notify the already set offline devices again.
6148  */
6149 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6150                                   struct dasd_cuir_message *cuir)
6151 {
6152         struct dasd_eckd_private *private = device->private;
6153         struct alias_pav_group *pavgroup, *tempgroup;
6154         struct dasd_device *dev, *n;
6155         unsigned long paths = 0;
6156         unsigned long flags;
6157         int tbcpm;
6158
6159         /* active devices */
6160         list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6161                                  alias_list) {
6162                 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6163                 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6164                 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6165                 if (tbcpm < 0)
6166                         goto out_err;
6167                 paths |= tbcpm;
6168         }
6169         /* inactive devices */
6170         list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6171                                  alias_list) {
6172                 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6173                 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6174                 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6175                 if (tbcpm < 0)
6176                         goto out_err;
6177                 paths |= tbcpm;
6178         }
6179         /* devices in PAV groups */
6180         list_for_each_entry_safe(pavgroup, tempgroup,
6181                                  &private->lcu->grouplist, group) {
6182                 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6183                                          alias_list) {
6184                         spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6185                         tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6186                         spin_unlock_irqrestore(
6187                                 get_ccwdev_lock(dev->cdev), flags);
6188                         if (tbcpm < 0)
6189                                 goto out_err;
6190                         paths |= tbcpm;
6191                 }
6192                 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6193                                          alias_list) {
6194                         spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6195                         tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6196                         spin_unlock_irqrestore(
6197                                 get_ccwdev_lock(dev->cdev), flags);
6198                         if (tbcpm < 0)
6199                                 goto out_err;
6200                         paths |= tbcpm;
6201                 }
6202         }
6203         /* notify user about all paths affected by CUIR action */
6204         dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6205         return 0;
6206 out_err:
6207         return tbcpm;
6208 }
6209
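/*
 * Walk through all devices of the LCU and trigger path verification for
 * paths that are affected by the CUIR resume but not yet in use again.
 */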
6210 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
6211                                  struct dasd_cuir_message *cuir)
6212 {
6213         struct dasd_eckd_private *private = device->private;
6214         struct alias_pav_group *pavgroup, *tempgroup;
6215         struct dasd_device *dev, *n;
6216         unsigned long paths = 0;
6217         int tbcpm;
6218
6219         /*
6220          * the path may have been added through a generic path event before;
6221          * only trigger path verification if the path is not already in use
6222          */
6223         list_for_each_entry_safe(dev, n,
6224                                  &private->lcu->active_devices,
6225                                  alias_list) {
6226                 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6227                 paths |= tbcpm;
6228                 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6229                         dasd_path_add_tbvpm(dev, tbcpm);
6230                         dasd_schedule_device_bh(dev);
6231                 }
6232         }
6233         list_for_each_entry_safe(dev, n,
6234                                  &private->lcu->inactive_devices,
6235                                  alias_list) {
6236                 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6237                 paths |= tbcpm;
6238                 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6239                         dasd_path_add_tbvpm(dev, tbcpm);
6240                         dasd_schedule_device_bh(dev);
6241                 }
6242         }
6243         /* devices in PAV groups */
6244         list_for_each_entry_safe(pavgroup, tempgroup,
6245                                  &private->lcu->grouplist,
6246                                  group) {
6247                 list_for_each_entry_safe(dev, n,
6248                                          &pavgroup->baselist,
6249                                          alias_list) {
6250                         tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6251                         paths |= tbcpm;
6252                         if (!(dasd_path_get_opm(dev) & tbcpm)) {
6253                                 dasd_path_add_tbvpm(dev, tbcpm);
6254                                 dasd_schedule_device_bh(dev);
6255                         }
6256                 }
6257                 list_for_each_entry_safe(dev, n,
6258                                          &pavgroup->aliaslist,
6259                                          alias_list) {
6260                         tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6261                         paths |= tbcpm;
6262                         if (!(dasd_path_get_opm(dev) & tbcpm)) {
6263                                 dasd_path_add_tbvpm(dev, tbcpm);
6264                                 dasd_schedule_device_bh(dev);
6265                         }
6266                 }
6267         }
6268         /* notify user about all paths affected by CUIR action */
6269         dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
6270         return 0;
6271 }
6272
6273 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6274                                  __u8 lpum)
6275 {
6276         struct dasd_cuir_message *cuir = messages;
6277         int response;
6278
6279         DBF_DEV_EVENT(DBF_WARNING, device,
6280                       "CUIR request: %016llx %016llx %016llx %08x",
6281                       ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6282                       ((u32 *)cuir)[3]);
6283
6284         if (cuir->code == CUIR_QUIESCE) {
6285                 /* quiesce */
6286                 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6287                         response = PSF_CUIR_LAST_PATH;
6288                 else
6289                         response = PSF_CUIR_COMPLETED;
6290         } else if (cuir->code == CUIR_RESUME) {
6291                 /* resume */
6292                 dasd_eckd_cuir_resume(device, lpum, cuir);
6293                 response = PSF_CUIR_COMPLETED;
6294         } else
6295                 response = PSF_CUIR_NOT_SUPPORTED;
6296
6297         dasd_eckd_psf_cuir_response(device, response,
6298                                     cuir->message_id, lpum);
6299         DBF_DEV_EVENT(DBF_WARNING, device,
6300                       "CUIR response: %d on message ID %08x", response,
6301                       cuir->message_id);
6302         /* to make sure there is no attention left, schedule work again */
6303         device->discipline->check_attention(device, lpum);
6304 }
6305
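/*
 * Resume I/O for all devices of the LCU that were stopped because the
 * extent pool ran out of space.
 */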
6306 static void dasd_eckd_oos_resume(struct dasd_device *device)
6307 {
6308         struct dasd_eckd_private *private = device->private;
6309         struct alias_pav_group *pavgroup, *tempgroup;
6310         struct dasd_device *dev, *n;
6311         unsigned long flags;
6312
6313         spin_lock_irqsave(&private->lcu->lock, flags);
6314         list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6315                                  alias_list) {
6316                 if (dev->stopped & DASD_STOPPED_NOSPC)
6317                         dasd_generic_space_avail(dev);
6318         }
6319         list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6320                                  alias_list) {
6321                 if (dev->stopped & DASD_STOPPED_NOSPC)
6322                         dasd_generic_space_avail(dev);
6323         }
6324         /* devices in PAV groups */
6325         list_for_each_entry_safe(pavgroup, tempgroup,
6326                                  &private->lcu->grouplist,
6327                                  group) {
6328                 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6329                                          alias_list) {
6330                         if (dev->stopped & DASD_STOPPED_NOSPC)
6331                                 dasd_generic_space_avail(dev);
6332                 }
6333                 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6334                                          alias_list) {
6335                         if (dev->stopped & DASD_STOPPED_NOSPC)
6336                                 dasd_generic_space_avail(dev);
6337                 }
6338         }
6339         spin_unlock_irqrestore(&private->lcu->lock, flags);
6340 }
6341
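/*
 * Handle an out-of-space attention message: warn or inform the user
 * depending on the message code, resume stopped devices if only the
 * warning level was reached, and update the extent pool information.
 */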
6342 static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
6343                                  __u8 lpum)
6344 {
6345         struct dasd_oos_message *oos = messages;
6346
6347         switch (oos->code) {
6348         case REPO_WARN:
6349         case POOL_WARN:
6350                 dev_warn(&device->cdev->dev,
6351                          "Extent pool usage has reached a critical value\n");
6352                 dasd_eckd_oos_resume(device);
6353                 break;
6354         case REPO_EXHAUST:
6355         case POOL_EXHAUST:
6356                 dev_warn(&device->cdev->dev,
6357                          "Extent pool is exhausted\n");
6358                 break;
6359         case REPO_RELIEVE:
6360         case POOL_RELIEVE:
6361                 dev_info(&device->cdev->dev,
6362                          "Extent pool physical space constraint has been relieved\n");
6363                 break;
6364         }
6365
6366         /* In any case, update related data */
6367         dasd_eckd_read_ext_pool_info(device);
6368
6369         /* to make sure there is no attention left schedule work again */
6370         /* to make sure there is no attention left, schedule work again */
6371 }
6372
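/*
 * Worker function to read the attention message buffer and dispatch
 * CUIR or out-of-space messages to the respective handlers.
 */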
6373 static void dasd_eckd_check_attention_work(struct work_struct *work)
6374 {
6375         struct check_attention_work_data *data;
6376         struct dasd_rssd_messages *messages;
6377         struct dasd_device *device;
6378         int rc;
6379
6380         data = container_of(work, struct check_attention_work_data, worker);
6381         device = data->device;
6382         messages = kzalloc(sizeof(*messages), GFP_KERNEL);
6383         if (!messages) {
6384                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6385                               "Could not allocate attention message buffer");
6386                 goto out;
6387         }
6388         rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
6389         if (rc)
6390                 goto out;
6391
6392         if (messages->length == ATTENTION_LENGTH_CUIR &&
6393             messages->format == ATTENTION_FORMAT_CUIR)
6394                 dasd_eckd_handle_cuir(device, messages, data->lpum);
6395         if (messages->length == ATTENTION_LENGTH_OOS &&
6396             messages->format == ATTENTION_FORMAT_OOS)
6397                 dasd_eckd_handle_oos(device, messages, data->lpum);
6398
6399 out:
6400         dasd_put_device(device);
6401         kfree(messages);
6402         kfree(data);
6403 }
6404
6405 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6406 {
6407         struct check_attention_work_data *data;
6408
6409         data = kzalloc(sizeof(*data), GFP_ATOMIC);
6410         if (!data)
6411                 return -ENOMEM;
6412         INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6413         dasd_get_device(device);
6414         data->device = device;
6415         data->lpum = lpum;
6416         schedule_work(&data->worker);
6417         return 0;
6418 }
6419
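/*
 * Remove a path that lost High Performance FICON functionality from the
 * operational path mask. Returns 1 if the path was disabled and 0 if no
 * other operational path would remain.
 */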
6420 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6421 {
6422         if (~lpum & dasd_path_get_opm(device)) {
6423                 dasd_path_add_nohpfpm(device, lpum);
6424                 dasd_path_remove_opm(device, lpum);
6425                 dev_err(&device->cdev->dev,
6426                         "Channel path %02X lost HPF functionality and is disabled\n",
6427                         lpum);
6428                 return 1;
6429         }
6430         return 0;
6431 }
6432
6433 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6434 {
6435         struct dasd_eckd_private *private = device->private;
6436
6437         dev_err(&device->cdev->dev,
6438                 "High Performance FICON disabled\n");
6439         private->fcx_max_data = 0;
6440 }
6441
6442 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6443 {
6444         struct dasd_eckd_private *private = device->private;
6445
6446         return private->fcx_max_data ? 1 : 0;
6447 }
6448
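/*
 * Handle an HPF error: disable High Performance FICON either for a
 * single path or for the whole device, stop new I/O and requeue the
 * existing requests.
 */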
6449 static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
6450                                        struct irb *irb)
6451 {
6452         struct dasd_eckd_private *private = device->private;
6453
6454         if (!private->fcx_max_data) {
6455                 /* sanity check for no HPF, the error makes no sense */
6456                 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6457                               "Trying to disable HPF for a non HPF device");
6458                 return;
6459         }
6460         if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
6461                 dasd_eckd_disable_hpf_device(device);
6462         } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
6463                 if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
6464                         return;
6465                 dasd_eckd_disable_hpf_device(device);
6466                 dasd_path_set_tbvpm(device,
6467                                   dasd_path_get_hpfpm(device));
6468         }
6469         /*
6470          * prevent any new I/O from being started on the device and schedule
6471          * a requeue of existing requests
6472          */
6473         dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
6474         dasd_schedule_requeue(device);
6475 }
6476
6477 /*
6478  * Initialize block layer request queue.
6479  */
6480 static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
6481 {
6482         unsigned int logical_block_size = block->bp_block;
6483         struct request_queue *q = block->request_queue;
6484         struct dasd_device *device = block->base;
6485         int max;
6486
6487         if (device->features & DASD_FEATURE_USERAW) {
6488                 /*
6489                  * the max_blocks value for raw_track access is 256;
6490                  * it is higher than the native ECKD value because we
6491                  * only need one ccw per track,
6492                  * so the max_hw_sectors are
6493                  * 2048 x 512B = 1024kB = 16 tracks
6494                  */
6495                 max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6496         } else {
6497                 max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
6498         }
6499         blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
6500         q->limits.max_dev_sectors = max;
6501         blk_queue_logical_block_size(q, logical_block_size);
6502         blk_queue_max_hw_sectors(q, max);
6503         blk_queue_max_segments(q, USHRT_MAX);
6504         /* With page sized segments each segment can be translated into one idaw/tidaw */
6505         blk_queue_max_segment_size(q, PAGE_SIZE);
6506         blk_queue_segment_boundary(q, PAGE_SIZE - 1);
6507 }
6508
6509 static struct ccw_driver dasd_eckd_driver = {
6510         .driver = {
6511                 .name   = "dasd-eckd",
6512                 .owner  = THIS_MODULE,
6513         },
6514         .ids         = dasd_eckd_ids,
6515         .probe       = dasd_eckd_probe,
6516         .remove      = dasd_generic_remove,
6517         .set_offline = dasd_generic_set_offline,
6518         .set_online  = dasd_eckd_set_online,
6519         .notify      = dasd_generic_notify,
6520         .path_event  = dasd_generic_path_event,
6521         .shutdown    = dasd_generic_shutdown,
6522         .freeze      = dasd_generic_pm_freeze,
6523         .thaw        = dasd_generic_restore_device,
6524         .restore     = dasd_generic_restore_device,
6525         .uc_handler  = dasd_generic_uc_handler,
6526         .int_class   = IRQIO_DAS,
6527 };
6528
6529 static struct dasd_discipline dasd_eckd_discipline = {
6530         .owner = THIS_MODULE,
6531         .name = "ECKD",
6532         .ebcname = "ECKD",
6533         .check_device = dasd_eckd_check_characteristics,
6534         .uncheck_device = dasd_eckd_uncheck_device,
6535         .do_analysis = dasd_eckd_do_analysis,
6536         .verify_path = dasd_eckd_verify_path,
6537         .basic_to_ready = dasd_eckd_basic_to_ready,
6538         .online_to_ready = dasd_eckd_online_to_ready,
6539         .basic_to_known = dasd_eckd_basic_to_known,
6540         .setup_blk_queue = dasd_eckd_setup_blk_queue,
6541         .fill_geometry = dasd_eckd_fill_geometry,
6542         .start_IO = dasd_start_IO,
6543         .term_IO = dasd_term_IO,
6544         .handle_terminated_request = dasd_eckd_handle_terminated_request,
6545         .format_device = dasd_eckd_format_device,
6546         .check_device_format = dasd_eckd_check_device_format,
6547         .erp_action = dasd_eckd_erp_action,
6548         .erp_postaction = dasd_eckd_erp_postaction,
6549         .check_for_device_change = dasd_eckd_check_for_device_change,
6550         .build_cp = dasd_eckd_build_alias_cp,
6551         .free_cp = dasd_eckd_free_alias_cp,
6552         .dump_sense = dasd_eckd_dump_sense,
6553         .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
6554         .fill_info = dasd_eckd_fill_info,
6555         .ioctl = dasd_eckd_ioctl,
6556         .freeze = dasd_eckd_pm_freeze,
6557         .restore = dasd_eckd_restore_device,
6558         .reload = dasd_eckd_reload_device,
6559         .get_uid = dasd_eckd_get_uid,
6560         .kick_validate = dasd_eckd_kick_validate_server,
6561         .check_attention = dasd_eckd_check_attention,
6562         .host_access_count = dasd_eckd_host_access_count,
6563         .hosts_print = dasd_hosts_print,
6564         .handle_hpf_error = dasd_eckd_handle_hpf_error,
6565         .disable_hpf = dasd_eckd_disable_hpf_device,
6566         .hpf_enabled = dasd_eckd_hpf_enabled,
6567         .reset_path = dasd_eckd_reset_path,
6568         .is_ese = dasd_eckd_is_ese,
6569         .space_allocated = dasd_eckd_space_allocated,
6570         .space_configured = dasd_eckd_space_configured,
6571         .logical_capacity = dasd_eckd_logical_capacity,
6572         .release_space = dasd_eckd_release_space,
6573         .ext_pool_id = dasd_eckd_ext_pool_id,
6574         .ext_size = dasd_eckd_ext_size,
6575         .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
6576         .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
6577         .ext_pool_oos = dasd_eckd_ext_pool_oos,
6578         .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
6579         .ese_format = dasd_eckd_ese_format,
6580         .ese_read = dasd_eckd_ese_read,
6581 };
6582
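/*
 * Allocate the static helper structures (reserve and volume information
 * requests, path verification worker, raw padding page) and register
 * the ECKD ccw driver.
 */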
6583 static int __init
6584 dasd_eckd_init(void)
6585 {
6586         int ret;
6587
6588         ASCEBC(dasd_eckd_discipline.ebcname, 4);
6589         dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6590                                    GFP_KERNEL | GFP_DMA);
6591         if (!dasd_reserve_req)
6592                 return -ENOMEM;
6593         dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6594                                     GFP_KERNEL | GFP_DMA);
6595         if (!dasd_vol_info_req) {
6596                 kfree(dasd_reserve_req);
                     return -ENOMEM;
             }
6597         path_verification_worker = kmalloc(sizeof(*path_verification_worker),
6598                                    GFP_KERNEL | GFP_DMA);
6599         if (!path_verification_worker) {
6600                 kfree(dasd_reserve_req);
6601                 kfree(dasd_vol_info_req);
6602                 return -ENOMEM;
6603         }
6604         rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6605         if (!rawpadpage) {
6606                 kfree(path_verification_worker);
6607                 kfree(dasd_reserve_req);
6608                 kfree(dasd_vol_info_req);
6609                 return -ENOMEM;
6610         }
6611         ret = ccw_driver_register(&dasd_eckd_driver);
6612         if (!ret)
6613                 wait_for_device_probe();
6614         else {
6615                 kfree(path_verification_worker);
6616                 kfree(dasd_reserve_req);
6617                 kfree(dasd_vol_info_req);
6618                 free_page((unsigned long)rawpadpage);
6619         }
6620         return ret;
6621 }
6622
6623 static void __exit
6624 dasd_eckd_cleanup(void)
6625 {
6626         ccw_driver_unregister(&dasd_eckd_driver);
6627         kfree(path_verification_worker);
6628         kfree(dasd_reserve_req);
             kfree(dasd_vol_info_req);
6629         free_page((unsigned long)rawpadpage);
6630 }
6631
6632 module_init(dasd_eckd_init);
6633 module_exit(dasd_eckd_cleanup);