s390/zcrypt: utilize dev_set_name() ability to use a formatted string
drivers/s390/crypto/zcrypt_api.c (platform/kernel/linux-rpi.git)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Copyright IBM Corp. 2001, 2018
4  *  Author(s): Robert Burroughs
5  *             Eric Rossman (edrossma@us.ibm.com)
6  *             Cornelia Huck <cornelia.huck@de.ibm.com>
7  *
8  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10  *                                Ralph Wuerthner <rwuerthn@de.ibm.com>
11  *  MSGTYPE restruct:             Holger Dengler <hd@linux.vnet.ibm.com>
12  *  Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
13  */
14
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/miscdevice.h>
19 #include <linux/fs.h>
20 #include <linux/compat.h>
21 #include <linux/slab.h>
22 #include <linux/atomic.h>
23 #include <linux/uaccess.h>
24 #include <linux/hw_random.h>
25 #include <linux/debugfs.h>
26 #include <linux/cdev.h>
27 #include <linux/ctype.h>
28 #include <linux/capability.h>
29 #include <asm/debug.h>
30
31 #define CREATE_TRACE_POINTS
32 #include <asm/trace/zcrypt.h>
33
34 #include "zcrypt_api.h"
35 #include "zcrypt_debug.h"
36
37 #include "zcrypt_msgtype6.h"
38 #include "zcrypt_msgtype50.h"
39 #include "zcrypt_ccamisc.h"
40 #include "zcrypt_ep11misc.h"
41
42 /*
43  * Module description.
44  */
45 MODULE_AUTHOR("IBM Corporation");
46 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
47                    "Copyright IBM Corp. 2001, 2012");
48 MODULE_LICENSE("GPL");
49
50 /*
51  * zcrypt tracepoint functions
52  */
53 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
54 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
55
56 DEFINE_SPINLOCK(zcrypt_list_lock);
57 LIST_HEAD(zcrypt_card_list);
58
59 static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
60 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
61
62 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
63 EXPORT_SYMBOL(zcrypt_rescan_req);
64
65 static LIST_HEAD(zcrypt_ops_list);
66
67 /* Zcrypt related debug feature stuff. */
68 debug_info_t *zcrypt_dbf_info;
69
70 /*
71  * Process a rescan of the transport layer.
72  *
73  * Returns 1 if the rescan has been processed, otherwise 0.
74  */
75 static inline int zcrypt_process_rescan(void)
76 {
77         if (atomic_read(&zcrypt_rescan_req)) {
78                 atomic_set(&zcrypt_rescan_req, 0);
79                 atomic_inc(&zcrypt_rescan_count);
80                 ap_bus_force_rescan();
81                 ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
82                                 atomic_inc_return(&zcrypt_rescan_count));
83                 return 1;
84         }
85         return 0;
86 }
87
88 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
89 {
90         list_add_tail(&zops->list, &zcrypt_ops_list);
91 }
92
93 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
94 {
95         list_del_init(&zops->list);
96 }
97
98 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
99 {
100         struct zcrypt_ops *zops;
101
102         list_for_each_entry(zops, &zcrypt_ops_list, list)
103                 if (zops->variant == variant &&
104                     (!strncmp(zops->name, name, sizeof(zops->name))))
105                         return zops;
106         return NULL;
107 }
108 EXPORT_SYMBOL(zcrypt_msgtype);
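/*
 * Illustrative sketch only (not part of the driver): a message type
 * module registers its struct zcrypt_ops instance on module init and
 * the card/queue probe code later looks the ops up by name and variant,
 * e.g. with the identifiers from zcrypt_msgtype6.h. "my_zops" below is
 * a hypothetical instance; the real ones live in the msgtype modules.
 *
 *	zcrypt_msgtype_register(&my_zops);
 *	...
 *	zops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT);
 *	if (!zops)
 *		return -ENODEV;
 */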
109
110 /*
111  * Multi device nodes extension functions.
112  */
113
114 struct zcdn_device;
115
116 static struct class *zcrypt_class;
117 static dev_t zcrypt_devt;
118 static struct cdev zcrypt_cdev;
119
120 struct zcdn_device {
121         struct device device;
122         struct ap_perms perms;
123 };
124
125 #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
126
127 #define ZCDN_MAX_NAME 32
128
129 static int zcdn_create(const char *name);
130 static int zcdn_destroy(const char *name);
131
132 /*
133  * Find zcdn device by name.
134  * Returns reference to the zcdn device which needs to be released
135  * with put_device() after use.
136  */
137 static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
138 {
139         struct device *dev = class_find_device_by_name(zcrypt_class, name);
140
141         return dev ? to_zcdn_dev(dev) : NULL;
142 }
143
144 /*
145  * Find zcdn device by devt value.
146  * Returns reference to the zcdn device which needs to be released
147  * with put_device() after use.
148  */
149 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
150 {
151         struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
152
153         return dev ? to_zcdn_dev(dev) : NULL;
154 }
155
156 static ssize_t ioctlmask_show(struct device *dev,
157                               struct device_attribute *attr,
158                               char *buf)
159 {
160         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
161         int i, n;
162
163         if (mutex_lock_interruptible(&ap_perms_mutex))
164                 return -ERESTARTSYS;
165
166         n = sysfs_emit(buf, "0x");
167         for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
168                 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
169         n += sysfs_emit_at(buf, n, "\n");
170
171         mutex_unlock(&ap_perms_mutex);
172
173         return n;
174 }
175
176 static ssize_t ioctlmask_store(struct device *dev,
177                                struct device_attribute *attr,
178                                const char *buf, size_t count)
179 {
180         int rc;
181         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
182
183         rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
184                                AP_IOCTLS, &ap_perms_mutex);
185         if (rc)
186                 return rc;
187
188         return count;
189 }
190
191 static DEVICE_ATTR_RW(ioctlmask);
192
193 static ssize_t apmask_show(struct device *dev,
194                            struct device_attribute *attr,
195                            char *buf)
196 {
197         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
198         int i, n;
199
200         if (mutex_lock_interruptible(&ap_perms_mutex))
201                 return -ERESTARTSYS;
202
203         n = sysfs_emit(buf, "0x");
204         for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
205                 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
206         n += sysfs_emit_at(buf, n, "\n");
207
208         mutex_unlock(&ap_perms_mutex);
209
210         return n;
211 }
212
213 static ssize_t apmask_store(struct device *dev,
214                             struct device_attribute *attr,
215                             const char *buf, size_t count)
216 {
217         int rc;
218         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
219
220         rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
221                                AP_DEVICES, &ap_perms_mutex);
222         if (rc)
223                 return rc;
224
225         return count;
226 }
227
228 static DEVICE_ATTR_RW(apmask);
229
230 static ssize_t aqmask_show(struct device *dev,
231                            struct device_attribute *attr,
232                            char *buf)
233 {
234         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
235         int i, n;
236
237         if (mutex_lock_interruptible(&ap_perms_mutex))
238                 return -ERESTARTSYS;
239
240         n = sysfs_emit(buf, "0x");
241         for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
242                 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
243         n += sysfs_emit_at(buf, n, "\n");
244
245         mutex_unlock(&ap_perms_mutex);
246
247         return n;
248 }
249
250 static ssize_t aqmask_store(struct device *dev,
251                             struct device_attribute *attr,
252                             const char *buf, size_t count)
253 {
254         int rc;
255         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
256
257         rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
258                                AP_DOMAINS, &ap_perms_mutex);
259         if (rc)
260                 return rc;
261
262         return count;
263 }
264
265 static DEVICE_ATTR_RW(aqmask);
266
267 static ssize_t admask_show(struct device *dev,
268                            struct device_attribute *attr,
269                            char *buf)
270 {
271         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
272         int i, n;
273
274         if (mutex_lock_interruptible(&ap_perms_mutex))
275                 return -ERESTARTSYS;
276
277         n = sysfs_emit(buf, "0x");
278         for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
279                 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
280         n += sysfs_emit_at(buf, n, "\n");
281
282         mutex_unlock(&ap_perms_mutex);
283
284         return n;
285 }
286
287 static ssize_t admask_store(struct device *dev,
288                             struct device_attribute *attr,
289                             const char *buf, size_t count)
290 {
291         int rc;
292         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
293
294         rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
295                                AP_DOMAINS, &ap_perms_mutex);
296         if (rc)
297                 return rc;
298
299         return count;
300 }
301
302 static DEVICE_ATTR_RW(admask);
303
304 static struct attribute *zcdn_dev_attrs[] = {
305         &dev_attr_ioctlmask.attr,
306         &dev_attr_apmask.attr,
307         &dev_attr_aqmask.attr,
308         &dev_attr_admask.attr,
309         NULL
310 };
311
312 static struct attribute_group zcdn_dev_attr_group = {
313         .attrs = zcdn_dev_attrs
314 };
315
316 static const struct attribute_group *zcdn_dev_attr_groups[] = {
317         &zcdn_dev_attr_group,
318         NULL
319 };
320
321 static ssize_t zcdn_create_store(const struct class *class,
322                                  const struct class_attribute *attr,
323                                  const char *buf, size_t count)
324 {
325         int rc;
326         char name[ZCDN_MAX_NAME];
327
328         strscpy(name, skip_spaces(buf), sizeof(name));
329
330         rc = zcdn_create(strim(name));
331
332         return rc ? rc : count;
333 }
334
335 static const struct class_attribute class_attr_zcdn_create =
336         __ATTR(create, 0600, NULL, zcdn_create_store);
337
338 static ssize_t zcdn_destroy_store(const struct class *class,
339                                   const struct class_attribute *attr,
340                                   const char *buf, size_t count)
341 {
342         int rc;
343         char name[ZCDN_MAX_NAME];
344
345         strscpy(name, skip_spaces(buf), sizeof(name));
346
347         rc = zcdn_destroy(strim(name));
348
349         return rc ? rc : count;
350 }
351
352 static const struct class_attribute class_attr_zcdn_destroy =
353         __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
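/*
 * Illustrative usage of the create/destroy class attributes and the
 * per node mask attributes defined above (a sketch only, assuming the
 * usual sysfs mount point and a udev managed /dev):
 *
 *	echo my_zcdn > /sys/class/zcrypt/create
 *	echo 0x<hex bitmask> > /sys/class/zcrypt/my_zcdn/apmask
 *	echo +1 > /sys/class/zcrypt/my_zcdn/aqmask
 *	... applications open /dev/my_zcdn instead of /dev/z90crypt ...
 *	echo my_zcdn > /sys/class/zcrypt/destroy
 *
 * The mask strings are parsed by ap_parse_mask_str() and may be given
 * either as an absolute hex bitmask or as relative +/- bit numbers.
 */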
354
355 static void zcdn_device_release(struct device *dev)
356 {
357         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
358
359         ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
360                         __func__, MAJOR(dev->devt), MINOR(dev->devt));
361
362         kfree(zcdndev);
363 }
364
365 static int zcdn_create(const char *name)
366 {
367         dev_t devt;
368         int i, rc = 0;
369         struct zcdn_device *zcdndev;
370
371         if (mutex_lock_interruptible(&ap_perms_mutex))
372                 return -ERESTARTSYS;
373
374         /* check if device node with this name already exists */
375         if (name[0]) {
376                 zcdndev = find_zcdndev_by_name(name);
377                 if (zcdndev) {
378                         put_device(&zcdndev->device);
379                         rc = -EEXIST;
380                         goto unlockout;
381                 }
382         }
383
384         /* find an unused minor number */
385         for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
386                 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
387                 zcdndev = find_zcdndev_by_devt(devt);
388                 if (zcdndev)
389                         put_device(&zcdndev->device);
390                 else
391                         break;
392         }
393         if (i == ZCRYPT_MAX_MINOR_NODES) {
394                 rc = -ENOSPC;
395                 goto unlockout;
396         }
397
398         /* alloc and prepare a new zcdn device */
399         zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
400         if (!zcdndev) {
401                 rc = -ENOMEM;
402                 goto unlockout;
403         }
404         zcdndev->device.release = zcdn_device_release;
405         zcdndev->device.class = zcrypt_class;
406         zcdndev->device.devt = devt;
407         zcdndev->device.groups = zcdn_dev_attr_groups;
408         if (name[0])
409                 rc = dev_set_name(&zcdndev->device, "%s", name);
410         else
411                 rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
412         if (rc) {
413                 kfree(zcdndev);
414                 goto unlockout;
415         }
416         rc = device_register(&zcdndev->device);
417         if (rc) {
418                 put_device(&zcdndev->device);
419                 goto unlockout;
420         }
421
422         ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
423                         __func__, MAJOR(devt), MINOR(devt));
424
425 unlockout:
426         mutex_unlock(&ap_perms_mutex);
427         return rc;
428 }
429
430 static int zcdn_destroy(const char *name)
431 {
432         int rc = 0;
433         struct zcdn_device *zcdndev;
434
435         if (mutex_lock_interruptible(&ap_perms_mutex))
436                 return -ERESTARTSYS;
437
438         /* try to find this zcdn device */
439         zcdndev = find_zcdndev_by_name(name);
440         if (!zcdndev) {
441                 rc = -ENOENT;
442                 goto unlockout;
443         }
444
445         /*
446          * The zcdn device is not hard destroyed. It is subject to
447          * reference counting and thus just needs to be unregistered.
448          */
449         put_device(&zcdndev->device);
450         device_unregister(&zcdndev->device);
451
452 unlockout:
453         mutex_unlock(&ap_perms_mutex);
454         return rc;
455 }
456
457 static void zcdn_destroy_all(void)
458 {
459         int i;
460         dev_t devt;
461         struct zcdn_device *zcdndev;
462
463         mutex_lock(&ap_perms_mutex);
464         for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
465                 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
466                 zcdndev = find_zcdndev_by_devt(devt);
467                 if (zcdndev) {
468                         put_device(&zcdndev->device);
469                         device_unregister(&zcdndev->device);
470                 }
471         }
472         mutex_unlock(&ap_perms_mutex);
473 }
474
475 /*
476  * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
477  *
478  * This function is not supported beyond zcrypt 1.3.1.
479  */
480 static ssize_t zcrypt_read(struct file *filp, char __user *buf,
481                            size_t count, loff_t *f_pos)
482 {
483         return -EPERM;
484 }
485
486 /*
487  * zcrypt_write(): Not allowed.
488  *
489  * Write is not allowed
490  */
491 static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
492                             size_t count, loff_t *f_pos)
493 {
494         return -EPERM;
495 }
496
497 /*
498  * zcrypt_open(): Count number of users.
499  *
500  * Device open function to count number of users.
501  */
502 static int zcrypt_open(struct inode *inode, struct file *filp)
503 {
504         struct ap_perms *perms = &ap_perms;
505
506         if (filp->f_inode->i_cdev == &zcrypt_cdev) {
507                 struct zcdn_device *zcdndev;
508
509                 if (mutex_lock_interruptible(&ap_perms_mutex))
510                         return -ERESTARTSYS;
511                 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
512                 /* find returns a reference, no get_device() needed */
513                 mutex_unlock(&ap_perms_mutex);
514                 if (zcdndev)
515                         perms = &zcdndev->perms;
516         }
517         filp->private_data = (void *)perms;
518
519         atomic_inc(&zcrypt_open_count);
520         return stream_open(inode, filp);
521 }
522
523 /*
524  * zcrypt_release(): Count number of users.
525  *
526  * Device close function to count number of users.
527  */
528 static int zcrypt_release(struct inode *inode, struct file *filp)
529 {
530         if (filp->f_inode->i_cdev == &zcrypt_cdev) {
531                 struct zcdn_device *zcdndev;
532
533                 mutex_lock(&ap_perms_mutex);
534                 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
535                 mutex_unlock(&ap_perms_mutex);
536                 if (zcdndev) {
537                         /* 2 puts here: one for find, one for open */
538                         put_device(&zcdndev->device);
539                         put_device(&zcdndev->device);
540                 }
541         }
542
543         atomic_dec(&zcrypt_open_count);
544         return 0;
545 }
546
547 static inline int zcrypt_check_ioctl(struct ap_perms *perms,
548                                      unsigned int cmd)
549 {
550         int rc = -EPERM;
551         int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
552
553         if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
554                 if (test_bit_inv(ioctlnr, perms->ioctlm))
555                         rc = 0;
556         }
557
558         if (rc)
559                 ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
560                                 __func__, ioctlnr, rc);
561
562         return rc;
563 }
564
565 static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
566 {
567         return test_bit_inv(card, perms->apm) ? true : false;
568 }
569
570 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
571 {
572         return test_bit_inv(queue, perms->aqm) ? true : false;
573 }
574
575 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
576                                                      struct zcrypt_queue *zq,
577                                                      struct module **pmod,
578                                                      unsigned int weight)
579 {
580         if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
581                 return NULL;
582         zcrypt_queue_get(zq);
583         get_device(&zq->queue->ap_dev.device);
584         atomic_add(weight, &zc->load);
585         atomic_add(weight, &zq->load);
586         zq->request_count++;
587         *pmod = zq->queue->ap_dev.device.driver->owner;
588         return zq;
589 }
590
591 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
592                                      struct zcrypt_queue *zq,
593                                      struct module *mod,
594                                      unsigned int weight)
595 {
596         zq->request_count--;
597         atomic_sub(weight, &zc->load);
598         atomic_sub(weight, &zq->load);
599         put_device(&zq->queue->ap_dev.device);
600         zcrypt_queue_put(zq);
601         module_put(mod);
602 }
603
604 static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
605                                        struct zcrypt_card *pref_zc,
606                                        unsigned int weight,
607                                        unsigned int pref_weight)
608 {
609         if (!pref_zc)
610                 return true;
611         weight += atomic_read(&zc->load);
612         pref_weight += atomic_read(&pref_zc->load);
613         if (weight == pref_weight)
614                 return atomic64_read(&zc->card->total_request_count) <
615                         atomic64_read(&pref_zc->card->total_request_count);
616         return weight < pref_weight;
617 }
618
619 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
620                                         struct zcrypt_queue *pref_zq,
621                                         unsigned int weight,
622                                         unsigned int pref_weight)
623 {
624         if (!pref_zq)
625                 return true;
626         weight += atomic_read(&zq->load);
627         pref_weight += atomic_read(&pref_zq->load);
628         if (weight == pref_weight)
629                 return zq->queue->total_request_count <
630                         pref_zq->queue->total_request_count;
631         return weight < pref_weight;
632 }
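/*
 * Note on the request routing implemented with the helpers above and
 * used by the dispatchers below: for every request all online cards
 * and queues which pass the config/chkstop, size and permission checks
 * are compared by their current weighted load (speed rating plus the
 * per device load counters). The candidate with the lowest weight is
 * picked; on a retry (tr->again_counter set) the card and queue the
 * request was sent to before receive an extra weight penalty so the
 * retry prefers a different device if one is available.
 */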
633
634 /*
635  * zcrypt ioctls.
636  */
637 static long zcrypt_rsa_modexpo(struct ap_perms *perms,
638                                struct zcrypt_track *tr,
639                                struct ica_rsa_modexpo *mex)
640 {
641         struct zcrypt_card *zc, *pref_zc;
642         struct zcrypt_queue *zq, *pref_zq;
643         struct ap_message ap_msg;
644         unsigned int wgt = 0, pref_wgt = 0;
645         unsigned int func_code;
646         int cpen, qpen, qid = 0, rc = -ENODEV;
647         struct module *mod;
648
649         trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
650
651         ap_init_message(&ap_msg);
652
653         if (mex->outputdatalength < mex->inputdatalength) {
654                 func_code = 0;
655                 rc = -EINVAL;
656                 goto out;
657         }
658
659         /*
660          * As long as outputdatalength is big enough, we can set the
661          * outputdatalength equal to the inputdatalength, since that is the
662          * number of bytes we will copy in any case
663          */
664         mex->outputdatalength = mex->inputdatalength;
665
666         rc = get_rsa_modex_fc(mex, &func_code);
667         if (rc)
668                 goto out;
669
670         pref_zc = NULL;
671         pref_zq = NULL;
672         spin_lock(&zcrypt_list_lock);
673         for_each_zcrypt_card(zc) {
674                 /* Check for usable accelerator or CCA card */
675                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
676                     !(zc->card->functions & 0x18000000))
677                         continue;
678                 /* Check for size limits */
679                 if (zc->min_mod_size > mex->inputdatalength ||
680                     zc->max_mod_size < mex->inputdatalength)
681                         continue;
682                 /* check if device node has admission for this card */
683                 if (!zcrypt_check_card(perms, zc->card->id))
684                         continue;
685                 /* get weight index of the card device  */
686                 wgt = zc->speed_rating[func_code];
687                 /* penalty if this msg was previously sent via this card */
688                 cpen = (tr && tr->again_counter && tr->last_qid &&
689                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
690                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
691                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
692                         continue;
693                 for_each_zcrypt_queue(zq, zc) {
694                         /* check if device is usable and eligible */
695                         if (!zq->online || !zq->ops->rsa_modexpo ||
696                             !zq->queue->config || zq->queue->chkstop)
697                                 continue;
698                         /* check if device node has admission for this queue */
699                         if (!zcrypt_check_queue(perms,
700                                                 AP_QID_QUEUE(zq->queue->qid)))
701                                 continue;
702                         /* penalty if the msg was previously sent at this qid */
703                         qpen = (tr && tr->again_counter && tr->last_qid &&
704                                 tr->last_qid == zq->queue->qid) ?
705                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
706                         if (!zcrypt_queue_compare(zq, pref_zq,
707                                                   wgt + cpen + qpen, pref_wgt))
708                                 continue;
709                         pref_zc = zc;
710                         pref_zq = zq;
711                         pref_wgt = wgt + cpen + qpen;
712                 }
713         }
714         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
715         spin_unlock(&zcrypt_list_lock);
716
717         if (!pref_zq) {
718                 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
719                                __func__);
720                 rc = -ENODEV;
721                 goto out;
722         }
723
724         qid = pref_zq->queue->qid;
725         rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
726
727         spin_lock(&zcrypt_list_lock);
728         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
729         spin_unlock(&zcrypt_list_lock);
730
731 out:
732         ap_release_message(&ap_msg);
733         if (tr) {
734                 tr->last_rc = rc;
735                 tr->last_qid = qid;
736         }
737         trace_s390_zcrypt_rep(mex, func_code, rc,
738                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
739         return rc;
740 }
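/*
 * Sketch of how zcrypt_rsa_modexpo() is typically reached from user
 * space (illustration only, error handling omitted; ICARSAMODEXPO and
 * struct ica_rsa_modexpo are defined in the zcrypt uapi header):
 *
 *	struct ica_rsa_modexpo mex = {
 *		.inputdata        = in_buf,
 *		.inputdatalength  = in_len,
 *		.outputdata       = out_buf,
 *		.outputdatalength = out_len,
 *		.b_key            = exponent_buf,
 *		.n_modulus        = modulus_buf,
 *	};
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	ioctl(fd, ICARSAMODEXPO, &mex);
 */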
741
742 static long zcrypt_rsa_crt(struct ap_perms *perms,
743                            struct zcrypt_track *tr,
744                            struct ica_rsa_modexpo_crt *crt)
745 {
746         struct zcrypt_card *zc, *pref_zc;
747         struct zcrypt_queue *zq, *pref_zq;
748         struct ap_message ap_msg;
749         unsigned int wgt = 0, pref_wgt = 0;
750         unsigned int func_code;
751         int cpen, qpen, qid = 0, rc = -ENODEV;
752         struct module *mod;
753
754         trace_s390_zcrypt_req(crt, TP_ICARSACRT);
755
756         ap_init_message(&ap_msg);
757
758         if (crt->outputdatalength < crt->inputdatalength) {
759                 func_code = 0;
760                 rc = -EINVAL;
761                 goto out;
762         }
763
764         /*
765          * As long as outputdatalength is big enough, we can set the
766          * outputdatalength equal to the inputdatalength, since that is the
767          * number of bytes we will copy in any case
768          */
769         crt->outputdatalength = crt->inputdatalength;
770
771         rc = get_rsa_crt_fc(crt, &func_code);
772         if (rc)
773                 goto out;
774
775         pref_zc = NULL;
776         pref_zq = NULL;
777         spin_lock(&zcrypt_list_lock);
778         for_each_zcrypt_card(zc) {
779                 /* Check for usable accelerator or CCA card */
780                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
781                     !(zc->card->functions & 0x18000000))
782                         continue;
783                 /* Check for size limits */
784                 if (zc->min_mod_size > crt->inputdatalength ||
785                     zc->max_mod_size < crt->inputdatalength)
786                         continue;
787                 /* check if device node has admission for this card */
788                 if (!zcrypt_check_card(perms, zc->card->id))
789                         continue;
790                 /* get weight index of the card device  */
791                 wgt = zc->speed_rating[func_code];
792                 /* penalty if this msg was previously sent via this card */
793                 cpen = (tr && tr->again_counter && tr->last_qid &&
794                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
795                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
796                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
797                         continue;
798                 for_each_zcrypt_queue(zq, zc) {
799                         /* check if device is usable and eligible */
800                         if (!zq->online || !zq->ops->rsa_modexpo_crt ||
801                             !zq->queue->config || zq->queue->chkstop)
802                                 continue;
803                         /* check if device node has admission for this queue */
804                         if (!zcrypt_check_queue(perms,
805                                                 AP_QID_QUEUE(zq->queue->qid)))
806                                 continue;
807                         /* penalty if the msg was previously sent at this qid */
808                         qpen = (tr && tr->again_counter && tr->last_qid &&
809                                 tr->last_qid == zq->queue->qid) ?
810                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
811                         if (!zcrypt_queue_compare(zq, pref_zq,
812                                                   wgt + cpen + qpen, pref_wgt))
813                                 continue;
814                         pref_zc = zc;
815                         pref_zq = zq;
816                         pref_wgt = wgt + cpen + qpen;
817                 }
818         }
819         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
820         spin_unlock(&zcrypt_list_lock);
821
822         if (!pref_zq) {
823                 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
824                                __func__);
825                 rc = -ENODEV;
826                 goto out;
827         }
828
829         qid = pref_zq->queue->qid;
830         rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
831
832         spin_lock(&zcrypt_list_lock);
833         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
834         spin_unlock(&zcrypt_list_lock);
835
836 out:
837         ap_release_message(&ap_msg);
838         if (tr) {
839                 tr->last_rc = rc;
840                 tr->last_qid = qid;
841         }
842         trace_s390_zcrypt_rep(crt, func_code, rc,
843                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
844         return rc;
845 }
846
847 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
848                               struct zcrypt_track *tr,
849                               struct ica_xcRB *xcrb)
850 {
851         struct zcrypt_card *zc, *pref_zc;
852         struct zcrypt_queue *zq, *pref_zq;
853         struct ap_message ap_msg;
854         unsigned int wgt = 0, pref_wgt = 0;
855         unsigned int func_code;
856         unsigned short *domain, tdom;
857         int cpen, qpen, qid = 0, rc = -ENODEV;
858         struct module *mod;
859
860         trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);
861
862         xcrb->status = 0;
863         ap_init_message(&ap_msg);
864
865         rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
866         if (rc)
867                 goto out;
868
869         tdom = *domain;
870         if (perms != &ap_perms && tdom < AP_DOMAINS) {
871                 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
872                         if (!test_bit_inv(tdom, perms->adm)) {
873                                 rc = -ENODEV;
874                                 goto out;
875                         }
876                 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
877                         rc = -EOPNOTSUPP;
878                         goto out;
879                 }
880         }
881         /*
882          * If a valid target domain is set and this domain is NOT a usage
883          * domain but a control only domain, autoselect target domain.
884          */
885         if (tdom < AP_DOMAINS &&
886             !ap_test_config_usage_domain(tdom) &&
887             ap_test_config_ctrl_domain(tdom))
888                 tdom = AUTOSEL_DOM;
889
890         pref_zc = NULL;
891         pref_zq = NULL;
892         spin_lock(&zcrypt_list_lock);
893         for_each_zcrypt_card(zc) {
894                 /* Check for usable CCA card */
895                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
896                     !(zc->card->functions & 0x10000000))
897                         continue;
898                 /* Check for user selected CCA card */
899                 if (xcrb->user_defined != AUTOSELECT &&
900                     xcrb->user_defined != zc->card->id)
901                         continue;
902                 /* check if request size exceeds card max msg size */
903                 if (ap_msg.len > zc->card->maxmsgsize)
904                         continue;
905                 /* check if device node has admission for this card */
906                 if (!zcrypt_check_card(perms, zc->card->id))
907                         continue;
908                 /* get weight index of the card device  */
909                 wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
910                 /* penalty if this msg was previously sent via this card */
911                 cpen = (tr && tr->again_counter && tr->last_qid &&
912                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
913                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
914                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
915                         continue;
916                 for_each_zcrypt_queue(zq, zc) {
917                         /* check for device usable and eligible */
918                         if (!zq->online || !zq->ops->send_cprb ||
919                             !zq->queue->config || zq->queue->chkstop ||
920                             (tdom != AUTOSEL_DOM &&
921                              tdom != AP_QID_QUEUE(zq->queue->qid)))
922                                 continue;
923                         /* check if device node has admission for this queue */
924                         if (!zcrypt_check_queue(perms,
925                                                 AP_QID_QUEUE(zq->queue->qid)))
926                                 continue;
927                         /* penalty if the msg was previously sent at this qid */
928                         qpen = (tr && tr->again_counter && tr->last_qid &&
929                                 tr->last_qid == zq->queue->qid) ?
930                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
931                         if (!zcrypt_queue_compare(zq, pref_zq,
932                                                   wgt + cpen + qpen, pref_wgt))
933                                 continue;
934                         pref_zc = zc;
935                         pref_zq = zq;
936                         pref_wgt = wgt + cpen + qpen;
937                 }
938         }
939         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
940         spin_unlock(&zcrypt_list_lock);
941
942         if (!pref_zq) {
943                 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
944                                __func__, xcrb->user_defined, *domain);
945                 rc = -ENODEV;
946                 goto out;
947         }
948
949         /* in case of auto select, provide the correct domain */
950         qid = pref_zq->queue->qid;
951         if (*domain == AUTOSEL_DOM)
952                 *domain = AP_QID_QUEUE(qid);
953
954         rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
955
956         spin_lock(&zcrypt_list_lock);
957         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
958         spin_unlock(&zcrypt_list_lock);
959
960 out:
961         ap_release_message(&ap_msg);
962         if (tr) {
963                 tr->last_rc = rc;
964                 tr->last_qid = qid;
965         }
966         trace_s390_zcrypt_rep(xcrb, func_code, rc,
967                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
968         return rc;
969 }
970
971 long zcrypt_send_cprb(struct ica_xcRB *xcrb)
972 {
973         return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
974 }
975 EXPORT_SYMBOL(zcrypt_send_cprb);
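/*
 * In-kernel callers (for example the CCA misc support) use this
 * wrapper with a kernel space ica_xcRB: userspace=false, the default
 * ap_perms and no retry tracking (tr=NULL) as set up above.
 */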
976
977 static bool is_desired_ep11_card(unsigned int dev_id,
978                                  unsigned short target_num,
979                                  struct ep11_target_dev *targets)
980 {
981         while (target_num-- > 0) {
982                 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
983                         return true;
984                 targets++;
985         }
986         return false;
987 }
988
989 static bool is_desired_ep11_queue(unsigned int dev_qid,
990                                   unsigned short target_num,
991                                   struct ep11_target_dev *targets)
992 {
993         int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
994
995         while (target_num-- > 0) {
996                 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
997                     (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
998                         return true;
999                 targets++;
1000         }
1001         return false;
1002 }
1003
1004 static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
1005                                    struct zcrypt_track *tr,
1006                                    struct ep11_urb *xcrb)
1007 {
1008         struct zcrypt_card *zc, *pref_zc;
1009         struct zcrypt_queue *zq, *pref_zq;
1010         struct ep11_target_dev *targets;
1011         unsigned short target_num;
1012         unsigned int wgt = 0, pref_wgt = 0;
1013         unsigned int func_code, domain;
1014         struct ap_message ap_msg;
1015         int cpen, qpen, qid = 0, rc = -ENODEV;
1016         struct module *mod;
1017
1018         trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
1019
1020         ap_init_message(&ap_msg);
1021
1022         target_num = (unsigned short)xcrb->targets_num;
1023
1024         /* empty list indicates autoselect (all available targets) */
1025         targets = NULL;
1026         if (target_num != 0) {
1027                 struct ep11_target_dev __user *uptr;
1028
1029                 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
1030                 if (!targets) {
1031                         func_code = 0;
1032                         rc = -ENOMEM;
1033                         goto out;
1034                 }
1035
1036                 uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
1037                 if (z_copy_from_user(userspace, targets, uptr,
1038                                      target_num * sizeof(*targets))) {
1039                         func_code = 0;
1040                         rc = -EFAULT;
1041                         goto out_free;
1042                 }
1043         }
1044
1045         rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
1046         if (rc)
1047                 goto out_free;
1048
1049         if (perms != &ap_perms && domain < AUTOSEL_DOM) {
1050                 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
1051                         if (!test_bit_inv(domain, perms->adm)) {
1052                                 rc = -ENODEV;
1053                                 goto out_free;
1054                         }
1055                 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
1056                         rc = -EOPNOTSUPP;
1057                         goto out_free;
1058                 }
1059         }
1060
1061         pref_zc = NULL;
1062         pref_zq = NULL;
1063         spin_lock(&zcrypt_list_lock);
1064         for_each_zcrypt_card(zc) {
1065                 /* Check for usable EP11 card */
1066                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
1067                     !(zc->card->functions & 0x04000000))
1068                         continue;
1069                 /* Check for user selected EP11 card */
1070                 if (targets &&
1071                     !is_desired_ep11_card(zc->card->id, target_num, targets))
1072                         continue;
1073                 /* check if request size exceeds card max msg size */
1074                 if (ap_msg.len > zc->card->maxmsgsize)
1075                         continue;
1076                 /* check if device node has admission for this card */
1077                 if (!zcrypt_check_card(perms, zc->card->id))
1078                         continue;
1079                 /* get weight index of the card device  */
1080                 wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
1081                 /* penalty if this msg was previously sent via this card */
1082                 cpen = (tr && tr->again_counter && tr->last_qid &&
1083                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
1084                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
1085                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
1086                         continue;
1087                 for_each_zcrypt_queue(zq, zc) {
1088                         /* check if device is usable and eligible */
1089                         if (!zq->online || !zq->ops->send_ep11_cprb ||
1090                             !zq->queue->config || zq->queue->chkstop ||
1091                             (targets &&
1092                              !is_desired_ep11_queue(zq->queue->qid,
1093                                                     target_num, targets)))
1094                                 continue;
1095                         /* check if device node has admission for this queue */
1096                         if (!zcrypt_check_queue(perms,
1097                                                 AP_QID_QUEUE(zq->queue->qid)))
1098                                 continue;
1099                         /* penalty if the msg was previously sent at this qid */
1100                         qpen = (tr && tr->again_counter && tr->last_qid &&
1101                                 tr->last_qid == zq->queue->qid) ?
1102                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
1103                         if (!zcrypt_queue_compare(zq, pref_zq,
1104                                                   wgt + cpen + qpen, pref_wgt))
1105                                 continue;
1106                         pref_zc = zc;
1107                         pref_zq = zq;
1108                         pref_wgt = wgt + cpen + qpen;
1109                 }
1110         }
1111         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1112         spin_unlock(&zcrypt_list_lock);
1113
1114         if (!pref_zq) {
1115                 if (targets && target_num == 1) {
1116                         ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
1117                                        __func__, (int)targets->ap_id,
1118                                        (int)targets->dom_id);
1119                 } else if (targets) {
1120                         ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
1121                                        __func__, (int)target_num);
1122                 } else {
1123                         ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
1124                                        __func__);
1125                 }
1126                 rc = -ENODEV;
1127                 goto out_free;
1128         }
1129
1130         qid = pref_zq->queue->qid;
1131         rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
1132
1133         spin_lock(&zcrypt_list_lock);
1134         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1135         spin_unlock(&zcrypt_list_lock);
1136
1137 out_free:
1138         kfree(targets);
1139 out:
1140         ap_release_message(&ap_msg);
1141         if (tr) {
1142                 tr->last_rc = rc;
1143                 tr->last_qid = qid;
1144         }
1145         trace_s390_zcrypt_rep(xcrb, func_code, rc,
1146                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1147         return rc;
1148 }
1149
1150 long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
1151 {
1152         return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
1153 }
1154 EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
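/*
 * As with zcrypt_send_cprb() above, this is the entry point for
 * in-kernel EP11 users (for example the EP11 misc support): the
 * request buffers are treated as kernel space and only the default
 * ap_perms apply, i.e. no zcdn device node restrictions.
 */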
1155
1156 static long zcrypt_rng(char *buffer)
1157 {
1158         struct zcrypt_card *zc, *pref_zc;
1159         struct zcrypt_queue *zq, *pref_zq;
1160         unsigned int wgt = 0, pref_wgt = 0;
1161         unsigned int func_code;
1162         struct ap_message ap_msg;
1163         unsigned int domain;
1164         int qid = 0, rc = -ENODEV;
1165         struct module *mod;
1166
1167         trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
1168
1169         ap_init_message(&ap_msg);
1170         rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
1171         if (rc)
1172                 goto out;
1173
1174         pref_zc = NULL;
1175         pref_zq = NULL;
1176         spin_lock(&zcrypt_list_lock);
1177         for_each_zcrypt_card(zc) {
1178                 /* Check for usable CCA card */
1179                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
1180                     !(zc->card->functions & 0x10000000))
1181                         continue;
1182                 /* get weight index of the card device  */
1183                 wgt = zc->speed_rating[func_code];
1184                 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
1185                         continue;
1186                 for_each_zcrypt_queue(zq, zc) {
1187                         /* check if device is usable and eligible */
1188                         if (!zq->online || !zq->ops->rng ||
1189                             !zq->queue->config || zq->queue->chkstop)
1190                                 continue;
1191                         if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
1192                                 continue;
1193                         pref_zc = zc;
1194                         pref_zq = zq;
1195                         pref_wgt = wgt;
1196                 }
1197         }
1198         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1199         spin_unlock(&zcrypt_list_lock);
1200
1201         if (!pref_zq) {
1202                 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
1203                                __func__);
1204                 rc = -ENODEV;
1205                 goto out;
1206         }
1207
1208         qid = pref_zq->queue->qid;
1209         rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
1210
1211         spin_lock(&zcrypt_list_lock);
1212         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1213         spin_unlock(&zcrypt_list_lock);
1214
1215 out:
1216         ap_release_message(&ap_msg);
1217         trace_s390_zcrypt_rep(buffer, func_code, rc,
1218                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1219         return rc;
1220 }
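/*
 * zcrypt_rng() is the backend for the hw_random device this module
 * registers later in this file: the selected queue's rng op fills the
 * given buffer with random bytes fetched from a CCA card.
 */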
1221
1222 static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
1223 {
1224         struct zcrypt_card *zc;
1225         struct zcrypt_queue *zq;
1226         struct zcrypt_device_status *stat;
1227         int card, queue;
1228
1229         memset(devstatus, 0, MAX_ZDEV_ENTRIES
1230                * sizeof(struct zcrypt_device_status));
1231
1232         spin_lock(&zcrypt_list_lock);
1233         for_each_zcrypt_card(zc) {
1234                 for_each_zcrypt_queue(zq, zc) {
1235                         card = AP_QID_CARD(zq->queue->qid);
1236                         if (card >= MAX_ZDEV_CARDIDS)
1237                                 continue;
1238                         queue = AP_QID_QUEUE(zq->queue->qid);
1239                         stat = &devstatus[card * AP_DOMAINS + queue];
1240                         stat->hwtype = zc->card->ap_dev.device_type;
1241                         stat->functions = zc->card->functions >> 26;
1242                         stat->qid = zq->queue->qid;
1243                         stat->online = zq->online ? 0x01 : 0x00;
1244                 }
1245         }
1246         spin_unlock(&zcrypt_list_lock);
1247 }
1248
1249 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
1250 {
1251         struct zcrypt_card *zc;
1252         struct zcrypt_queue *zq;
1253         struct zcrypt_device_status_ext *stat;
1254         int card, queue;
1255
1256         memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
1257                * sizeof(struct zcrypt_device_status_ext));
1258
1259         spin_lock(&zcrypt_list_lock);
1260         for_each_zcrypt_card(zc) {
1261                 for_each_zcrypt_queue(zq, zc) {
1262                         card = AP_QID_CARD(zq->queue->qid);
1263                         queue = AP_QID_QUEUE(zq->queue->qid);
1264                         stat = &devstatus[card * AP_DOMAINS + queue];
1265                         stat->hwtype = zc->card->ap_dev.device_type;
1266                         stat->functions = zc->card->functions >> 26;
1267                         stat->qid = zq->queue->qid;
1268                         stat->online = zq->online ? 0x01 : 0x00;
1269                 }
1270         }
1271         spin_unlock(&zcrypt_list_lock);
1272 }
1273 EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
1274
1275 int zcrypt_device_status_ext(int card, int queue,
1276                              struct zcrypt_device_status_ext *devstat)
1277 {
1278         struct zcrypt_card *zc;
1279         struct zcrypt_queue *zq;
1280
1281         memset(devstat, 0, sizeof(*devstat));
1282
1283         spin_lock(&zcrypt_list_lock);
1284         for_each_zcrypt_card(zc) {
1285                 for_each_zcrypt_queue(zq, zc) {
1286                         if (card == AP_QID_CARD(zq->queue->qid) &&
1287                             queue == AP_QID_QUEUE(zq->queue->qid)) {
1288                                 devstat->hwtype = zc->card->ap_dev.device_type;
1289                                 devstat->functions = zc->card->functions >> 26;
1290                                 devstat->qid = zq->queue->qid;
1291                                 devstat->online = zq->online ? 0x01 : 0x00;
1292                                 spin_unlock(&zcrypt_list_lock);
1293                                 return 0;
1294                         }
1295                 }
1296         }
1297         spin_unlock(&zcrypt_list_lock);
1298
1299         return -ENODEV;
1300 }
1301 EXPORT_SYMBOL(zcrypt_device_status_ext);
1302
1303 static void zcrypt_status_mask(char status[], size_t max_adapters)
1304 {
1305         struct zcrypt_card *zc;
1306         struct zcrypt_queue *zq;
1307         int card;
1308
1309         memset(status, 0, max_adapters);
1310         spin_lock(&zcrypt_list_lock);
1311         for_each_zcrypt_card(zc) {
1312                 for_each_zcrypt_queue(zq, zc) {
1313                         card = AP_QID_CARD(zq->queue->qid);
1314                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1315                             card >= max_adapters)
1316                                 continue;
1317                         status[card] = zc->online ? zc->user_space_type : 0x0d;
1318                 }
1319         }
1320         spin_unlock(&zcrypt_list_lock);
1321 }
1322
1323 static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
1324 {
1325         struct zcrypt_card *zc;
1326         struct zcrypt_queue *zq;
1327         int card;
1328
1329         memset(qdepth, 0, max_adapters);
1330         spin_lock(&zcrypt_list_lock);
1331         local_bh_disable();
1332         for_each_zcrypt_card(zc) {
1333                 for_each_zcrypt_queue(zq, zc) {
1334                         card = AP_QID_CARD(zq->queue->qid);
1335                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1336                             card >= max_adapters)
1337                                 continue;
1338                         spin_lock(&zq->queue->lock);
1339                         qdepth[card] =
1340                                 zq->queue->pendingq_count +
1341                                 zq->queue->requestq_count;
1342                         spin_unlock(&zq->queue->lock);
1343                 }
1344         }
1345         local_bh_enable();
1346         spin_unlock(&zcrypt_list_lock);
1347 }
1348
1349 static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
1350 {
1351         struct zcrypt_card *zc;
1352         struct zcrypt_queue *zq;
1353         int card;
1354         u64 cnt;
1355
1356         memset(reqcnt, 0, sizeof(int) * max_adapters);
1357         spin_lock(&zcrypt_list_lock);
1358         local_bh_disable();
1359         for_each_zcrypt_card(zc) {
1360                 for_each_zcrypt_queue(zq, zc) {
1361                         card = AP_QID_CARD(zq->queue->qid);
1362                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1363                             card >= max_adapters)
1364                                 continue;
1365                         spin_lock(&zq->queue->lock);
1366                         cnt = zq->queue->total_request_count;
1367                         spin_unlock(&zq->queue->lock);
1368                         reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
1369                 }
1370         }
1371         local_bh_enable();
1372         spin_unlock(&zcrypt_list_lock);
1373 }
1374
1375 static int zcrypt_pendingq_count(void)
1376 {
1377         struct zcrypt_card *zc;
1378         struct zcrypt_queue *zq;
1379         int pendingq_count;
1380
1381         pendingq_count = 0;
1382         spin_lock(&zcrypt_list_lock);
1383         local_bh_disable();
1384         for_each_zcrypt_card(zc) {
1385                 for_each_zcrypt_queue(zq, zc) {
1386                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1387                                 continue;
1388                         spin_lock(&zq->queue->lock);
1389                         pendingq_count += zq->queue->pendingq_count;
1390                         spin_unlock(&zq->queue->lock);
1391                 }
1392         }
1393         local_bh_enable();
1394         spin_unlock(&zcrypt_list_lock);
1395         return pendingq_count;
1396 }
1397
1398 static int zcrypt_requestq_count(void)
1399 {
1400         struct zcrypt_card *zc;
1401         struct zcrypt_queue *zq;
1402         int requestq_count;
1403
1404         requestq_count = 0;
1405         spin_lock(&zcrypt_list_lock);
1406         local_bh_disable();
1407         for_each_zcrypt_card(zc) {
1408                 for_each_zcrypt_queue(zq, zc) {
1409                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1410                                 continue;
1411                         spin_lock(&zq->queue->lock);
1412                         requestq_count += zq->queue->requestq_count;
1413                         spin_unlock(&zq->queue->lock);
1414                 }
1415         }
1416         local_bh_enable();
1417         spin_unlock(&zcrypt_list_lock);
1418         return requestq_count;
1419 }
1420
1421 static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
1422 {
1423         int rc;
1424         struct zcrypt_track tr;
1425         struct ica_rsa_modexpo mex;
1426         struct ica_rsa_modexpo __user *umex = (void __user *)arg;
1427
1428         memset(&tr, 0, sizeof(tr));
1429         if (copy_from_user(&mex, umex, sizeof(mex)))
1430                 return -EFAULT;
1431
1432         do {
1433                 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1434                 if (rc == -EAGAIN)
1435                         tr.again_counter++;
1436         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1437         /* on failure: retry once again after a requested rescan */
1438         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1439                 do {
1440                         rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1441                         if (rc == -EAGAIN)
1442                                 tr.again_counter++;
1443                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1444         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1445                 rc = -EIO;
1446         if (rc) {
1447                 ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc);
1448                 return rc;
1449         }
1450         return put_user(mex.outputdatalength, &umex->outputdatalength);
1451 }
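/*
 * The retry pattern above is shared by all ioctl handlers in this
 * file: a request is retried on -EAGAIN up to TRACK_AGAIN_MAX times,
 * an -ENODEV result triggers one AP bus rescan (if one was requested)
 * followed by a second retry loop, and an exhausted retry counter is
 * reported to user space as -EIO.
 */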
1452
1453 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
1454 {
1455         int rc;
1456         struct zcrypt_track tr;
1457         struct ica_rsa_modexpo_crt crt;
1458         struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;
1459
1460         memset(&tr, 0, sizeof(tr));
1461         if (copy_from_user(&crt, ucrt, sizeof(crt)))
1462                 return -EFAULT;
1463
1464         do {
1465                 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1466                 if (rc == -EAGAIN)
1467                         tr.again_counter++;
1468         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1469         /* on failure: retry once again after a requested rescan */
1470         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1471                 do {
1472                         rc = zcrypt_rsa_crt(perms, &tr, &crt);
1473                         if (rc == -EAGAIN)
1474                                 tr.again_counter++;
1475                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1476         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1477                 rc = -EIO;
1478         if (rc) {
1479                 ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc);
1480                 return rc;
1481         }
1482         return put_user(crt.outputdatalength, &ucrt->outputdatalength);
1483 }
1484
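/*
 * The CPRB based handlers (the CCA variant here and the EP11 variant
 * below) copy the complete control block back to user space at the
 * end rather than just a single output length, since fields such as
 * the reply lengths and the xcrb status are updated during processing.
 */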
1485 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
1486 {
1487         int rc;
1488         struct ica_xcRB xcrb;
1489         struct zcrypt_track tr;
1490         struct ica_xcRB __user *uxcrb = (void __user *)arg;
1491
1492         memset(&tr, 0, sizeof(tr));
1493         if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1494                 return -EFAULT;
1495
1496         do {
1497                 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
1498                 if (rc == -EAGAIN)
1499                         tr.again_counter++;
1500         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1501         /* on failure: retry once again after a requested rescan */
1502         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1503                 do {
1504                         rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
1505                         if (rc == -EAGAIN)
1506                                 tr.again_counter++;
1507                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1508         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1509                 rc = -EIO;
1510         if (rc)
1511                 ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
1512                                rc, xcrb.status);
1513         if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1514                 return -EFAULT;
1515         return rc;
1516 }
1517
1518 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
1519 {
1520         int rc;
1521         struct ep11_urb xcrb;
1522         struct zcrypt_track tr;
1523         struct ep11_urb __user *uxcrb = (void __user *)arg;
1524
1525         memset(&tr, 0, sizeof(tr));
1526         if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1527                 return -EFAULT;
1528
1529         do {
1530                 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1531                 if (rc == -EAGAIN)
1532                         tr.again_counter++;
1533         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1534         /* on failure: retry once again after a requested rescan */
1535         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1536                 do {
1537                         rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1538                         if (rc == -EAGAIN)
1539                                 tr.again_counter++;
1540                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1541         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1542                 rc = -EIO;
1543         if (rc)
1544                 ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc);
1545         if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1546                 return -EFAULT;
1547         return rc;
1548 }
1549
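/*
 * Main ioctl entry point for the zcrypt device nodes: check the
 * caller's ioctl permissions first, then dispatch to the worker
 * functions above or handle the status and counter queries inline.
 * Unknown commands are answered with -ENOIOCTLCMD.
 */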
1550 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1551                                   unsigned long arg)
1552 {
1553         int rc;
1554         struct ap_perms *perms =
1555                 (struct ap_perms *)filp->private_data;
1556
1557         rc = zcrypt_check_ioctl(perms, cmd);
1558         if (rc)
1559                 return rc;
1560
1561         switch (cmd) {
1562         case ICARSAMODEXPO:
1563                 return icarsamodexpo_ioctl(perms, arg);
1564         case ICARSACRT:
1565                 return icarsacrt_ioctl(perms, arg);
1566         case ZSECSENDCPRB:
1567                 return zsecsendcprb_ioctl(perms, arg);
1568         case ZSENDEP11CPRB:
1569                 return zsendep11cprb_ioctl(perms, arg);
1570         case ZCRYPT_DEVICE_STATUS: {
1571                 struct zcrypt_device_status_ext *device_status;
1572                 size_t total_size = MAX_ZDEV_ENTRIES_EXT
1573                         * sizeof(struct zcrypt_device_status_ext);
1574
1575                 device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
1576                                                sizeof(struct zcrypt_device_status_ext),
1577                                                GFP_KERNEL);
1578                 if (!device_status)
1579                         return -ENOMEM;
1580                 zcrypt_device_status_mask_ext(device_status);
1581                 if (copy_to_user((char __user *)arg, device_status,
1582                                  total_size))
1583                         rc = -EFAULT;
1584                 kvfree(device_status);
1585                 return rc;
1586         }
1587         case ZCRYPT_STATUS_MASK: {
1588                 char status[AP_DEVICES];
1589
1590                 zcrypt_status_mask(status, AP_DEVICES);
1591                 if (copy_to_user((char __user *)arg, status, sizeof(status)))
1592                         return -EFAULT;
1593                 return 0;
1594         }
1595         case ZCRYPT_QDEPTH_MASK: {
1596                 char qdepth[AP_DEVICES];
1597
1598                 zcrypt_qdepth_mask(qdepth, AP_DEVICES);
1599                 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
1600                         return -EFAULT;
1601                 return 0;
1602         }
1603         case ZCRYPT_PERDEV_REQCNT: {
1604                 u32 *reqcnt;
1605
1606                 reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
1607                 if (!reqcnt)
1608                         return -ENOMEM;
1609                 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
1610                 if (copy_to_user((int __user *)arg, reqcnt,
1611                                  sizeof(u32) * AP_DEVICES))
1612                         rc = -EFAULT;
1613                 kfree(reqcnt);
1614                 return rc;
1615         }
1616         case Z90STAT_REQUESTQ_COUNT:
1617                 return put_user(zcrypt_requestq_count(), (int __user *)arg);
1618         case Z90STAT_PENDINGQ_COUNT:
1619                 return put_user(zcrypt_pendingq_count(), (int __user *)arg);
1620         case Z90STAT_TOTALOPEN_COUNT:
1621                 return put_user(atomic_read(&zcrypt_open_count),
1622                                 (int __user *)arg);
1623         case Z90STAT_DOMAIN_INDEX:
1624                 return put_user(ap_domain_index, (int __user *)arg);
1625         /*
1626          * Deprecated ioctls
1627          */
1628         case ZDEVICESTATUS: {
1629                 /* the old ioctl supports only 64 adapters */
1630                 struct zcrypt_device_status *device_status;
1631                 size_t total_size = MAX_ZDEV_ENTRIES
1632                         * sizeof(struct zcrypt_device_status);
1633
1634                 device_status = kzalloc(total_size, GFP_KERNEL);
1635                 if (!device_status)
1636                         return -ENOMEM;
1637                 zcrypt_device_status_mask(device_status);
1638                 if (copy_to_user((char __user *)arg, device_status,
1639                                  total_size))
1640                         rc = -EFAULT;
1641                 kfree(device_status);
1642                 return rc;
1643         }
1644         case Z90STAT_STATUS_MASK: {
1645                 /* the old ioctl supports only 64 adapters */
1646                 char status[MAX_ZDEV_CARDIDS];
1647
1648                 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
1649                 if (copy_to_user((char __user *)arg, status, sizeof(status)))
1650                         return -EFAULT;
1651                 return 0;
1652         }
1653         case Z90STAT_QDEPTH_MASK: {
1654                 /* the old ioctl supports only 64 adapters */
1655                 char qdepth[MAX_ZDEV_CARDIDS];
1656
1657                 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
1658                 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
1659                         return -EFAULT;
1660                 return 0;
1661         }
1662         case Z90STAT_PERDEV_REQCNT: {
1663                 /* the old ioctl supports only 64 adapters */
1664                 u32 reqcnt[MAX_ZDEV_CARDIDS];
1665
1666                 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
1667                 if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
1668                         return -EFAULT;
1669                 return 0;
1670         }
1671         /* unknown ioctl number */
1672         default:
1673                 ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
1674                 return -ENOIOCTLCMD;
1675         }
1676 }
1677
1678 #ifdef CONFIG_COMPAT
1679 /*
1680  * ioctl32 conversion routines
1681  */
1682 struct compat_ica_rsa_modexpo {
1683         compat_uptr_t   inputdata;
1684         unsigned int    inputdatalength;
1685         compat_uptr_t   outputdata;
1686         unsigned int    outputdatalength;
1687         compat_uptr_t   b_key;
1688         compat_uptr_t   n_modulus;
1689 };
1690
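/*
 * The compat_* structures mirror their 64-bit counterparts but carry
 * 32-bit user space pointers (compat_uptr_t). The trans_*32() helpers
 * widen those pointers with compat_ptr() and hand a converted 64-bit
 * structure to the common zcrypt code.
 */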
1691 static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
1692                             unsigned int cmd, unsigned long arg)
1693 {
1694         struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
1695         struct compat_ica_rsa_modexpo mex32;
1696         struct ica_rsa_modexpo mex64;
1697         struct zcrypt_track tr;
1698         long rc;
1699
1700         memset(&tr, 0, sizeof(tr));
1701         if (copy_from_user(&mex32, umex32, sizeof(mex32)))
1702                 return -EFAULT;
1703         mex64.inputdata = compat_ptr(mex32.inputdata);
1704         mex64.inputdatalength = mex32.inputdatalength;
1705         mex64.outputdata = compat_ptr(mex32.outputdata);
1706         mex64.outputdatalength = mex32.outputdatalength;
1707         mex64.b_key = compat_ptr(mex32.b_key);
1708         mex64.n_modulus = compat_ptr(mex32.n_modulus);
1709         do {
1710                 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1711                 if (rc == -EAGAIN)
1712                         tr.again_counter++;
1713         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1714         /* on failure: retry once again after a requested rescan */
1715         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1716                 do {
1717                         rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1718                         if (rc == -EAGAIN)
1719                                 tr.again_counter++;
1720                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1721         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1722                 rc = -EIO;
1723         if (rc)
1724                 return rc;
1725         return put_user(mex64.outputdatalength,
1726                         &umex32->outputdatalength);
1727 }
1728
1729 struct compat_ica_rsa_modexpo_crt {
1730         compat_uptr_t   inputdata;
1731         unsigned int    inputdatalength;
1732         compat_uptr_t   outputdata;
1733         unsigned int    outputdatalength;
1734         compat_uptr_t   bp_key;
1735         compat_uptr_t   bq_key;
1736         compat_uptr_t   np_prime;
1737         compat_uptr_t   nq_prime;
1738         compat_uptr_t   u_mult_inv;
1739 };
1740
1741 static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
1742                                 unsigned int cmd, unsigned long arg)
1743 {
1744         struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
1745         struct compat_ica_rsa_modexpo_crt crt32;
1746         struct ica_rsa_modexpo_crt crt64;
1747         struct zcrypt_track tr;
1748         long rc;
1749
1750         memset(&tr, 0, sizeof(tr));
1751         if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
1752                 return -EFAULT;
1753         crt64.inputdata = compat_ptr(crt32.inputdata);
1754         crt64.inputdatalength = crt32.inputdatalength;
1755         crt64.outputdata = compat_ptr(crt32.outputdata);
1756         crt64.outputdatalength = crt32.outputdatalength;
1757         crt64.bp_key = compat_ptr(crt32.bp_key);
1758         crt64.bq_key = compat_ptr(crt32.bq_key);
1759         crt64.np_prime = compat_ptr(crt32.np_prime);
1760         crt64.nq_prime = compat_ptr(crt32.nq_prime);
1761         crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
1762         do {
1763                 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1764                 if (rc == -EAGAIN)
1765                         tr.again_counter++;
1766         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1767         /* on failure: retry once again after a requested rescan */
1768         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1769                 do {
1770                         rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1771                         if (rc == -EAGAIN)
1772                                 tr.again_counter++;
1773                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1774         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1775                 rc = -EIO;
1776         if (rc)
1777                 return rc;
1778         return put_user(crt64.outputdatalength,
1779                         &ucrt32->outputdatalength);
1780 }
1781
1782 struct compat_ica_xcrb {
1783         unsigned short  agent_ID;
1784         unsigned int    user_defined;
1785         unsigned short  request_ID;
1786         unsigned int    request_control_blk_length;
1787         unsigned char   padding1[16 - sizeof(compat_uptr_t)];
1788         compat_uptr_t   request_control_blk_addr;
1789         unsigned int    request_data_length;
1790         char            padding2[16 - sizeof(compat_uptr_t)];
1791         compat_uptr_t   request_data_address;
1792         unsigned int    reply_control_blk_length;
1793         char            padding3[16 - sizeof(compat_uptr_t)];
1794         compat_uptr_t   reply_control_blk_addr;
1795         unsigned int    reply_data_length;
1796         char            padding4[16 - sizeof(compat_uptr_t)];
1797         compat_uptr_t   reply_data_addr;
1798         unsigned short  priority_window;
1799         unsigned int    status;
1800 } __packed;
1801
1802 static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
1803                          unsigned int cmd, unsigned long arg)
1804 {
1805         struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
1806         struct compat_ica_xcrb xcrb32;
1807         struct zcrypt_track tr;
1808         struct ica_xcRB xcrb64;
1809         long rc;
1810
1811         memset(&tr, 0, sizeof(tr));
1812         if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
1813                 return -EFAULT;
1814         xcrb64.agent_ID = xcrb32.agent_ID;
1815         xcrb64.user_defined = xcrb32.user_defined;
1816         xcrb64.request_ID = xcrb32.request_ID;
1817         xcrb64.request_control_blk_length =
1818                 xcrb32.request_control_blk_length;
1819         xcrb64.request_control_blk_addr =
1820                 compat_ptr(xcrb32.request_control_blk_addr);
1821         xcrb64.request_data_length =
1822                 xcrb32.request_data_length;
1823         xcrb64.request_data_address =
1824                 compat_ptr(xcrb32.request_data_address);
1825         xcrb64.reply_control_blk_length =
1826                 xcrb32.reply_control_blk_length;
1827         xcrb64.reply_control_blk_addr =
1828                 compat_ptr(xcrb32.reply_control_blk_addr);
1829         xcrb64.reply_data_length = xcrb32.reply_data_length;
1830         xcrb64.reply_data_addr =
1831                 compat_ptr(xcrb32.reply_data_addr);
1832         xcrb64.priority_window = xcrb32.priority_window;
1833         xcrb64.status = xcrb32.status;
1834         do {
1835                 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
1836                 if (rc == -EAGAIN)
1837                         tr.again_counter++;
1838         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1839         /* on failure: retry once again after a requested rescan */
1840         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1841                 do {
1842                         rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
1843                         if (rc == -EAGAIN)
1844                                 tr.again_counter++;
1845                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1846         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1847                 rc = -EIO;
1848         xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
1849         xcrb32.reply_data_length = xcrb64.reply_data_length;
1850         xcrb32.status = xcrb64.status;
1851         if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
1852                 return -EFAULT;
1853         return rc;
1854 }
1855
1856 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
1857                                 unsigned long arg)
1858 {
1859         int rc;
1860         struct ap_perms *perms =
1861                 (struct ap_perms *)filp->private_data;
1862
1863         rc = zcrypt_check_ioctl(perms, cmd);
1864         if (rc)
1865                 return rc;
1866
1867         if (cmd == ICARSAMODEXPO)
1868                 return trans_modexpo32(perms, filp, cmd, arg);
1869         if (cmd == ICARSACRT)
1870                 return trans_modexpo_crt32(perms, filp, cmd, arg);
1871         if (cmd == ZSECSENDCPRB)
1872                 return trans_xcrb32(perms, filp, cmd, arg);
1873         return zcrypt_unlocked_ioctl(filp, cmd, arg);
1874 }
1875 #endif
1876
1877 /*
1878  * Misc device file operations.
1879  */
1880 static const struct file_operations zcrypt_fops = {
1881         .owner          = THIS_MODULE,
1882         .read           = zcrypt_read,
1883         .write          = zcrypt_write,
1884         .unlocked_ioctl = zcrypt_unlocked_ioctl,
1885 #ifdef CONFIG_COMPAT
1886         .compat_ioctl   = zcrypt_compat_ioctl,
1887 #endif
1888         .open           = zcrypt_open,
1889         .release        = zcrypt_release,
1890         .llseek         = no_llseek,
1891 };
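
/*
 * Note: zcrypt_fops serves both the classic /dev/z90crypt misc device
 * registered below and the additional zcdn character device nodes set
 * up in zcdn_init() (see cdev_init(&zcrypt_cdev, &zcrypt_fops)).
 */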
1892
1893 /*
1894  * Misc device.
1895  */
1896 static struct miscdevice zcrypt_misc_device = {
1897         .minor      = MISC_DYNAMIC_MINOR,
1898         .name       = "z90crypt",
1899         .fops       = &zcrypt_fops,
1900 };
1901
1902 static int zcrypt_rng_device_count;
1903 static u32 *zcrypt_rng_buffer;
1904 static int zcrypt_rng_buffer_index;
1905 static DEFINE_MUTEX(zcrypt_rng_mutex);
1906
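/*
 * Hand out random data to the hwrng core one u32 at a time. The buffer
 * page allocated in zcrypt_rng_device_add() is refilled via zcrypt_rng()
 * only when it has been drained; zcrypt_rng_buffer_index counts the
 * remaining u32 words.
 */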
1907 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1908 {
1909         int rc;
1910
1911         /*
1912          * We don't need locking here because the RNG API guarantees serialized
1913          * read method calls.
1914          */
1915         if (zcrypt_rng_buffer_index == 0) {
1916                 rc = zcrypt_rng((char *)zcrypt_rng_buffer);
1917                 /* on failure: retry once again after a requested rescan */
1918                 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1919                         rc = zcrypt_rng((char *)zcrypt_rng_buffer);
1920                 if (rc < 0)
1921                         return -EIO;
1922                 zcrypt_rng_buffer_index = rc / sizeof(*data);
1923         }
1924         *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
1925         return sizeof(*data);
1926 }
1927
1928 static struct hwrng zcrypt_rng_dev = {
1929         .name           = "zcrypt",
1930         .data_read      = zcrypt_rng_data_read,
1931         .quality        = 990,
1932 };
1933
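/*
 * Reference counted hwrng registration: the zcrypt rng device is
 * registered with the hwrng core when the first random-number-capable
 * queue shows up and unregistered again when the last one is removed.
 */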
1934 int zcrypt_rng_device_add(void)
1935 {
1936         int rc = 0;
1937
1938         mutex_lock(&zcrypt_rng_mutex);
1939         if (zcrypt_rng_device_count == 0) {
1940                 zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
1941                 if (!zcrypt_rng_buffer) {
1942                         rc = -ENOMEM;
1943                         goto out;
1944                 }
1945                 zcrypt_rng_buffer_index = 0;
1946                 rc = hwrng_register(&zcrypt_rng_dev);
1947                 if (rc)
1948                         goto out_free;
1949                 zcrypt_rng_device_count = 1;
1950         } else {
1951                 zcrypt_rng_device_count++;
1952         }
1953         mutex_unlock(&zcrypt_rng_mutex);
1954         return 0;
1955
1956 out_free:
1957         free_page((unsigned long)zcrypt_rng_buffer);
1958 out:
1959         mutex_unlock(&zcrypt_rng_mutex);
1960         return rc;
1961 }
1962
1963 void zcrypt_rng_device_remove(void)
1964 {
1965         mutex_lock(&zcrypt_rng_mutex);
1966         zcrypt_rng_device_count--;
1967         if (zcrypt_rng_device_count == 0) {
1968                 hwrng_unregister(&zcrypt_rng_dev);
1969                 free_page((unsigned long)zcrypt_rng_buffer);
1970         }
1971         mutex_unlock(&zcrypt_rng_mutex);
1972 }
1973
1974 /*
1975  * Wait until the zcrypt api is operational.
1976  * The AP bus scan and the binding of ap devices to device drivers are
1977  * asynchronous jobs. This function waits until these initial jobs are
1978  * done and thus the zcrypt api should be ready to serve crypto
1979  * requests - if there are resources available. The function uses an
1980  * internal timeout of 60s. The very first caller either waits until
1981  * the ap bus bindings are complete or until the timeout expires; the
1982  * outcome is remembered, so further callers are blocked only until
1983  * that decision (timeout or bindings complete) has been made.
1984  * Returns 0 on success, -ETIME on timeout, -EINTR if interrupted.
1985  */
1986 int zcrypt_wait_api_operational(void)
1987 {
1988         static DEFINE_MUTEX(zcrypt_wait_api_lock);
1989         static int zcrypt_wait_api_state;
1990         int rc;
1991
1992         rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
1993         if (rc)
1994                 return rc;
1995
1996         switch (zcrypt_wait_api_state) {
1997         case 0:
1998                 /* initial state, wait for the ap bus bindings to complete */
1999                 rc = ap_wait_init_apqn_bindings_complete(
2000                         msecs_to_jiffies(60 * 1000));
2001                 switch (rc) {
2002                 case 0:
2003                         /* ap bus bindings are complete */
2004                         zcrypt_wait_api_state = 1;
2005                         break;
2006                 case -EINTR:
2007                         /* interrupted, go back to caller */
2008                         break;
2009                 case -ETIME:
2010                         /* timeout */
2011                         ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
2012                                         __func__);
2013                         zcrypt_wait_api_state = -ETIME;
2014                         break;
2015                 default:
2016                         /* other failure */
2017                         ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
2018                                        __func__, rc);
2019                         break;
2020                 }
2021                 break;
2022         case 1:
2023                 /* a previous caller already found ap bus bindings complete */
2024                 rc = 0;
2025                 break;
2026         default:
2027                 /* a previous caller had timeout or other failure */
2028                 rc = zcrypt_wait_api_state;
2029                 break;
2030         }
2031
2032         mutex_unlock(&zcrypt_wait_api_lock);
2033
2034         return rc;
2035 }
2036 EXPORT_SYMBOL(zcrypt_wait_api_operational);
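
/*
 * Hypothetical caller sketch (illustration only, no such caller is
 * defined here): a module that wants to issue crypto requests right
 * after boot can serialize on the initial AP bus scan like
 *
 *     rc = zcrypt_wait_api_operational();
 *     if (rc)
 *             return rc;
 *
 * and only then start sending requests through the zcrypt api.
 */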
2037
2038 int __init zcrypt_debug_init(void)
2039 {
2040         zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
2041                                          DBF_MAX_SPRINTF_ARGS * sizeof(long));
2042         debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
2043         debug_set_level(zcrypt_dbf_info, DBF_ERR);
2044
2045         return 0;
2046 }
2047
2048 void zcrypt_debug_exit(void)
2049 {
2050         debug_unregister(zcrypt_dbf_info);
2051 }
2052
2053 static int __init zcdn_init(void)
2054 {
2055         int rc;
2056
2057         /* create a new class 'zcrypt' */
2058         zcrypt_class = class_create(ZCRYPT_NAME);
2059         if (IS_ERR(zcrypt_class)) {
2060                 rc = PTR_ERR(zcrypt_class);
2061                 goto out_class_create_failed;
2062         }
2063         zcrypt_class->dev_release = zcdn_device_release;
2064
2065         /* alloc device minor range */
2066         rc = alloc_chrdev_region(&zcrypt_devt,
2067                                  0, ZCRYPT_MAX_MINOR_NODES,
2068                                  ZCRYPT_NAME);
2069         if (rc)
2070                 goto out_alloc_chrdev_failed;
2071
2072         cdev_init(&zcrypt_cdev, &zcrypt_fops);
2073         zcrypt_cdev.owner = THIS_MODULE;
2074         rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2075         if (rc)
2076                 goto out_cdev_add_failed;
2077
2078         /* need some class specific sysfs attributes */
2079         rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
2080         if (rc)
2081                 goto out_class_create_file_1_failed;
2082         rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
2083         if (rc)
2084                 goto out_class_create_file_2_failed;
2085
2086         return 0;
2087
2088 out_class_create_file_2_failed:
2089         class_remove_file(zcrypt_class, &class_attr_zcdn_create);
2090 out_class_create_file_1_failed:
2091         cdev_del(&zcrypt_cdev);
2092 out_cdev_add_failed:
2093         unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2094 out_alloc_chrdev_failed:
2095         class_destroy(zcrypt_class);
2096 out_class_create_failed:
2097         return rc;
2098 }
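
/*
 * With zcdn_init() done, additional device nodes can be created and
 * destroyed at runtime through the zcrypt class attributes, e.g.
 * (assuming sysfs is mounted at the usual place):
 *
 *     echo my_node > /sys/class/zcrypt/zcdn_create
 *     echo my_node > /sys/class/zcrypt/zcdn_destroy
 *
 * Each node gets a minor from the ZCRYPT_MAX_MINOR_NODES range
 * allocated above and is served by zcrypt_fops.
 */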
2099
2100 static void zcdn_exit(void)
2101 {
2102         class_remove_file(zcrypt_class, &class_attr_zcdn_create);
2103         class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
2104         zcdn_destroy_all();
2105         cdev_del(&zcrypt_cdev);
2106         unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2107         class_destroy(zcrypt_class);
2108 }
2109
2110 /*
2111  * zcrypt_api_init(): Module initialization.
2112  *
2113  * Sets up debug, zcdn nodes, the misc device and msgtype drivers.
2114  */
2115 int __init zcrypt_api_init(void)
2116 {
2117         int rc;
2118
2119         rc = zcrypt_debug_init();
2120         if (rc)
2121                 goto out;
2122
2123         rc = zcdn_init();
2124         if (rc)
2125                 goto out;
2126
2127         /* Register the request sprayer. */
2128         rc = misc_register(&zcrypt_misc_device);
2129         if (rc < 0)
2130                 goto out_misc_register_failed;
2131
2132         zcrypt_msgtype6_init();
2133         zcrypt_msgtype50_init();
2134
2135         return 0;
2136
2137 out_misc_register_failed:
2138         zcdn_exit();
2139         zcrypt_debug_exit();
2140 out:
2141         return rc;
2142 }
2143
2144 /*
2145  * zcrypt_api_exit(): Module termination.
2146  *
2147  * Unregisters device nodes and drivers and releases the debug facility.
2148  */
2149 void __exit zcrypt_api_exit(void)
2150 {
2151         zcdn_exit();
2152         misc_deregister(&zcrypt_misc_device);
2153         zcrypt_msgtype6_exit();
2154         zcrypt_msgtype50_exit();
2155         zcrypt_ccamisc_exit();
2156         zcrypt_ep11misc_exit();
2157         zcrypt_debug_exit();
2158 }
2159
2160 module_init(zcrypt_api_init);
2161 module_exit(zcrypt_api_exit);