drivers/crypto/n2_core.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
3  *
4  * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
5  */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/of_device.h>
13 #include <linux/cpumask.h>
14 #include <linux/slab.h>
15 #include <linux/interrupt.h>
16 #include <linux/crypto.h>
17 #include <crypto/md5.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 #include <crypto/aes.h>
21 #include <crypto/internal/des.h>
22 #include <linux/mutex.h>
23 #include <linux/delay.h>
24 #include <linux/sched.h>
25
26 #include <crypto/internal/hash.h>
27 #include <crypto/internal/skcipher.h>
28 #include <crypto/scatterwalk.h>
29 #include <crypto/algapi.h>
30
31 #include <asm/hypervisor.h>
32 #include <asm/mdesc.h>
33
34 #include "n2_core.h"
35
36 #define DRV_MODULE_NAME         "n2_crypto"
37 #define DRV_MODULE_VERSION      "0.2"
38 #define DRV_MODULE_RELDATE      "July 28, 2011"
39
40 static const char version[] =
41         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
42
43 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
44 MODULE_DESCRIPTION("Niagara2 Crypto driver");
45 MODULE_LICENSE("GPL");
46 MODULE_VERSION(DRV_MODULE_VERSION);
47
48 #define N2_CRA_PRIORITY         200
49
50 static DEFINE_MUTEX(spu_lock);
51
52 struct spu_queue {
53         cpumask_t               sharing;
54         unsigned long           qhandle;
55
56         spinlock_t              lock;
57         u8                      q_type;
58         void                    *q;
59         unsigned long           head;
60         unsigned long           tail;
61         struct list_head        jobs;
62
63         unsigned long           devino;
64
65         char                    irq_name[32];
66         unsigned int            irq;
67
68         struct list_head        list;
69 };
70
71 struct spu_qreg {
72         struct spu_queue        *queue;
73         unsigned long           type;
74 };
75
76 static struct spu_queue **cpu_to_cwq;
77 static struct spu_queue **cpu_to_mau;
78
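/* Advance a queue offset by one entry and wrap back to zero at the end
 * of the ring.  MAU and CWQ rings use different entry sizes.
 */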
79 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
80 {
81         if (q->q_type == HV_NCS_QTYPE_MAU) {
82                 off += MAU_ENTRY_SIZE;
83                 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
84                         off = 0;
85         } else {
86                 off += CWQ_ENTRY_SIZE;
87                 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
88                         off = 0;
89         }
90         return off;
91 }
92
93 struct n2_request_common {
94         struct list_head        entry;
95         unsigned int            offset;
96 };
97 #define OFFSET_NOT_RUNNING      (~(unsigned int)0)
98
99 /* An async job request records the final tail value it used in
100  * n2_request_common->offset; test whether that offset falls in the
101  * range (old_head, new_head], i.e. whether the job has completed.
102  */
103 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
104                                 unsigned long old_head, unsigned long new_head)
105 {
106         if (old_head <= new_head) {
107                 if (offset > old_head && offset <= new_head)
108                         return true;
109         } else {
110                 if (offset > old_head || offset <= new_head)
111                         return true;
112         }
113         return false;
114 }
115
116 /* When the HEAD marker is unequal to the actual HEAD, we get
117  * a virtual device INO interrupt.  We should process the
118  * completed CWQ entries and adjust the HEAD marker to clear
119  * the IRQ.
120  */
121 static irqreturn_t cwq_intr(int irq, void *dev_id)
122 {
123         unsigned long off, new_head, hv_ret;
124         struct spu_queue *q = dev_id;
125
126         pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
127                smp_processor_id(), q->qhandle);
128
129         spin_lock(&q->lock);
130
131         hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
132
133         pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
134                smp_processor_id(), new_head, hv_ret);
135
136         for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
137                 /* XXX ... XXX */
138         }
139
140         hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
141         if (hv_ret == HV_EOK)
142                 q->head = new_head;
143
144         spin_unlock(&q->lock);
145
146         return IRQ_HANDLED;
147 }
148
149 static irqreturn_t mau_intr(int irq, void *dev_id)
150 {
151         struct spu_queue *q = dev_id;
152         unsigned long head, hv_ret;
153
154         spin_lock(&q->lock);
155
156         pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
157                smp_processor_id(), q->qhandle);
158
159         hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
160
161         pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
162                smp_processor_id(), head, hv_ret);
163
164         sun4v_ncs_sethead_marker(q->qhandle, head);
165
166         spin_unlock(&q->lock);
167
168         return IRQ_HANDLED;
169 }
170
171 static void *spu_queue_next(struct spu_queue *q, void *cur)
172 {
173         return q->q + spu_next_offset(q, cur - q->q);
174 }
175
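/* Number of free entries in the CWQ ring.  One slot is always left
 * unused so that a full ring can be distinguished from an empty one.
 */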
176 static int spu_queue_num_free(struct spu_queue *q)
177 {
178         unsigned long head = q->head;
179         unsigned long tail = q->tail;
180         unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
181         unsigned long diff;
182
183         if (head > tail)
184                 diff = head - tail;
185         else
186                 diff = (end - tail) + head;
187
188         return (diff / CWQ_ENTRY_SIZE) - 1;
189 }
190
191 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
192 {
193         int avail = spu_queue_num_free(q);
194
195         if (avail >= num_entries)
196                 return q->q + q->tail;
197
198         return NULL;
199 }
200
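/* Advance the tail one entry past 'last' and hand the new tail to the
 * hypervisor; the cached tail is only updated if the call succeeds.
 */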
201 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
202 {
203         unsigned long hv_ret, new_tail;
204
205         new_tail = spu_next_offset(q, last - q->q);
206
207         hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
208         if (hv_ret == HV_EOK)
209                 q->tail = new_tail;
210         return hv_ret;
211 }
212
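/* Build the 64-bit control word of an initial CWQ entry from the
 * (length - 1), opcode, cipher/auth types, optional HMAC key and hash
 * lengths, and the start/end-of-block and encrypt flags.
 */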
213 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
214                              int enc_type, int auth_type,
215                              unsigned int hash_len,
216                              bool sfas, bool sob, bool eob, bool encrypt,
217                              int opcode)
218 {
219         u64 word = (len - 1) & CONTROL_LEN;
220
221         word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
222         word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
223         word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
224         if (sfas)
225                 word |= CONTROL_STORE_FINAL_AUTH_STATE;
226         if (sob)
227                 word |= CONTROL_START_OF_BLOCK;
228         if (eob)
229                 word |= CONTROL_END_OF_BLOCK;
230         if (encrypt)
231                 word |= CONTROL_ENCRYPT;
232         if (hmac_key_len)
233                 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
234         if (hash_len)
235                 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
236
237         return word;
238 }
239
240 #if 0
241 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
242 {
243         if (this_len >= 64 ||
244             qp->head != qp->tail)
245                 return true;
246         return false;
247 }
248 #endif
249
250 struct n2_ahash_alg {
251         struct list_head        entry;
252         const u8                *hash_zero;
253         const u8                *hash_init;
254         u8                      hw_op_hashsz;
255         u8                      digest_size;
256         u8                      auth_type;
257         u8                      hmac_type;
258         struct ahash_alg        alg;
259 };
260
261 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
262 {
263         struct crypto_alg *alg = tfm->__crt_alg;
264         struct ahash_alg *ahash_alg;
265
266         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
267
268         return container_of(ahash_alg, struct n2_ahash_alg, alg);
269 }
270
271 struct n2_hmac_alg {
272         const char              *child_alg;
273         struct n2_ahash_alg     derived;
274 };
275
276 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
277 {
278         struct crypto_alg *alg = tfm->__crt_alg;
279         struct ahash_alg *ahash_alg;
280
281         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
282
283         return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
284 }
285
286 struct n2_hash_ctx {
287         struct crypto_ahash             *fallback_tfm;
288 };
289
290 #define N2_HASH_KEY_MAX                 32 /* HW limit for all HMAC requests */
291
292 struct n2_hmac_ctx {
293         struct n2_hash_ctx              base;
294
295         struct crypto_shash             *child_shash;
296
297         int                             hash_key_len;
298         unsigned char                   hash_key[N2_HASH_KEY_MAX];
299 };
300
301 struct n2_hash_req_ctx {
302         union {
303                 struct md5_state        md5;
304                 struct sha1_state       sha1;
305                 struct sha256_state     sha256;
306         } u;
307
308         struct ahash_request            fallback_req;
309 };
310
311 static int n2_hash_async_init(struct ahash_request *req)
312 {
313         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
314         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
315         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
316
317         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
318         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
319
320         return crypto_ahash_init(&rctx->fallback_req);
321 }
322
323 static int n2_hash_async_update(struct ahash_request *req)
324 {
325         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
326         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
327         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
328
329         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
330         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
331         rctx->fallback_req.nbytes = req->nbytes;
332         rctx->fallback_req.src = req->src;
333
334         return crypto_ahash_update(&rctx->fallback_req);
335 }
336
337 static int n2_hash_async_final(struct ahash_request *req)
338 {
339         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
340         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
341         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
342
343         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
344         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
345         rctx->fallback_req.result = req->result;
346
347         return crypto_ahash_final(&rctx->fallback_req);
348 }
349
350 static int n2_hash_async_finup(struct ahash_request *req)
351 {
352         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
353         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
354         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
355
356         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
357         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
358         rctx->fallback_req.nbytes = req->nbytes;
359         rctx->fallback_req.src = req->src;
360         rctx->fallback_req.result = req->result;
361
362         return crypto_ahash_finup(&rctx->fallback_req);
363 }
364
365 static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
366 {
367         return -ENOSYS;
368 }
369
370 static int n2_hash_async_noexport(struct ahash_request *req, void *out)
371 {
372         return -ENOSYS;
373 }
374
375 static int n2_hash_cra_init(struct crypto_tfm *tfm)
376 {
377         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
378         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
379         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
380         struct crypto_ahash *fallback_tfm;
381         int err;
382
383         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
384                                           CRYPTO_ALG_NEED_FALLBACK);
385         if (IS_ERR(fallback_tfm)) {
386                 pr_warn("Fallback driver '%s' could not be loaded!\n",
387                         fallback_driver_name);
388                 err = PTR_ERR(fallback_tfm);
389                 goto out;
390         }
391
392         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
393                                          crypto_ahash_reqsize(fallback_tfm)));
394
395         ctx->fallback_tfm = fallback_tfm;
396         return 0;
397
398 out:
399         return err;
400 }
401
402 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
403 {
404         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
405         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
406
407         crypto_free_ahash(ctx->fallback_tfm);
408 }
409
410 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
411 {
412         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
413         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
414         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
415         struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
416         struct crypto_ahash *fallback_tfm;
417         struct crypto_shash *child_shash;
418         int err;
419
420         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
421                                           CRYPTO_ALG_NEED_FALLBACK);
422         if (IS_ERR(fallback_tfm)) {
423                 pr_warn("Fallback driver '%s' could not be loaded!\n",
424                         fallback_driver_name);
425                 err = PTR_ERR(fallback_tfm);
426                 goto out;
427         }
428
429         child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
430         if (IS_ERR(child_shash)) {
431                 pr_warn("Child shash '%s' could not be loaded!\n",
432                         n2alg->child_alg);
433                 err = PTR_ERR(child_shash);
434                 goto out_free_fallback;
435         }
436
437         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
438                                          crypto_ahash_reqsize(fallback_tfm)));
439
440         ctx->child_shash = child_shash;
441         ctx->base.fallback_tfm = fallback_tfm;
442         return 0;
443
444 out_free_fallback:
445         crypto_free_ahash(fallback_tfm);
446
447 out:
448         return err;
449 }
450
451 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
452 {
453         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
454         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
455
456         crypto_free_ahash(ctx->base.fallback_tfm);
457         crypto_free_shash(ctx->child_shash);
458 }
459
460 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
461                                 unsigned int keylen)
462 {
463         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
464         struct crypto_shash *child_shash = ctx->child_shash;
465         struct crypto_ahash *fallback_tfm;
466         int err, bs, ds;
467
468         fallback_tfm = ctx->base.fallback_tfm;
469         err = crypto_ahash_setkey(fallback_tfm, key, keylen);
470         if (err)
471                 return err;
472
473         bs = crypto_shash_blocksize(child_shash);
474         ds = crypto_shash_digestsize(child_shash);
475         BUG_ON(ds > N2_HASH_KEY_MAX);
476         if (keylen > bs) {
477                 err = crypto_shash_tfm_digest(child_shash, key, keylen,
478                                               ctx->hash_key);
479                 if (err)
480                         return err;
481                 keylen = ds;
482         } else if (keylen <= N2_HASH_KEY_MAX)
483                 memcpy(ctx->hash_key, key, keylen);
484
485         ctx->hash_key_len = keylen;
486
487         return err;
488 }
489
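/* Busy-poll the hardware HEAD pointer until it catches up with our
 * cached TAIL, i.e. until every submitted entry has been consumed.
 */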
490 static unsigned long wait_for_tail(struct spu_queue *qp)
491 {
492         unsigned long head, hv_ret;
493
494         do {
495                 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
496                 if (hv_ret != HV_EOK) {
497                         pr_err("Hypervisor error on gethead\n");
498                         break;
499                 }
500                 if (head == qp->tail) {
501                         qp->head = head;
502                         break;
503                 }
504         } while (1);
505         return hv_ret;
506 }
507
508 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
509                                               struct cwq_initial_entry *ent)
510 {
511         unsigned long hv_ret = spu_queue_submit(qp, ent);
512
513         if (hv_ret == HV_EOK)
514                 hv_ret = wait_for_tail(qp);
515
516         return hv_ret;
517 }
518
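/* Run a hash or HMAC operation on the current CPU's CWQ: build a chain
 * of descriptors covering the request, submit it, busy-wait for
 * completion and copy the digest out of hash_loc.  Requests larger
 * than the hardware limit are handed to the software fallback.
 */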
519 static int n2_do_async_digest(struct ahash_request *req,
520                               unsigned int auth_type, unsigned int digest_size,
521                               unsigned int result_size, void *hash_loc,
522                               unsigned long auth_key, unsigned int auth_key_len)
523 {
524         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
525         struct cwq_initial_entry *ent;
526         struct crypto_hash_walk walk;
527         struct spu_queue *qp;
528         unsigned long flags;
529         int err = -ENODEV;
530         int nbytes, cpu;
531
532         /* The total effective length of the operation may not
533          * exceed 2^16.
534          */
535         if (unlikely(req->nbytes > (1 << 16))) {
536                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
537                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
538
539                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
540                 rctx->fallback_req.base.flags =
541                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
542                 rctx->fallback_req.nbytes = req->nbytes;
543                 rctx->fallback_req.src = req->src;
544                 rctx->fallback_req.result = req->result;
545
546                 return crypto_ahash_digest(&rctx->fallback_req);
547         }
548
549         nbytes = crypto_hash_walk_first(req, &walk);
550
551         cpu = get_cpu();
552         qp = cpu_to_cwq[cpu];
553         if (!qp)
554                 goto out;
555
556         spin_lock_irqsave(&qp->lock, flags);
557
558         /* XXX can do better, improve this later by doing a by-hand scatterlist
559          * XXX walk, etc.
560          */
561         ent = qp->q + qp->tail;
562
563         ent->control = control_word_base(nbytes, auth_key_len, 0,
564                                          auth_type, digest_size,
565                                          false, true, false, false,
566                                          OPCODE_INPLACE_BIT |
567                                          OPCODE_AUTH_MAC);
568         ent->src_addr = __pa(walk.data);
569         ent->auth_key_addr = auth_key;
570         ent->auth_iv_addr = __pa(hash_loc);
571         ent->final_auth_state_addr = 0UL;
572         ent->enc_key_addr = 0UL;
573         ent->enc_iv_addr = 0UL;
574         ent->dest_addr = __pa(hash_loc);
575
576         nbytes = crypto_hash_walk_done(&walk, 0);
577         while (nbytes > 0) {
578                 ent = spu_queue_next(qp, ent);
579
580                 ent->control = (nbytes - 1);
581                 ent->src_addr = __pa(walk.data);
582                 ent->auth_key_addr = 0UL;
583                 ent->auth_iv_addr = 0UL;
584                 ent->final_auth_state_addr = 0UL;
585                 ent->enc_key_addr = 0UL;
586                 ent->enc_iv_addr = 0UL;
587                 ent->dest_addr = 0UL;
588
589                 nbytes = crypto_hash_walk_done(&walk, 0);
590         }
591         ent->control |= CONTROL_END_OF_BLOCK;
592
593         if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
594                 err = -EINVAL;
595         else
596                 err = 0;
597
598         spin_unlock_irqrestore(&qp->lock, flags);
599
600         if (!err)
601                 memcpy(req->result, hash_loc, result_size);
602 out:
603         put_cpu();
604
605         return err;
606 }
607
608 static int n2_hash_async_digest(struct ahash_request *req)
609 {
610         struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
611         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
612         int ds;
613
614         ds = n2alg->digest_size;
615         if (unlikely(req->nbytes == 0)) {
616                 memcpy(req->result, n2alg->hash_zero, ds);
617                 return 0;
618         }
619         memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
620
621         return n2_do_async_digest(req, n2alg->auth_type,
622                                   n2alg->hw_op_hashsz, ds,
623                                   &rctx->u, 0UL, 0);
624 }
625
626 static int n2_hmac_async_digest(struct ahash_request *req)
627 {
628         struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
629         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
630         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
631         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
632         int ds;
633
634         ds = n2alg->derived.digest_size;
635         if (unlikely(req->nbytes == 0) ||
636             unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
637                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
638                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
639
640                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
641                 rctx->fallback_req.base.flags =
642                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
643                 rctx->fallback_req.nbytes = req->nbytes;
644                 rctx->fallback_req.src = req->src;
645                 rctx->fallback_req.result = req->result;
646
647                 return crypto_ahash_digest(&rctx->fallback_req);
648         }
649         memcpy(&rctx->u, n2alg->derived.hash_init,
650                n2alg->derived.hw_op_hashsz);
651
652         return n2_do_async_digest(req, n2alg->derived.hmac_type,
653                                   n2alg->derived.hw_op_hashsz, ds,
654                                   &rctx->u,
655                                   __pa(&ctx->hash_key),
656                                   ctx->hash_key_len);
657 }
658
659 struct n2_skcipher_context {
660         int                     key_len;
661         int                     enc_type;
662         union {
663                 u8              aes[AES_MAX_KEY_SIZE];
664                 u8              des[DES_KEY_SIZE];
665                 u8              des3[3 * DES_KEY_SIZE];
666         } key;
667 };
668
669 #define N2_CHUNK_ARR_LEN        16
670
671 struct n2_crypto_chunk {
672         struct list_head        entry;
673         unsigned long           iv_paddr : 44;
674         unsigned long           arr_len : 20;
675         unsigned long           dest_paddr;
676         unsigned long           dest_final;
677         struct {
678                 unsigned long   src_paddr : 44;
679                 unsigned long   src_len : 20;
680         } arr[N2_CHUNK_ARR_LEN];
681 };
682
683 struct n2_request_context {
684         struct skcipher_walk    walk;
685         struct list_head        chunk_list;
686         struct n2_crypto_chunk  chunk;
687         u8                      temp_iv[16];
688 };
689
690 /* The SPU allows some level of flexibility for partial cipher blocks
691  * being specified in a descriptor.
692  *
693  * It merely requires that every descriptor's length field is at least
694  * as large as the cipher block size.  This means that a cipher block
695  * can span at most 2 descriptors.  However, this does not allow a
696  * partial block to span into the final descriptor as that would
697  * violate the rule (since every descriptor's length must be at least
698  * the block size).  So, for example, assuming an 8 byte block size:
699  *
700  *      0xe --> 0xa --> 0x8
701  *
702  * is a valid length sequence, whereas:
703  *
704  *      0xe --> 0xb --> 0x7
705  *
706  * is not a valid sequence.
707  */
708
709 struct n2_skcipher_alg {
710         struct list_head        entry;
711         u8                      enc_type;
712         struct skcipher_alg     skcipher;
713 };
714
715 static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
716 {
717         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
718
719         return container_of(alg, struct n2_skcipher_alg, skcipher);
720 }
721
722 struct n2_skcipher_request_context {
723         struct skcipher_walk    walk;
724 };
725
726 static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
727                          unsigned int keylen)
728 {
729         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
730         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
731         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
732
733         ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
734
735         switch (keylen) {
736         case AES_KEYSIZE_128:
737                 ctx->enc_type |= ENC_TYPE_ALG_AES128;
738                 break;
739         case AES_KEYSIZE_192:
740                 ctx->enc_type |= ENC_TYPE_ALG_AES192;
741                 break;
742         case AES_KEYSIZE_256:
743                 ctx->enc_type |= ENC_TYPE_ALG_AES256;
744                 break;
745         default:
746                 return -EINVAL;
747         }
748
749         ctx->key_len = keylen;
750         memcpy(ctx->key.aes, key, keylen);
751         return 0;
752 }
753
754 static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
755                          unsigned int keylen)
756 {
757         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
758         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
759         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
760         int err;
761
762         err = verify_skcipher_des_key(skcipher, key);
763         if (err)
764                 return err;
765
766         ctx->enc_type = n2alg->enc_type;
767
768         ctx->key_len = keylen;
769         memcpy(ctx->key.des, key, keylen);
770         return 0;
771 }
772
773 static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
774                           unsigned int keylen)
775 {
776         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
777         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
778         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
779         int err;
780
781         err = verify_skcipher_des3_key(skcipher, key);
782         if (err)
783                 return err;
784
785         ctx->enc_type = n2alg->enc_type;
786
787         ctx->key_len = keylen;
788         memcpy(ctx->key.des3, key, keylen);
789         return 0;
790 }
791
792 static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
793 {
794         int this_len = nbytes;
795
796         this_len -= (nbytes & (block_size - 1));
797         return this_len > (1 << 16) ? (1 << 16) : this_len;
798 }
799
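/* Emit the CWQ entries for one crypto chunk: an initial entry carrying
 * the control word, key and IV, followed by one extension entry per
 * additional source fragment, then submit the chain.
 */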
800 static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
801                             struct n2_crypto_chunk *cp,
802                             struct spu_queue *qp, bool encrypt)
803 {
804         struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
805         struct cwq_initial_entry *ent;
806         bool in_place;
807         int i;
808
809         ent = spu_queue_alloc(qp, cp->arr_len);
810         if (!ent) {
811                 pr_info("queue_alloc() of %d fails\n",
812                         cp->arr_len);
813                 return -EBUSY;
814         }
815
816         in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
817
818         ent->control = control_word_base(cp->arr[0].src_len,
819                                          0, ctx->enc_type, 0, 0,
820                                          false, true, false, encrypt,
821                                          OPCODE_ENCRYPT |
822                                          (in_place ? OPCODE_INPLACE_BIT : 0));
823         ent->src_addr = cp->arr[0].src_paddr;
824         ent->auth_key_addr = 0UL;
825         ent->auth_iv_addr = 0UL;
826         ent->final_auth_state_addr = 0UL;
827         ent->enc_key_addr = __pa(&ctx->key);
828         ent->enc_iv_addr = cp->iv_paddr;
829         ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
830
831         for (i = 1; i < cp->arr_len; i++) {
832                 ent = spu_queue_next(qp, ent);
833
834                 ent->control = cp->arr[i].src_len - 1;
835                 ent->src_addr = cp->arr[i].src_paddr;
836                 ent->auth_key_addr = 0UL;
837                 ent->auth_iv_addr = 0UL;
838                 ent->final_auth_state_addr = 0UL;
839                 ent->enc_key_addr = 0UL;
840                 ent->enc_iv_addr = 0UL;
841                 ent->dest_addr = 0UL;
842         }
843         ent->control |= CONTROL_END_OF_BLOCK;
844
845         return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
846 }
847
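/* Walk the request's scatterlists and split the work into chunks of at
 * most N2_CHUNK_ARR_LEN physically contiguous fragments.  A new chunk
 * is started when the destination becomes discontiguous, the in-place
 * property changes, or the running length would exceed 2^16 bytes.
 */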
848 static int n2_compute_chunks(struct skcipher_request *req)
849 {
850         struct n2_request_context *rctx = skcipher_request_ctx(req);
851         struct skcipher_walk *walk = &rctx->walk;
852         struct n2_crypto_chunk *chunk;
853         unsigned long dest_prev;
854         unsigned int tot_len;
855         bool prev_in_place;
856         int err, nbytes;
857
858         err = skcipher_walk_async(walk, req);
859         if (err)
860                 return err;
861
862         INIT_LIST_HEAD(&rctx->chunk_list);
863
864         chunk = &rctx->chunk;
865         INIT_LIST_HEAD(&chunk->entry);
866
867         chunk->iv_paddr = 0UL;
868         chunk->arr_len = 0;
869         chunk->dest_paddr = 0UL;
870
871         prev_in_place = false;
872         dest_prev = ~0UL;
873         tot_len = 0;
874
875         while ((nbytes = walk->nbytes) != 0) {
876                 unsigned long dest_paddr, src_paddr;
877                 bool in_place;
878                 int this_len;
879
880                 src_paddr = (page_to_phys(walk->src.phys.page) +
881                              walk->src.phys.offset);
882                 dest_paddr = (page_to_phys(walk->dst.phys.page) +
883                               walk->dst.phys.offset);
884                 in_place = (src_paddr == dest_paddr);
885                 this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
886
887                 if (chunk->arr_len != 0) {
888                         if (in_place != prev_in_place ||
889                             (!prev_in_place &&
890                              dest_paddr != dest_prev) ||
891                             chunk->arr_len == N2_CHUNK_ARR_LEN ||
892                             tot_len + this_len > (1 << 16)) {
893                                 chunk->dest_final = dest_prev;
894                                 list_add_tail(&chunk->entry,
895                                               &rctx->chunk_list);
896                                 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
897                                 if (!chunk) {
898                                         err = -ENOMEM;
899                                         break;
900                                 }
901                                 INIT_LIST_HEAD(&chunk->entry);
902                         }
903                 }
904                 if (chunk->arr_len == 0) {
905                         chunk->dest_paddr = dest_paddr;
906                         tot_len = 0;
907                 }
908                 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
909                 chunk->arr[chunk->arr_len].src_len = this_len;
910                 chunk->arr_len++;
911
912                 dest_prev = dest_paddr + this_len;
913                 prev_in_place = in_place;
914                 tot_len += this_len;
915
916                 err = skcipher_walk_done(walk, nbytes - this_len);
917                 if (err)
918                         break;
919         }
920         if (!err && chunk->arr_len != 0) {
921                 chunk->dest_final = dest_prev;
922                 list_add_tail(&chunk->entry, &rctx->chunk_list);
923         }
924
925         return err;
926 }
927
928 static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
929 {
930         struct n2_request_context *rctx = skcipher_request_ctx(req);
931         struct n2_crypto_chunk *c, *tmp;
932
933         if (final_iv)
934                 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
935
936         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
937                 list_del(&c->entry);
938                 if (unlikely(c != &rctx->chunk))
939                         kfree(c);
940         }
941
942 }
943
944 static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
945 {
946         struct n2_request_context *rctx = skcipher_request_ctx(req);
947         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
948         int err = n2_compute_chunks(req);
949         struct n2_crypto_chunk *c, *tmp;
950         unsigned long flags, hv_ret;
951         struct spu_queue *qp;
952
953         if (err)
954                 return err;
955
956         qp = cpu_to_cwq[get_cpu()];
957         err = -ENODEV;
958         if (!qp)
959                 goto out;
960
961         spin_lock_irqsave(&qp->lock, flags);
962
963         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
964                 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
965                 if (err)
966                         break;
967                 list_del(&c->entry);
968                 if (unlikely(c != &rctx->chunk))
969                         kfree(c);
970         }
971         if (!err) {
972                 hv_ret = wait_for_tail(qp);
973                 if (hv_ret != HV_EOK)
974                         err = -EINVAL;
975         }
976
977         spin_unlock_irqrestore(&qp->lock, flags);
978
979 out:
980         put_cpu();
981
982         n2_chunk_complete(req, NULL);
983         return err;
984 }
985
986 static int n2_encrypt_ecb(struct skcipher_request *req)
987 {
988         return n2_do_ecb(req, true);
989 }
990
991 static int n2_decrypt_ecb(struct skcipher_request *req)
992 {
993         return n2_do_ecb(req, false);
994 }
995
996 static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
997 {
998         struct n2_request_context *rctx = skcipher_request_ctx(req);
999         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1000         unsigned long flags, hv_ret, iv_paddr;
1001         int err = n2_compute_chunks(req);
1002         struct n2_crypto_chunk *c, *tmp;
1003         struct spu_queue *qp;
1004         void *final_iv_addr;
1005
1006         final_iv_addr = NULL;
1007
1008         if (err)
1009                 return err;
1010
1011         qp = cpu_to_cwq[get_cpu()];
1012         err = -ENODEV;
1013         if (!qp)
1014                 goto out;
1015
1016         spin_lock_irqsave(&qp->lock, flags);
1017
1018         if (encrypt) {
1019                 iv_paddr = __pa(rctx->walk.iv);
1020                 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1021                                          entry) {
1022                         c->iv_paddr = iv_paddr;
1023                         err = __n2_crypt_chunk(tfm, c, qp, true);
1024                         if (err)
1025                                 break;
1026                         iv_paddr = c->dest_final - rctx->walk.blocksize;
1027                         list_del(&c->entry);
1028                         if (unlikely(c != &rctx->chunk))
1029                                 kfree(c);
1030                 }
1031                 final_iv_addr = __va(iv_paddr);
1032         } else {
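                /* Process chunks in reverse order: each chunk's IV is the last
                 * ciphertext block of the previous chunk, which an in-place
                 * decryption pass would otherwise already have overwritten.
                 */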
1033                 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1034                                                  entry) {
1035                         if (c == &rctx->chunk) {
1036                                 iv_paddr = __pa(rctx->walk.iv);
1037                         } else {
1038                                 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1039                                             tmp->arr[tmp->arr_len-1].src_len -
1040                                             rctx->walk.blocksize);
1041                         }
1042                         if (!final_iv_addr) {
1043                                 unsigned long pa;
1044
1045                                 pa = (c->arr[c->arr_len-1].src_paddr +
1046                                       c->arr[c->arr_len-1].src_len -
1047                                       rctx->walk.blocksize);
1048                                 final_iv_addr = rctx->temp_iv;
1049                                 memcpy(rctx->temp_iv, __va(pa),
1050                                        rctx->walk.blocksize);
1051                         }
1052                         c->iv_paddr = iv_paddr;
1053                         err = __n2_crypt_chunk(tfm, c, qp, false);
1054                         if (err)
1055                                 break;
1056                         list_del(&c->entry);
1057                         if (unlikely(c != &rctx->chunk))
1058                                 kfree(c);
1059                 }
1060         }
1061         if (!err) {
1062                 hv_ret = wait_for_tail(qp);
1063                 if (hv_ret != HV_EOK)
1064                         err = -EINVAL;
1065         }
1066
1067         spin_unlock_irqrestore(&qp->lock, flags);
1068
1069 out:
1070         put_cpu();
1071
1072         n2_chunk_complete(req, err ? NULL : final_iv_addr);
1073         return err;
1074 }
1075
1076 static int n2_encrypt_chaining(struct skcipher_request *req)
1077 {
1078         return n2_do_chaining(req, true);
1079 }
1080
1081 static int n2_decrypt_chaining(struct skcipher_request *req)
1082 {
1083         return n2_do_chaining(req, false);
1084 }
1085
1086 struct n2_skcipher_tmpl {
1087         const char              *name;
1088         const char              *drv_name;
1089         u8                      block_size;
1090         u8                      enc_type;
1091         struct skcipher_alg     skcipher;
1092 };
1093
1094 static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
1095         /* DES: ECB CBC and CFB are supported */
1096         {       .name           = "ecb(des)",
1097                 .drv_name       = "ecb-des",
1098                 .block_size     = DES_BLOCK_SIZE,
1099                 .enc_type       = (ENC_TYPE_ALG_DES |
1100                                    ENC_TYPE_CHAINING_ECB),
1101                 .skcipher       = {
1102                         .min_keysize    = DES_KEY_SIZE,
1103                         .max_keysize    = DES_KEY_SIZE,
1104                         .setkey         = n2_des_setkey,
1105                         .encrypt        = n2_encrypt_ecb,
1106                         .decrypt        = n2_decrypt_ecb,
1107                 },
1108         },
1109         {       .name           = "cbc(des)",
1110                 .drv_name       = "cbc-des",
1111                 .block_size     = DES_BLOCK_SIZE,
1112                 .enc_type       = (ENC_TYPE_ALG_DES |
1113                                    ENC_TYPE_CHAINING_CBC),
1114                 .skcipher       = {
1115                         .ivsize         = DES_BLOCK_SIZE,
1116                         .min_keysize    = DES_KEY_SIZE,
1117                         .max_keysize    = DES_KEY_SIZE,
1118                         .setkey         = n2_des_setkey,
1119                         .encrypt        = n2_encrypt_chaining,
1120                         .decrypt        = n2_decrypt_chaining,
1121                 },
1122         },
1123         {       .name           = "cfb(des)",
1124                 .drv_name       = "cfb-des",
1125                 .block_size     = DES_BLOCK_SIZE,
1126                 .enc_type       = (ENC_TYPE_ALG_DES |
1127                                    ENC_TYPE_CHAINING_CFB),
1128                 .skcipher       = {
1129                         .min_keysize    = DES_KEY_SIZE,
1130                         .max_keysize    = DES_KEY_SIZE,
1131                         .setkey         = n2_des_setkey,
1132                         .encrypt        = n2_encrypt_chaining,
1133                         .decrypt        = n2_decrypt_chaining,
1134                 },
1135         },
1136
1137         /* 3DES: ECB CBC and CFB are supported */
1138         {       .name           = "ecb(des3_ede)",
1139                 .drv_name       = "ecb-3des",
1140                 .block_size     = DES_BLOCK_SIZE,
1141                 .enc_type       = (ENC_TYPE_ALG_3DES |
1142                                    ENC_TYPE_CHAINING_ECB),
1143                 .skcipher       = {
1144                         .min_keysize    = 3 * DES_KEY_SIZE,
1145                         .max_keysize    = 3 * DES_KEY_SIZE,
1146                         .setkey         = n2_3des_setkey,
1147                         .encrypt        = n2_encrypt_ecb,
1148                         .decrypt        = n2_decrypt_ecb,
1149                 },
1150         },
1151         {       .name           = "cbc(des3_ede)",
1152                 .drv_name       = "cbc-3des",
1153                 .block_size     = DES_BLOCK_SIZE,
1154                 .enc_type       = (ENC_TYPE_ALG_3DES |
1155                                    ENC_TYPE_CHAINING_CBC),
1156                 .skcipher       = {
1157                         .ivsize         = DES_BLOCK_SIZE,
1158                         .min_keysize    = 3 * DES_KEY_SIZE,
1159                         .max_keysize    = 3 * DES_KEY_SIZE,
1160                         .setkey         = n2_3des_setkey,
1161                         .encrypt        = n2_encrypt_chaining,
1162                         .decrypt        = n2_decrypt_chaining,
1163                 },
1164         },
1165         {       .name           = "cfb(des3_ede)",
1166                 .drv_name       = "cfb-3des",
1167                 .block_size     = DES_BLOCK_SIZE,
1168                 .enc_type       = (ENC_TYPE_ALG_3DES |
1169                                    ENC_TYPE_CHAINING_CFB),
1170                 .skcipher       = {
1171                         .min_keysize    = 3 * DES_KEY_SIZE,
1172                         .max_keysize    = 3 * DES_KEY_SIZE,
1173                         .setkey         = n2_3des_setkey,
1174                         .encrypt        = n2_encrypt_chaining,
1175                         .decrypt        = n2_decrypt_chaining,
1176                 },
1177         },
1178         /* AES: ECB CBC and CTR are supported */
1179         {       .name           = "ecb(aes)",
1180                 .drv_name       = "ecb-aes",
1181                 .block_size     = AES_BLOCK_SIZE,
1182                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1183                                    ENC_TYPE_CHAINING_ECB),
1184                 .skcipher       = {
1185                         .min_keysize    = AES_MIN_KEY_SIZE,
1186                         .max_keysize    = AES_MAX_KEY_SIZE,
1187                         .setkey         = n2_aes_setkey,
1188                         .encrypt        = n2_encrypt_ecb,
1189                         .decrypt        = n2_decrypt_ecb,
1190                 },
1191         },
1192         {       .name           = "cbc(aes)",
1193                 .drv_name       = "cbc-aes",
1194                 .block_size     = AES_BLOCK_SIZE,
1195                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1196                                    ENC_TYPE_CHAINING_CBC),
1197                 .skcipher       = {
1198                         .ivsize         = AES_BLOCK_SIZE,
1199                         .min_keysize    = AES_MIN_KEY_SIZE,
1200                         .max_keysize    = AES_MAX_KEY_SIZE,
1201                         .setkey         = n2_aes_setkey,
1202                         .encrypt        = n2_encrypt_chaining,
1203                         .decrypt        = n2_decrypt_chaining,
1204                 },
1205         },
1206         {       .name           = "ctr(aes)",
1207                 .drv_name       = "ctr-aes",
1208                 .block_size     = AES_BLOCK_SIZE,
1209                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1210                                    ENC_TYPE_CHAINING_COUNTER),
1211                 .skcipher       = {
1212                         .ivsize         = AES_BLOCK_SIZE,
1213                         .min_keysize    = AES_MIN_KEY_SIZE,
1214                         .max_keysize    = AES_MAX_KEY_SIZE,
1215                         .setkey         = n2_aes_setkey,
1216                         .encrypt        = n2_encrypt_chaining,
1217                         .decrypt        = n2_encrypt_chaining,
1218                 },
1219         },
1220
1221 };
1222 #define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
1223
1224 static LIST_HEAD(skcipher_algs);
1225
1226 struct n2_hash_tmpl {
1227         const char      *name;
1228         const u8        *hash_zero;
1229         const u8        *hash_init;
1230         u8              hw_op_hashsz;
1231         u8              digest_size;
1232         u8              statesize;
1233         u8              block_size;
1234         u8              auth_type;
1235         u8              hmac_type;
1236 };
1237
1238 static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
1239         cpu_to_le32(MD5_H0),
1240         cpu_to_le32(MD5_H1),
1241         cpu_to_le32(MD5_H2),
1242         cpu_to_le32(MD5_H3),
1243 };
1244 static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
1245         SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1246 };
1247 static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
1248         SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1249         SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1250 };
1251 static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
1252         SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1253         SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1254 };
1255
1256 static const struct n2_hash_tmpl hash_tmpls[] = {
1257         { .name         = "md5",
1258           .hash_zero    = md5_zero_message_hash,
1259           .hash_init    = (u8 *)n2_md5_init,
1260           .auth_type    = AUTH_TYPE_MD5,
1261           .hmac_type    = AUTH_TYPE_HMAC_MD5,
1262           .hw_op_hashsz = MD5_DIGEST_SIZE,
1263           .digest_size  = MD5_DIGEST_SIZE,
1264           .statesize    = sizeof(struct md5_state),
1265           .block_size   = MD5_HMAC_BLOCK_SIZE },
1266         { .name         = "sha1",
1267           .hash_zero    = sha1_zero_message_hash,
1268           .hash_init    = (u8 *)n2_sha1_init,
1269           .auth_type    = AUTH_TYPE_SHA1,
1270           .hmac_type    = AUTH_TYPE_HMAC_SHA1,
1271           .hw_op_hashsz = SHA1_DIGEST_SIZE,
1272           .digest_size  = SHA1_DIGEST_SIZE,
1273           .statesize    = sizeof(struct sha1_state),
1274           .block_size   = SHA1_BLOCK_SIZE },
1275         { .name         = "sha256",
1276           .hash_zero    = sha256_zero_message_hash,
1277           .hash_init    = (u8 *)n2_sha256_init,
1278           .auth_type    = AUTH_TYPE_SHA256,
1279           .hmac_type    = AUTH_TYPE_HMAC_SHA256,
1280           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1281           .digest_size  = SHA256_DIGEST_SIZE,
1282           .statesize    = sizeof(struct sha256_state),
1283           .block_size   = SHA256_BLOCK_SIZE },
1284         { .name         = "sha224",
1285           .hash_zero    = sha224_zero_message_hash,
1286           .hash_init    = (u8 *)n2_sha224_init,
1287           .auth_type    = AUTH_TYPE_SHA256,
1288           .hmac_type    = AUTH_TYPE_RESERVED,
1289           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1290           .digest_size  = SHA224_DIGEST_SIZE,
1291           .statesize    = sizeof(struct sha256_state),
1292           .block_size   = SHA224_BLOCK_SIZE },
1293 };
1294 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1295
1296 static LIST_HEAD(ahash_algs);
1297 static LIST_HEAD(hmac_algs);
1298
1299 static int algs_registered;
1300
1301 static void __n2_unregister_algs(void)
1302 {
1303         struct n2_skcipher_alg *skcipher, *skcipher_tmp;
1304         struct n2_ahash_alg *alg, *alg_tmp;
1305         struct n2_hmac_alg *hmac, *hmac_tmp;
1306
1307         list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
1308                 crypto_unregister_skcipher(&skcipher->skcipher);
1309                 list_del(&skcipher->entry);
1310                 kfree(skcipher);
1311         }
1312         list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1313                 crypto_unregister_ahash(&hmac->derived.alg);
1314                 list_del(&hmac->derived.entry);
1315                 kfree(hmac);
1316         }
1317         list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1318                 crypto_unregister_ahash(&alg->alg);
1319                 list_del(&alg->entry);
1320                 kfree(alg);
1321         }
1322 }
1323
1324 static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
1325 {
1326         crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
1327         return 0;
1328 }
1329
1330 static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
1331 {
1332         struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1333         struct skcipher_alg *alg;
1334         int err;
1335
1336         if (!p)
1337                 return -ENOMEM;
1338
1339         alg = &p->skcipher;
1340         *alg = tmpl->skcipher;
1341
1342         snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1343         snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1344         alg->base.cra_priority = N2_CRA_PRIORITY;
1345         alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
1346                               CRYPTO_ALG_ALLOCATES_MEMORY;
1347         alg->base.cra_blocksize = tmpl->block_size;
1348         p->enc_type = tmpl->enc_type;
1349         alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
1350         alg->base.cra_module = THIS_MODULE;
1351         alg->init = n2_skcipher_init_tfm;
1352
1353         list_add(&p->entry, &skcipher_algs);
1354         err = crypto_register_skcipher(alg);
1355         if (err) {
1356                 pr_err("%s alg registration failed\n", alg->base.cra_name);
1357                 list_del(&p->entry);
1358                 kfree(p);
1359         } else {
1360                 pr_info("%s alg registered\n", alg->base.cra_name);
1361         }
1362         return err;
1363 }
1364
1365 static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1366 {
1367         struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1368         struct ahash_alg *ahash;
1369         struct crypto_alg *base;
1370         int err;
1371
1372         if (!p)
1373                 return -ENOMEM;
1374
1375         p->child_alg = n2ahash->alg.halg.base.cra_name;
1376         memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1377         INIT_LIST_HEAD(&p->derived.entry);
1378
1379         ahash = &p->derived.alg;
1380         ahash->digest = n2_hmac_async_digest;
1381         ahash->setkey = n2_hmac_async_setkey;
1382
1383         base = &ahash->halg.base;
1384         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1385         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1386
1387         base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1388         base->cra_init = n2_hmac_cra_init;
1389         base->cra_exit = n2_hmac_cra_exit;
1390
1391         list_add(&p->derived.entry, &hmac_algs);
1392         err = crypto_register_ahash(ahash);
1393         if (err) {
1394                 pr_err("%s alg registration failed\n", base->cra_name);
1395                 list_del(&p->derived.entry);
1396                 kfree(p);
1397         } else {
1398                 pr_info("%s alg registered\n", base->cra_name);
1399         }
1400         return err;
1401 }
1402
1403 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1404 {
1405         struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1406         struct hash_alg_common *halg;
1407         struct crypto_alg *base;
1408         struct ahash_alg *ahash;
1409         int err;
1410
1411         if (!p)
1412                 return -ENOMEM;
1413
1414         p->hash_zero = tmpl->hash_zero;
1415         p->hash_init = tmpl->hash_init;
1416         p->auth_type = tmpl->auth_type;
1417         p->hmac_type = tmpl->hmac_type;
1418         p->hw_op_hashsz = tmpl->hw_op_hashsz;
1419         p->digest_size = tmpl->digest_size;
1420
1421         ahash = &p->alg;
1422         ahash->init = n2_hash_async_init;
1423         ahash->update = n2_hash_async_update;
1424         ahash->final = n2_hash_async_final;
1425         ahash->finup = n2_hash_async_finup;
1426         ahash->digest = n2_hash_async_digest;
1427         ahash->export = n2_hash_async_noexport;
1428         ahash->import = n2_hash_async_noimport;
1429
1430         halg = &ahash->halg;
1431         halg->digestsize = tmpl->digest_size;
1432         halg->statesize = tmpl->statesize;
1433
1434         base = &halg->base;
1435         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1436         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1437         base->cra_priority = N2_CRA_PRIORITY;
1438         base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1439                           CRYPTO_ALG_NEED_FALLBACK;
1440         base->cra_blocksize = tmpl->block_size;
1441         base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1442         base->cra_module = THIS_MODULE;
1443         base->cra_init = n2_hash_cra_init;
1444         base->cra_exit = n2_hash_cra_exit;
1445
1446         list_add(&p->entry, &ahash_algs);
1447         err = crypto_register_ahash(ahash);
1448         if (err) {
1449                 pr_err("%s alg registration failed\n", base->cra_name);
1450                 list_del(&p->entry);
1451                 kfree(p);
1452         } else {
1453                 pr_info("%s alg registered\n", base->cra_name);
1454         }
1455         if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1456                 err = __n2_register_one_hmac(p);
1457         return err;
1458 }
1459
1460 static int n2_register_algs(void)
1461 {
1462         int i, err = 0;
1463
1464         mutex_lock(&spu_lock);
1465         if (algs_registered++)
1466                 goto out;
1467
1468         for (i = 0; i < NUM_HASH_TMPLS; i++) {
1469                 err = __n2_register_one_ahash(&hash_tmpls[i]);
1470                 if (err) {
1471                         __n2_unregister_algs();
1472                         goto out;
1473                 }
1474         }
1475         for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1476                 err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
1477                 if (err) {
1478                         __n2_unregister_algs();
1479                         goto out;
1480                 }
1481         }
1482
1483 out:
1484         mutex_unlock(&spu_lock);
1485         return err;
1486 }
1487
1488 static void n2_unregister_algs(void)
1489 {
1490         mutex_lock(&spu_lock);
1491         if (!--algs_registered)
1492                 __n2_unregister_algs();
1493         mutex_unlock(&spu_lock);
1494 }
1495
1496 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1497  * a devino.  This isn't very useful to us because all of the
1498  * interrupts listed in the device_node have been translated to
1499  * Linux virtual IRQ cookie numbers.
1500  *
1501  * So we have to back-translate, going through the 'intr' and 'ino'
1502  * property tables of the n2cp MDESC node, matching it with the OF
1503  * 'interrupts' property entries, in order to figure out which
1504  * devino goes to which already-translated IRQ.
1505  */
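     /* For example (illustrative values only): an MDESC 'ino' property of
      * { 0x15, 0x16 } yields ino_table = { {ino=0x15, intr=1},
      * {ino=0x16, intr=2} }.  With an OF 'interrupts' property of <1 2>,
      * devino 0x16 resolves to intr 2, i.e. index 1 into
      * dev->archdata.irqs[].
      */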
1506 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1507                              unsigned long dev_ino)
1508 {
1509         const unsigned int *dev_intrs;
1510         unsigned int intr;
1511         int i;
1512
1513         for (i = 0; i < ip->num_intrs; i++) {
1514                 if (ip->ino_table[i].ino == dev_ino)
1515                         break;
1516         }
1517         if (i == ip->num_intrs)
1518                 return -ENODEV;
1519
1520         intr = ip->ino_table[i].intr;
1521
1522         dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1523         if (!dev_intrs)
1524                 return -ENODEV;
1525
1526         for (i = 0; i < dev->archdata.num_irqs; i++) {
1527                 if (dev_intrs[i] == intr)
1528                         return i;
1529         }
1530
1531         return -ENODEV;
1532 }
1533
1534 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1535                        const char *irq_name, struct spu_queue *p,
1536                        irq_handler_t handler)
1537 {
1538         unsigned long herr;
1539         int index;
1540
1541         herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1542         if (herr)
1543                 return -EINVAL;
1544
1545         index = find_devino_index(dev, ip, p->devino);
1546         if (index < 0)
1547                 return index;
1548
1549         p->irq = dev->archdata.irqs[index];
1550
1551         snprintf(p->irq_name, sizeof(p->irq_name), "%s-%d", irq_name, index);
1552
1553         return request_irq(p->irq, handler, 0, p->irq_name, p);
1554 }
1555
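     /* One slab cache per queue type (MAU and CWQ), indexed by q_type - 1.
      * Each object is a complete queue, aligned to its entry size.
      */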
1556 static struct kmem_cache *queue_cache[2];
1557
1558 static void *new_queue(unsigned long q_type)
1559 {
1560         return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1561 }
1562
1563 static void free_queue(void *p, unsigned long q_type)
1564 {
1565         kmem_cache_free(queue_cache[q_type - 1], p);
1566 }
1567
1568 static int queue_cache_init(void)
1569 {
1570         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1571                 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1572                         kmem_cache_create("mau_queue",
1573                                           (MAU_NUM_ENTRIES *
1574                                            MAU_ENTRY_SIZE),
1575                                           MAU_ENTRY_SIZE, 0, NULL);
1576         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1577                 return -ENOMEM;
1578
1579         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1580                 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1581                         kmem_cache_create("cwq_queue",
1582                                           (CWQ_NUM_ENTRIES *
1583                                            CWQ_ENTRY_SIZE),
1584                                           CWQ_ENTRY_SIZE, 0, NULL);
1585         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1586                 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1587                 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1588                 return -ENOMEM;
1589         }
1590         return 0;
1591 }
1592
1593 static void queue_cache_destroy(void)
1594 {
1595         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1596         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1597         queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1598         queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1599 }
1600
1601 static long spu_queue_register_workfn(void *arg)
1602 {
1603         struct spu_qreg *qr = arg;
1604         struct spu_queue *p = qr->queue;
1605         unsigned long q_type = qr->type;
1606         unsigned long hv_ret;
1607
1608         hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1609                                  CWQ_NUM_ENTRIES, &p->qhandle);
1610         if (!hv_ret)
1611                 sun4v_ncs_sethead_marker(p->qhandle, 0);
1612
1613         return hv_ret ? -EINVAL : 0;
1614 }
1615
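     /* The qconf hypercall has to be issued from a CPU that is attached to
      * this SPU, so run it via work_on_cpu_safe() on one of the CPUs in the
      * queue's sharing mask.
      */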
1616 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1617 {
1618         int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1619         struct spu_qreg qr = { .queue = p, .type = q_type };
1620
1621         return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
1622 }
1623
1624 static int spu_queue_setup(struct spu_queue *p)
1625 {
1626         int err;
1627
1628         p->q = new_queue(p->q_type);
1629         if (!p->q)
1630                 return -ENOMEM;
1631
1632         err = spu_queue_register(p, p->q_type);
1633         if (err) {
1634                 free_queue(p->q, p->q_type);
1635                 p->q = NULL;
1636         }
1637
1638         return err;
1639 }
1640
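     /* Tear-down reconfigures the queue with zero entries; the backing
      * memory is only freed if the hypervisor accepted the unconfigure.
      */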
1641 static void spu_queue_destroy(struct spu_queue *p)
1642 {
1643         unsigned long hv_ret;
1644
1645         if (!p->q)
1646                 return;
1647
1648         hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1649
1650         if (!hv_ret)
1651                 free_queue(p->q, p->q_type);
1652 }
1653
1654 static void spu_list_destroy(struct list_head *list)
1655 {
1656         struct spu_queue *p, *n;
1657
1658         list_for_each_entry_safe(p, n, list, list) {
1659                 int i;
1660
1661                 for (i = 0; i < NR_CPUS; i++) {
1662                         if (cpu_to_cwq[i] == p)
1663                                 cpu_to_cwq[i] = NULL;
                             if (cpu_to_mau[i] == p)
                                     cpu_to_mau[i] = NULL;
1664                 }
1665
1666                 if (p->irq) {
1667                         free_irq(p->irq, p);
1668                         p->irq = 0;
1669                 }
1670                 spu_queue_destroy(p);
1671                 list_del(&p->list);
1672                 kfree(p);
1673         }
1674 }
1675
1676 /* Walk the backward arcs of an 'exec-unit' node (CWQ or MAU),
1677  * gathering cpu membership information.
1678  */
1679 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1680                                struct platform_device *dev,
1681                                u64 node, struct spu_queue *p,
1682                                struct spu_queue **table)
1683 {
1684         u64 arc;
1685
1686         mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1687                 u64 tgt = mdesc_arc_target(mdesc, arc);
1688                 const char *name = mdesc_node_name(mdesc, tgt);
1689                 const u64 *id;
1690
1691                 if (strcmp(name, "cpu"))
1692                         continue;
1693                 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1694                 if (table[*id] != NULL) {
1695                         dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1696                                 dev->dev.of_node);
1697                         return -EINVAL;
1698                 }
1699                 cpumask_set_cpu(*id, &p->sharing);
1700                 table[*id] = p;
1701         }
1702         return 0;
1703 }
1704
1705 /* Process an 'exec-unit' MDESC node of the given type ('cwq' or 'mau').  */
1706 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1707                             struct platform_device *dev, struct mdesc_handle *mdesc,
1708                             u64 node, const char *iname, unsigned long q_type,
1709                             irq_handler_t handler, struct spu_queue **table)
1710 {
1711         struct spu_queue *p;
1712         int err;
1713
1714         p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1715         if (!p) {
1716                 dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1717                         dev->dev.of_node);
1718                 return -ENOMEM;
1719         }
1720
1721         cpumask_clear(&p->sharing);
1722         spin_lock_init(&p->lock);
1723         p->q_type = q_type;
1724         INIT_LIST_HEAD(&p->jobs);
1725         list_add(&p->list, list);
1726
1727         err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1728         if (err)
1729                 return err;
1730
1731         err = spu_queue_setup(p);
1732         if (err)
1733                 return err;
1734
1735         return spu_map_ino(dev, ip, iname, p, handler);
1736 }
1737
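     /* Find every 'exec-unit' MDESC node of the requested type and bring up
      * a queue (including its IRQ) for each one; on any failure the
      * partially built list is torn down.
      */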
1738 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1739                           struct spu_mdesc_info *ip, struct list_head *list,
1740                           const char *exec_name, unsigned long q_type,
1741                           irq_handler_t handler, struct spu_queue **table)
1742 {
1743         int err = 0;
1744         u64 node;
1745
1746         mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1747                 const char *type;
1748
1749                 type = mdesc_get_property(mdesc, node, "type", NULL);
1750                 if (!type || strcmp(type, exec_name))
1751                         continue;
1752
1753                 err = handle_exec_unit(ip, list, dev, mdesc, node,
1754                                        exec_name, q_type, handler, table);
1755                 if (err) {
1756                         spu_list_destroy(list);
1757                         break;
1758                 }
1759         }
1760
1761         return err;
1762 }
1763
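     /* Build the ino table from the MDESC 'ino' property.  Entry i is given
      * interrupt number i + 1, which find_devino_index() later matches
      * against the OF 'interrupts' property.
      */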
1764 static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1765                          struct spu_mdesc_info *ip)
1766 {
1767         const u64 *ino;
1768         int ino_len;
1769         int i;
1770
1771         ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1772         if (!ino) {
1773                 pr_err("no 'ino' property found in MDESC node\n");
1774                 return -ENODEV;
1775         }
1776
1777         ip->num_intrs = ino_len / sizeof(u64);
1778         ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1779                                  ip->num_intrs),
1780                                 GFP_KERNEL);
1781         if (!ip->ino_table)
1782                 return -ENOMEM;
1783
1784         for (i = 0; i < ip->num_intrs; i++) {
1785                 struct ino_blob *b = &ip->ino_table[i];
1786                 b->intr = i + 1;
1787                 b->ino = ino[i];
1788         }
1789
1790         return 0;
1791 }
1792
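     /* Locate the 'virtual-device' MDESC node whose name matches and whose
      * cfg-handle equals this device's 'reg' property, then record its
      * interrupt layout.
      */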
1793 static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1794                                 struct platform_device *dev,
1795                                 struct spu_mdesc_info *ip,
1796                                 const char *node_name)
1797 {
1798         const unsigned int *reg;
1799         u64 node;
1800
1801         reg = of_get_property(dev->dev.of_node, "reg", NULL);
1802         if (!reg)
1803                 return -ENODEV;
1804
1805         mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1806                 const char *name;
1807                 const u64 *chdl;
1808
1809                 name = mdesc_get_property(mdesc, node, "name", NULL);
1810                 if (!name || strcmp(name, node_name))
1811                         continue;
1812                 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1813                 if (!chdl || (*chdl != *reg))
1814                         continue;
1815                 ip->cfg_handle = *chdl;
1816                 return get_irq_props(mdesc, node, ip);
1817         }
1818
1819         return -ENODEV;
1820 }
1821
1822 static unsigned long n2_spu_hvapi_major;
1823 static unsigned long n2_spu_hvapi_minor;
1824
1825 static int n2_spu_hvapi_register(void)
1826 {
1827         int err;
1828
1829         n2_spu_hvapi_major = 2;
1830         n2_spu_hvapi_minor = 0;
1831
1832         err = sun4v_hvapi_register(HV_GRP_NCS,
1833                                    n2_spu_hvapi_major,
1834                                    &n2_spu_hvapi_minor);
1835
1836         if (!err)
1837                 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1838                         n2_spu_hvapi_major,
1839                         n2_spu_hvapi_minor);
1840
1841         return err;
1842 }
1843
1844 static void n2_spu_hvapi_unregister(void)
1845 {
1846         sun4v_hvapi_unregister(HV_GRP_NCS);
1847 }
1848
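     /* The NCS HVAPI registration, the queue caches and the per-cpu queue
      * tables are shared by the n2cp and ncp drivers, so they are reference
      * counted under spu_lock.
      */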
1849 static int global_ref;
1850
1851 static int grab_global_resources(void)
1852 {
1853         int err = 0;
1854
1855         mutex_lock(&spu_lock);
1856
1857         if (global_ref++)
1858                 goto out;
1859
1860         err = n2_spu_hvapi_register();
1861         if (err)
1862                 goto out;
1863
1864         err = queue_cache_init();
1865         if (err)
1866                 goto out_hvapi_release;
1867
1868         err = -ENOMEM;
1869         cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1870                              GFP_KERNEL);
1871         if (!cpu_to_cwq)
1872                 goto out_queue_cache_destroy;
1873
1874         cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1875                              GFP_KERNEL);
1876         if (!cpu_to_mau)
1877                 goto out_free_cwq_table;
1878
1879         err = 0;
1880
1881 out:
1882         if (err)
1883                 global_ref--;
1884         mutex_unlock(&spu_lock);
1885         return err;
1886
1887 out_free_cwq_table:
1888         kfree(cpu_to_cwq);
1889         cpu_to_cwq = NULL;
1890
1891 out_queue_cache_destroy:
1892         queue_cache_destroy();
1893
1894 out_hvapi_release:
1895         n2_spu_hvapi_unregister();
1896         goto out;
1897 }
1898
1899 static void release_global_resources(void)
1900 {
1901         mutex_lock(&spu_lock);
1902         if (!--global_ref) {
1903                 kfree(cpu_to_cwq);
1904                 cpu_to_cwq = NULL;
1905
1906                 kfree(cpu_to_mau);
1907                 cpu_to_mau = NULL;
1908
1909                 queue_cache_destroy();
1910                 n2_spu_hvapi_unregister();
1911         }
1912         mutex_unlock(&spu_lock);
1913 }
1914
1915 static struct n2_crypto *alloc_n2cp(void)
1916 {
1917         struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1918
1919         if (np)
1920                 INIT_LIST_HEAD(&np->cwq_list);
1921
1922         return np;
1923 }
1924
1925 static void free_n2cp(struct n2_crypto *np)
1926 {
1927         kfree(np->cwq_info.ino_table);
1928         np->cwq_info.ino_table = NULL;
1929
1930         kfree(np);
1931 }
1932
1933 static void n2_spu_driver_version(void)
1934 {
1935         static int n2_spu_version_printed;
1936
1937         if (n2_spu_version_printed++ == 0)
1938                 pr_info("%s", version);
1939 }
1940
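     /* Bring up a CWQ ('n2cp') device: take the shared global resources,
      * walk the MDESC for its CWQ exec-units and, once the queues are live,
      * register the crypto algorithms.
      */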
1941 static int n2_crypto_probe(struct platform_device *dev)
1942 {
1943         struct mdesc_handle *mdesc;
1944         struct n2_crypto *np;
1945         int err;
1946
1947         n2_spu_driver_version();
1948
1949         pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
1950
1951         np = alloc_n2cp();
1952         if (!np) {
1953                 dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
1954                         dev->dev.of_node);
1955                 return -ENOMEM;
1956         }
1957
1958         err = grab_global_resources();
1959         if (err) {
1960                 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
1961                         dev->dev.of_node);
1962                 goto out_free_n2cp;
1963         }
1964
1965         mdesc = mdesc_grab();
1966
1967         if (!mdesc) {
1968                 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
1969                         dev->dev.of_node);
1970                 err = -ENODEV;
1971                 goto out_free_global;
1972         }
1973         err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1974         if (err) {
1975                 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
1976                         dev->dev.of_node);
1977                 mdesc_release(mdesc);
1978                 goto out_free_global;
1979         }
1980
1981         err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1982                              "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1983                              cpu_to_cwq);
1984         mdesc_release(mdesc);
1985
1986         if (err) {
1987                 dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
1988                         dev->dev.of_node);
1989                 goto out_free_global;
1990         }
1991
1992         err = n2_register_algs();
1993         if (err) {
1994                 dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
1995                         dev->dev.of_node);
1996                 goto out_free_spu_list;
1997         }
1998
1999         dev_set_drvdata(&dev->dev, np);
2000
2001         return 0;
2002
2003 out_free_spu_list:
2004         spu_list_destroy(&np->cwq_list);
2005
2006 out_free_global:
2007         release_global_resources();
2008
2009 out_free_n2cp:
2010         free_n2cp(np);
2011
2012         return err;
2013 }
2014
2015 static int n2_crypto_remove(struct platform_device *dev)
2016 {
2017         struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2018
2019         n2_unregister_algs();
2020
2021         spu_list_destroy(&np->cwq_list);
2022
2023         release_global_resources();
2024
2025         free_n2cp(np);
2026
2027         return 0;
2028 }
2029
2030 static struct n2_mau *alloc_ncp(void)
2031 {
2032         struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2033
2034         if (mp)
2035                 INIT_LIST_HEAD(&mp->mau_list);
2036
2037         return mp;
2038 }
2039
2040 static void free_ncp(struct n2_mau *mp)
2041 {
2042         kfree(mp->mau_info.ino_table);
2043         mp->mau_info.ino_table = NULL;
2044
2045         kfree(mp);
2046 }
2047
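     /* The MAU ('ncp') device only brings up its queues; crypto algorithm
      * registration is handled entirely by the CWQ ('n2cp') device.
      */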
2048 static int n2_mau_probe(struct platform_device *dev)
2049 {
2050         struct mdesc_handle *mdesc;
2051         struct n2_mau *mp;
2052         int err;
2053
2054         n2_spu_driver_version();
2055
2056         pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2057
2058         mp = alloc_ncp();
2059         if (!mp) {
2060                 dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2061                         dev->dev.of_node);
2062                 return -ENOMEM;
2063         }
2064
2065         err = grab_global_resources();
2066         if (err) {
2067                 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2068                         dev->dev.of_node);
2069                 goto out_free_ncp;
2070         }
2071
2072         mdesc = mdesc_grab();
2073
2074         if (!mdesc) {
2075                 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2076                         dev->dev.of_node);
2077                 err = -ENODEV;
2078                 goto out_free_global;
2079         }
2080
2081         err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2082         if (err) {
2083                 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2084                         dev->dev.of_node);
2085                 mdesc_release(mdesc);
2086                 goto out_free_global;
2087         }
2088
2089         err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2090                              "mau", HV_NCS_QTYPE_MAU, mau_intr,
2091                              cpu_to_mau);
2092         mdesc_release(mdesc);
2093
2094         if (err) {
2095                 dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2096                         dev->dev.of_node);
2097                 goto out_free_global;
2098         }
2099
2100         dev_set_drvdata(&dev->dev, mp);
2101
2102         return 0;
2103
2104 out_free_global:
2105         release_global_resources();
2106
2107 out_free_ncp:
2108         free_ncp(mp);
2109
2110         return err;
2111 }
2112
2113 static int n2_mau_remove(struct platform_device *dev)
2114 {
2115         struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2116
2117         spu_list_destroy(&mp->mau_list);
2118
2119         release_global_resources();
2120
2121         free_ncp(mp);
2122
2123         return 0;
2124 }
2125
2126 static const struct of_device_id n2_crypto_match[] = {
2127         {
2128                 .name = "n2cp",
2129                 .compatible = "SUNW,n2-cwq",
2130         },
2131         {
2132                 .name = "n2cp",
2133                 .compatible = "SUNW,vf-cwq",
2134         },
2135         {
2136                 .name = "n2cp",
2137                 .compatible = "SUNW,kt-cwq",
2138         },
2139         {},
2140 };
2141
2142 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2143
2144 static struct platform_driver n2_crypto_driver = {
2145         .driver = {
2146                 .name           =       "n2cp",
2147                 .of_match_table =       n2_crypto_match,
2148         },
2149         .probe          =       n2_crypto_probe,
2150         .remove         =       n2_crypto_remove,
2151 };
2152
2153 static const struct of_device_id n2_mau_match[] = {
2154         {
2155                 .name = "ncp",
2156                 .compatible = "SUNW,n2-mau",
2157         },
2158         {
2159                 .name = "ncp",
2160                 .compatible = "SUNW,vf-mau",
2161         },
2162         {
2163                 .name = "ncp",
2164                 .compatible = "SUNW,kt-mau",
2165         },
2166         {},
2167 };
2168
2169 MODULE_DEVICE_TABLE(of, n2_mau_match);
2170
2171 static struct platform_driver n2_mau_driver = {
2172         .driver = {
2173                 .name           =       "ncp",
2174                 .of_match_table =       n2_mau_match,
2175         },
2176         .probe          =       n2_mau_probe,
2177         .remove         =       n2_mau_remove,
2178 };
2179
2180 static struct platform_driver * const drivers[] = {
2181         &n2_crypto_driver,
2182         &n2_mau_driver,
2183 };
2184
2185 static int __init n2_init(void)
2186 {
2187         return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2188 }
2189
2190 static void __exit n2_exit(void)
2191 {
2192         platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2193 }
2194
2195 module_init(n2_init);
2196 module_exit(n2_exit);