// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/sha2.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
		  struct vio_pfo_op    *op,
		  u32                   may_sleep)
{
	int rc, retries = 10;
	struct vio_dev *viodev = nx_driver.viodev;

	atomic_inc(&(nx_ctx->stats->sync_ops));

	do {
		rc = vio_h_cop_sync(viodev, op);
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
			"hcall rc: %ld\n", rc, op->hcall_err);
		atomic_inc(&(nx_ctx->stats->errors));
		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
	}

	return rc;
}
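
/*
 * Illustrative sketch (not part of the driver): callers hand in the
 * vio_pfo_op that nx_ctx_init() populated and decide per-request whether
 * sleeping is allowed. With may_sleep clear, the hardware gets at most
 * 10 extra attempts before the -EBUSY from vio_h_cop_sync() is returned
 * to the caller:
 *
 *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, may_sleep);
 *	if (rc == -EBUSY)
 *		... hardware stayed busy; caller may retry or fail ...
 */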
/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a 4K
 * boundary.
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8           *start_addr,
			       unsigned int *len,
			       u32           sgmax)
{
	unsigned int sg_len = 0;
	struct nx_sg *sg;
	u64 sg_addr = (u64)start_addr;
	u64 end_addr;

	/* determine the start and end for this address range - slightly
	 * different if this is in VMALLOC_REGION */
	if (is_vmalloc_addr(start_addr))
		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
			  + offset_in_page(sg_addr);
	else
		sg_addr = __pa(sg_addr);

	end_addr = sg_addr + *len;

	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr; if not, we need to create another nx_sg element for the
	 * data on the next page.
	 *
	 * Also, when using vmalloc'ed data, every time a system page boundary
	 * is crossed the physical address needs to be re-calculated.
	 */
	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
			sg++;
			break;
		}
	}
	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
}
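
/*
 * Worked example of the 4K rule above (hypothetical numbers): describing
 * 6000 bytes that start 2000 bytes into a 4K page produces two nx_sg
 * elements, since no element may cross a 4K boundary: the first covers
 * the 2096 bytes up to the page end, the second the remaining 3904 bytes.
 */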
/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
				unsigned int        sglen,
				struct scatterlist *sg_src,
				unsigned int        start,
				unsigned int       *src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, offset = 0, len = *src_len;
	char *dst;

	/* we need to fast forward through @start bytes first */
	for (;;) {
		scatterwalk_start(&walk, sg_src);

		if (start < offset + sg_src->length)
			break;

		offset += sg_src->length;
		sg_src = sg_next(sg_src);
	}

	/* start - offset is the number of bytes to advance in the scatterlist
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* In cases where the scatterlist is chained,
			 * sg_next handles crossing into the next entry
			 * properly */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}
	/* update to_process */
	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
}
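
/*
 * Illustrative use (a sketch; the 16-byte skip is a made-up example):
 * to describe bytes [16, 16 + n) of a request's source scatterlist,
 * e.g. skipping a prepended IV:
 *
 *	struct nx_sg *end = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
 *					      req->src, 16, &n);
 *
 * On return, n holds the number of bytes actually described and end
 * points one past the last nx_sg element written.
 */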
/**
 * trim_sg_list - trim an NX scatterlist so it stays within a given bound
 *
 * @sg: sg list head
 * @end: sg list end
 * @delta: the number of bytes to crop in order to bound the list
 * @nbytes: length of data in the scatterlists, adjusted on return
 */
static long int trim_sg_list(struct nx_sg *sg,
			     struct nx_sg *end,
			     unsigned int delta,
			     unsigned int *nbytes)
{
	long int oplen;
	long int data_back;
	unsigned int is_delta = delta;

	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	/* There are cases where we need to crop the list in order to make it
	 * a block size multiple, but we also need to align data. In order to
	 * do that we need to calculate how much we need to put back to be
	 * processed
	 */
	oplen = (sg - end) * sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
		*nbytes -= data_back;
	}

	return oplen;
}
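
/*
 * Worked example (hypothetical numbers): with delta == 10 and a final
 * element of 24 bytes, the loop above shrinks that element to 14 bytes
 * and stops; a larger delta would instead drop whole elements off the
 * end until it was consumed. The returned oplen is negative (end sits
 * above sg), which is the form phyp expects for scatter/gather operands;
 * see the comment in nx_build_sg_lists() below.
 */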
/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @iv: iv data, if the algorithm requires it
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @oiv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the crypto
 * scatterlist walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists
 */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      const u8 *iv,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int *nbytes,
		      unsigned int offset,
		      u8 *oiv)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	if (oiv)
		memcpy(oiv, iv, AES_BLOCK_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
				     offset, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
				    offset, nbytes);

	if (*nbytes < total)
		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);

	return 0;
}
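
/*
 * Illustrative call site (a sketch modeled on the AES routines; the
 * variable names are assumptions, not exact driver code):
 *
 *	rc = nx_build_sg_lists(nx_ctx, req->iv, req->dst, req->src,
 *			       &to_process, processed,
 *			       csbcpb->cpb.aes_cbc.iv);
 *	if (rc)
 *		goto out;
 *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, may_sleep);
 *
 * On success, nx_ctx->op.inlen and op.outlen already carry the negative
 * scatterlist lengths described above.
 */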
/**
 * nx_ctx_init - initialize an nx_crypto_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

	nx_ctx->op.flags = function;
	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
	nx_ctx->op.in = __pa(nx_ctx->in_sg);
	nx_ctx->op.out = __pa(nx_ctx->out_sg);

	if (nx_ctx->csbcpb_aead) {
		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

		nx_ctx->op_aead.flags = function;
		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
	}
}
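
/*
 * Note that op.csbcpb, op.in and op.out are physical addresses (__pa):
 * the vio_pfo_op is consumed by firmware rather than dereferenced by the
 * kernel, which is also why the csbcpb and both sg arrays must live in
 * the single physically contiguous kmem block that nx_crypto_ctx_init()
 * allocates below.
 */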
static void nx_of_update_status(struct device   *dev,
				struct property *p,
				struct nx_of    *props)
{
	if (!strncmp(p->value, "okay", p->length)) {
		props->status = NX_WAITING;
		props->flags |= NX_OF_FLAG_STATUS_SET;
	} else {
		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
			 (char *)p->value);
	}
}
static void nx_of_update_sglen(struct device   *dev,
			       struct property *p,
			       struct nx_of    *props)
{
	if (p->length != sizeof(props->max_sg_len)) {
		dev_err(dev, "%s: unexpected format for "
			"ibm,max-sg-len property\n", __func__);
		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
			"long, expected %zd bytes\n", __func__,
			p->length, sizeof(props->max_sg_len));
		return;
	}

	props->max_sg_len = *(u32 *)p->value;
	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}
static void nx_of_update_msc(struct device   *dev,
			     struct property *p,
			     struct nx_of    *props)
{
	struct msc_triplet *trip;
	struct max_sync_cop *msc;
	unsigned int bytes_so_far, i, lenp;

	msc = (struct max_sync_cop *)p->value;
	lenp = p->length;

	/* You can't tell if the data read in for this property is sane by its
	 * size alone. This is because there are sizes embedded in the data
	 * structure. The best we can do is check lengths as we parse and bail
	 * as soon as a length error is detected. */
	bytes_so_far = 0;

	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
		bytes_so_far += sizeof(struct max_sync_cop);

		trip = msc->trip;
		for (i = 0;
		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
		     i < msc->triplets;
		     i++) {
			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
				dev_err(dev, "unknown function code/mode "
					"combo: %d/%d (ignored)\n", msc->fc,
					msc->mode);
				goto next_loop;
			}

			if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
				dev_warn(dev, "bogus sglen/databytelen: "
					 "%u/%u (ignored)\n", trip->sglen,
					 trip->databytelen);
				goto next_loop;
			}

			switch (trip->keybitlen) {
			case 128:
			case 160:
				props->ap[msc->fc][msc->mode][0].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][0].sglen =
					trip->sglen;
				break;
			case 192:
				props->ap[msc->fc][msc->mode][1].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][1].sglen =
					trip->sglen;
				break;
			case 256:
				if (msc->fc == NX_FC_AES) {
					props->ap[msc->fc][msc->mode][2].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][2].sglen =
						trip->sglen;
				} else if (msc->fc == NX_FC_AES_HMAC ||
					   msc->fc == NX_FC_SHA) {
					props->ap[msc->fc][msc->mode][1].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][1].sglen =
						trip->sglen;
				} else {
					dev_warn(dev, "unknown function "
						 "code/key bit len combo"
						 ": (%u/256)\n", msc->fc);
				}
				break;
			case 512:
				props->ap[msc->fc][msc->mode][2].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][2].sglen =
					trip->sglen;
				break;
			default:
				dev_warn(dev, "unknown function code/key bit "
					 "len combo: (%u/%u)\n", msc->fc,
					 trip->keybitlen);
				break;
			}
next_loop:
			bytes_so_far += sizeof(struct msc_triplet);
			trip++;
		}

		msc = (struct max_sync_cop *)trip;
	}

	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}
/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
	struct device_node *base_node = dev->of_node;
	struct property *p;

	p = of_find_property(base_node, "status", NULL);
	if (!p)
		dev_info(dev, "%s: property 'status' not found\n", __func__);
	else
		nx_of_update_status(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
			 __func__);
	else
		nx_of_update_sglen(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
			 __func__);
	else
		nx_of_update_msc(dev, p, props);
}
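
/*
 * For reference, a device-tree node that satisfies this probe logic would
 * look roughly like the following sketch. The three property names come
 * from the lookups above; the node name matches the vio id table at the
 * bottom of this file, and the values shown are invented:
 *
 *	ibm,sym-encryption {
 *		status = "okay";
 *		ibm,max-sg-len = <0x1000>;
 *		ibm,max-sync-cop = <...>;	// parsed by nx_of_update_msc()
 *	};
 */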
static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
	struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

	if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
		if (dev)
			dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
				 "%u/%u (ignored)\n", fc, mode, slot,
				 props->sglen, props->databytelen);
		return false;
	}

	return true;
}
static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
	int i;

	for (i = 0; i < 3; i++)
		if (!nx_check_prop(dev, fc, mode, i))
			return false;

	return true;
}
static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_skcipher(alg) : 0;
}

static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_aead(alg) : 0;
}

static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
	return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
					  fc, mode, slot) :
			    nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
	       crypto_register_shash(alg) : 0;
}

static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_skcipher(alg);
}

static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_aead(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
				int slot)
{
	if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
			nx_check_props(NULL, fc, mode))
		crypto_unregister_shash(alg);
}
/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
	int rc = -1;

	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
		goto out;

	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

	NX_DEBUGFS_INIT(&nx_driver);

	nx_driver.of.status = NX_OKAY;

	rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	if (rc)
		goto out;

	rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
	if (rc)
		goto out_unreg_ecb;

	rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
				  NX_MODE_AES_CTR);
	if (rc)
		goto out_unreg_cbc;

	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_ctr3686;

	rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_gcm;

	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_gcm4106;

	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_ccm;

	rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA256);
	if (rc)
		goto out_unreg_ccm4309;

	rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA512);
	if (rc)
		goto out_unreg_s256;

	rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
			       NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
	if (rc)
		goto out_unreg_s512;

	goto out;

out_unreg_s512:
	nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA512);
out_unreg_s256:
	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA256);
out_unreg_ccm4309:
	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
	nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
	nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
	nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
	return rc;
}
/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
	if (nx_driver.of.status != NX_OKAY) {
		pr_err("Attempt to initialize NX crypto context while device "
		       "is not available!\n");
		return -ENODEV;
	}

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
	if (!nx_ctx->kmem)
		return -ENOMEM;

	/* the csbcpb and scatterlists must be 4K aligned pages */
	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
						       (u64)NX_PAGE_SIZE));
	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->csbcpb_aead =
			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
					     NX_PAGE_SIZE);

	/* give each context a pointer to global stats and their OF
	 * properties */
	nx_ctx->stats = &nx_driver.stats;
	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
	       sizeof(struct alg_props) * 3);

	return 0;
}
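
/*
 * Resulting kmem layout after the round_up() above (the trailing
 * csbcpb_aead page exists only for the GCM/CCM modes):
 *
 *	kmem --> [ pad ][ csbcpb ][ in_sg ][ out_sg ][ csbcpb_aead ]
 *	               ^ 4K-aligned, then one 4K page per region
 *
 * This is why kmem_len budgets 4 (or 5) NX pages plus sizeof(struct
 * nx_csbcpb) of slack for the alignment padding.
 */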
/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_XCBC_MAC);
}
/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(nx_ctx->kmem);
	nx_ctx->csbcpb = NULL;
	nx_ctx->csbcpb_aead = NULL;
	nx_ctx->in_sg = NULL;
	nx_ctx->out_sg = NULL;
}

void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
{
	/* pass the underlying crypto_tfm, not the context pointer */
	nx_crypto_ctx_exit(crypto_skcipher_tfm(tfm));
}

void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);

	kfree_sensitive(nx_ctx->kmem);
}
static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
		viodev->name, viodev->resource_id);

	if (nx_driver.viodev) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one "
			"instance of the hardware\n", __func__);
		return -EINVAL;
	}

	nx_driver.viodev = viodev;

	nx_of_init(&viodev->dev, &nx_driver.of);

	return nx_register_algs();
}
static void nx_remove(struct vio_dev *viodev)
{
	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
		viodev->unit_address);

	if (nx_driver.of.status == NX_OKAY) {
		NX_DEBUGFS_FINI(&nx_driver);

		nx_unregister_shash(&nx_shash_aes_xcbc_alg,
				    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
		nx_unregister_shash(&nx_shash_sha512_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
		nx_unregister_shash(&nx_shash_sha256_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
		nx_unregister_aead(&nx_ccm4309_aes_alg,
				   NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_gcm4106_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_aead(&nx_gcm_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_skcipher(&nx_ctr3686_aes_alg,
				       NX_FC_AES, NX_MODE_AES_CTR);
		nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
				       NX_MODE_AES_CBC);
		nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
				       NX_MODE_AES_ECB);
	}
}
/* module wide initialization/cleanup */
static int __init nx_init(void)
{
	return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
	vio_unregister_driver(&nx_driver.viodriver);
}

static const struct vio_device_id nx_crypto_driver_ids[] = {
	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
	.viodriver = {
		.id_table = nx_crypto_driver_ids,
		.probe = nx_probe,
		.remove = nx_remove,
		.name  = NX_NAME,
	},
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);