crypto/cfb.c
// SPDX-License-Identifier: GPL-2.0
/*
 * CFB: Cipher FeedBack mode
 *
 * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
 *
 * CFB is a stream cipher mode which is layered on to a block
 * encryption scheme.  It works very much like a one-time pad where
 * the pad is generated initially from the encrypted IV and then
 * subsequently from the encrypted previous block of ciphertext.  The
 * pad is XOR'd into the plaintext to get the final ciphertext.
 *
 * The scheme of CFB is best described by Wikipedia:
 *
 * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
 *
 * Note that since the pad for both encryption and decryption is
 * generated by an encryption operation, CFB never uses the block
 * decryption function.
 */
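
/*
 * In symbols, with E_K the block cipher encryption and full-block
 * feedback (which is what this implementation provides):
 *
 *	C[1] = P[1] ^ E_K(IV),		C[i] = P[i] ^ E_K(C[i-1])
 *	P[1] = C[1] ^ E_K(IV),		P[i] = C[i] ^ E_K(C[i-1])
 *
 * A trailing partial block is simply XOR'd with as many keystream
 * bytes as needed, which is why no padding is required.
 */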

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

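/* Block size of the single-block cipher this mode is wrapped around. */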
static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
{
        return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
}

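/*
 * Encrypt one block with the underlying cipher.  Both CFB encryption and
 * decryption are built from this; the cipher's decrypt path is never used.
 */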
static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
                                   const u8 *src, u8 *dst)
{
        crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}

/*
 * The final, possibly partial, block is handled the same way for
 * encryption and decryption.
 */
static void crypto_cfb_final(struct skcipher_walk *walk,
                             struct crypto_skcipher *tfm)
{
        const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
        u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;
        unsigned int nbytes = walk->nbytes;

        crypto_cfb_encrypt_one(tfm, iv, stream);
        crypto_xor_cpy(dst, stream, src, nbytes);
}

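/*
 * Encrypt full blocks when source and destination buffers are distinct.
 * Each ciphertext block just written doubles as the chaining value for
 * the next block.  Returns the number of trailing bytes that were left
 * unprocessed because they do not make up a full block.
 */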
static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
                                      struct crypto_skcipher *tfm)
{
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_cfb_encrypt_one(tfm, iv, dst);
                crypto_xor(dst, src, bsize);
                iv = dst;

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}

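/*
 * Encrypt full blocks when the operation is in place (src == dst).  The
 * keystream has to go through a stack buffer because the plaintext is
 * overwritten with ciphertext as we go.
 */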
static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
                                      struct crypto_skcipher *tfm)
{
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *iv = walk->iv;
        u8 tmp[MAX_CIPHER_BLOCKSIZE];

        do {
                crypto_cfb_encrypt_one(tfm, iv, tmp);
                crypto_xor(src, tmp, bsize);
                iv = src;

                src += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}

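/*
 * Top-level encrypt: walk the request, process all full blocks, then
 * handle any trailing partial block with crypto_cfb_final().
 */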
static int crypto_cfb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_walk walk;
        unsigned int bsize = crypto_cfb_bsize(tfm);
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes >= bsize) {
                if (walk.src.virt.addr == walk.dst.virt.addr)
                        err = crypto_cfb_encrypt_inplace(&walk, tfm);
                else
                        err = crypto_cfb_encrypt_segment(&walk, tfm);
                err = skcipher_walk_done(&walk, err);
        }

        if (walk.nbytes) {
                crypto_cfb_final(&walk, tfm);
                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}

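/*
 * Decrypt full blocks when source and destination buffers are distinct.
 * Unlike encryption, the chaining value is the previous ciphertext
 * block, i.e. the source, not the freshly written plaintext.
 */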
static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
                                      struct crypto_skcipher *tfm)
{
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        u8 *iv = walk->iv;

        do {
                crypto_cfb_encrypt_one(tfm, iv, dst);
                crypto_xor(dst, src, bsize);
                iv = src;

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        memcpy(walk->iv, iv, bsize);

        return nbytes;
}

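/*
 * Decrypt full blocks in place.  The current ciphertext block is saved
 * into walk->iv before it is overwritten, because it is needed as the
 * chaining value for the next block.
 */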
static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
                                      struct crypto_skcipher *tfm)
{
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *src = walk->src.virt.addr;
        u8 * const iv = walk->iv;
        u8 tmp[MAX_CIPHER_BLOCKSIZE];

        do {
                crypto_cfb_encrypt_one(tfm, iv, tmp);
                memcpy(iv, src, bsize);
                crypto_xor(src, tmp, bsize);
                src += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;
}

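/* Dispatch full-block decryption to the in-place or out-of-place path. */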
static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
                                     struct crypto_skcipher *tfm)
{
        if (walk->src.virt.addr == walk->dst.virt.addr)
                return crypto_cfb_decrypt_inplace(walk, tfm);
        else
                return crypto_cfb_decrypt_segment(walk, tfm);
}

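/*
 * Top-level decrypt: mirrors crypto_cfb_encrypt(), including the shared
 * handling of a trailing partial block.
 */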
static int crypto_cfb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_walk walk;
        const unsigned int bsize = crypto_cfb_bsize(tfm);
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes >= bsize) {
                err = crypto_cfb_decrypt_blocks(&walk, tfm);
                err = skcipher_walk_done(&walk, err);
        }

        if (walk.nbytes) {
                crypto_cfb_final(&walk, tfm);
                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}

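/*
 * Instantiate "cfb(<cipher>)" as a simple skcipher wrapped around a
 * single-block cipher algorithm.
 */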
static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct skcipher_instance *inst;
        struct crypto_alg *alg;
        int err;

        inst = skcipher_alloc_instance_simple(tmpl, tb);
        if (IS_ERR(inst))
                return PTR_ERR(inst);

        alg = skcipher_ialg_simple(inst);

        /* CFB mode is a stream cipher. */
        inst->alg.base.cra_blocksize = 1;

        /*
         * To simplify the implementation, configure the skcipher walk to only
         * give a partial block at the very end, never earlier.
         */
        inst->alg.chunksize = alg->cra_blocksize;

        inst->alg.encrypt = crypto_cfb_encrypt;
        inst->alg.decrypt = crypto_cfb_decrypt;

        err = skcipher_register_instance(tmpl, inst);
        if (err)
                inst->free(inst);

        return err;
}

static struct crypto_template crypto_cfb_tmpl = {
        .name = "cfb",
        .create = crypto_cfb_create,
        .module = THIS_MODULE,
};

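/*
 * Example (illustrative, not part of this file): a kernel user would
 * typically instantiate this template by name via the skcipher API,
 * e.g. for AES in CFB mode:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cfb(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	...set up an skcipher_request with an IV, then call
 *	   crypto_skcipher_encrypt() or crypto_skcipher_decrypt()...
 *	crypto_free_skcipher(tfm);
 *
 * where "key", "keylen" and the request setup are placeholders.
 */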
static int __init crypto_cfb_module_init(void)
{
        return crypto_register_template(&crypto_cfb_tmpl);
}

static void __exit crypto_cfb_module_exit(void)
{
        crypto_unregister_template(&crypto_cfb_tmpl);
}

subsys_initcall(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cfb");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);