/* gcm.c

   Galois counter mode, specified by NIST,
   http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf

   See also the gcm paper at
   http://www.cryptobarn.com/papers/gcm-spec.pdf.

   Copyright (C) 2011, 2013 Niels Möller
   Copyright (C) 2011 Katholieke Universiteit Leuven

   Contributed by Nikos Mavrogiannopoulos

   This file is part of GNU Nettle.

   GNU Nettle is free software: you can redistribute it and/or
   modify it under the terms of either:

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at your
       option) any later version.

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at your
       option) any later version.

   or both in parallel, as here.

   GNU Nettle is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program.  If
   not, see http://www.gnu.org/licenses/.
*/
#include <assert.h>
#include <string.h>

#include "nettle-internal.h"
/* The low byte of the GHASH reduction polynomial
   x^128 + x^7 + x^2 + x + 1, in GCM's bit-reversed representation.
   XORed in whenever a shift drops a one bit off the end. */
#define GHASH_POLYNOMIAL 0xE1UL
58 gcm_gf_add (union nettle_block16 *r,
59 const union nettle_block16 *x, const union nettle_block16 *y)
61 r->w[0] = x->w[0] ^ y->w[0];
62 r->w[1] = x->w[1] ^ y->w[1];
64 r->w[2] = x->w[2] ^ y->w[2];
65 r->w[3] = x->w[3] ^ y->w[3];
68 /* Multiplication by 010...0; a big-endian shift right. If the bit
69 shifted out is one, the defining polynomial is added to cancel it
70 out. r == x is allowed. */
72 gcm_gf_shift (union nettle_block16 *r, const union nettle_block16 *x)
76 /* Shift uses big-endian representation. */
79 mask = - (x->w[3] & 1);
80 r->w[3] = (x->w[3] >> 1) | ((x->w[2] & 1) << 31);
81 r->w[2] = (x->w[2] >> 1) | ((x->w[1] & 1) << 31);
82 r->w[1] = (x->w[1] >> 1) | ((x->w[0] & 1) << 31);
83 r->w[0] = (x->w[0] >> 1) ^ (mask & (GHASH_POLYNOMIAL << 24));
84 # elif SIZEOF_LONG == 8
85 mask = - (x->w[1] & 1);
86 r->w[1] = (x->w[1] >> 1) | ((x->w[0] & 1) << 63);
87 r->w[0] = (x->w[0] >> 1) ^ (mask & (GHASH_POLYNOMIAL << 56));
89 # error Unsupported word size. */
91 #else /* ! WORDS_BIGENDIAN */
93 #define RSHIFT_WORD(x) \
94 ((((x) & 0xfefefefeUL) >> 1) \
95 | (((x) & 0x00010101) << 15))
96 mask = - ((x->w[3] >> 24) & 1);
97 r->w[3] = RSHIFT_WORD(x->w[3]) | ((x->w[2] >> 17) & 0x80);
98 r->w[2] = RSHIFT_WORD(x->w[2]) | ((x->w[1] >> 17) & 0x80);
99 r->w[1] = RSHIFT_WORD(x->w[1]) | ((x->w[0] >> 17) & 0x80);
100 r->w[0] = RSHIFT_WORD(x->w[0]) ^ (mask & GHASH_POLYNOMIAL);
101 # elif SIZEOF_LONG == 8
102 #define RSHIFT_WORD(x) \
103 ((((x) & 0xfefefefefefefefeUL) >> 1) \
104 | (((x) & 0x0001010101010101UL) << 15))
105 mask = - ((x->w[1] >> 56) & 1);
106 r->w[1] = RSHIFT_WORD(x->w[1]) | ((x->w[0] >> 49) & 0x80);
107 r->w[0] = RSHIFT_WORD(x->w[0]) ^ (mask & GHASH_POLYNOMIAL);
109 # error Unsupported word size. */
112 #endif /* ! WORDS_BIGENDIAN */
115 #if GCM_TABLE_BITS == 0
116 /* Sets x <- x * y mod r, using the plain bitwise algorithm from the
117 specification. y may be shorter than a full block, missing bytes
120 gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *y)
122 union nettle_block16 V;
123 union nettle_block16 Z;
126 memcpy(V.b, x, sizeof(V));
127 memset(Z.b, 0, sizeof(Z));
129 for (i = 0; i < GCM_BLOCK_SIZE; i++)
133 for (j = 0; j < 8; j++, b <<= 1)
136 gcm_gf_add(&Z, &Z, &V);
138 gcm_gf_shift(&V, &V);
141 memcpy (x->b, Z.b, sizeof(Z));
143 #else /* GCM_TABLE_BITS != 0 */
146 # define W(left,right) (0x##left##right)
148 # define W(left,right) (0x##right##left)
151 # if GCM_TABLE_BITS == 4
152 static const uint16_t
153 shift_table[0x10] = {
154 W(00,00),W(1c,20),W(38,40),W(24,60),W(70,80),W(6c,a0),W(48,c0),W(54,e0),
155 W(e1,00),W(fd,20),W(d9,40),W(c5,60),W(91,80),W(8d,a0),W(a9,c0),W(b5,e0),
159 gcm_gf_shift_4(union nettle_block16 *x)
161 unsigned long *w = x->w;
162 unsigned long reduce;
164 /* Shift uses big-endian representation. */
166 # if SIZEOF_LONG == 4
167 reduce = shift_table[w[3] & 0xf];
168 w[3] = (w[3] >> 4) | ((w[2] & 0xf) << 28);
169 w[2] = (w[2] >> 4) | ((w[1] & 0xf) << 28);
170 w[1] = (w[1] >> 4) | ((w[0] & 0xf) << 28);
171 w[0] = (w[0] >> 4) ^ (reduce << 16);
172 # elif SIZEOF_LONG == 8
173 reduce = shift_table[w[1] & 0xf];
174 w[1] = (w[1] >> 4) | ((w[0] & 0xf) << 60);
175 w[0] = (w[0] >> 4) ^ (reduce << 48);
177 # error Unsupported word size. */
179 #else /* ! WORDS_BIGENDIAN */
180 # if SIZEOF_LONG == 4
181 #define RSHIFT_WORD(x) \
182 ((((x) & 0xf0f0f0f0UL) >> 4) \
183 | (((x) & 0x000f0f0f) << 12))
184 reduce = shift_table[(w[3] >> 24) & 0xf];
185 w[3] = RSHIFT_WORD(w[3]) | ((w[2] >> 20) & 0xf0);
186 w[2] = RSHIFT_WORD(w[2]) | ((w[1] >> 20) & 0xf0);
187 w[1] = RSHIFT_WORD(w[1]) | ((w[0] >> 20) & 0xf0);
188 w[0] = RSHIFT_WORD(w[0]) ^ reduce;
189 # elif SIZEOF_LONG == 8
190 #define RSHIFT_WORD(x) \
191 ((((x) & 0xf0f0f0f0f0f0f0f0UL) >> 4) \
192 | (((x) & 0x000f0f0f0f0f0f0fUL) << 12))
193 reduce = shift_table[(w[1] >> 56) & 0xf];
194 w[1] = RSHIFT_WORD(w[1]) | ((w[0] >> 52) & 0xf0);
195 w[0] = RSHIFT_WORD(w[0]) ^ reduce;
197 # error Unsupported word size. */
200 #endif /* ! WORDS_BIGENDIAN */
204 gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *table)
206 union nettle_block16 Z;
209 memset(Z.b, 0, sizeof(Z));
211 for (i = GCM_BLOCK_SIZE; i-- > 0;)
216 gcm_gf_add(&Z, &Z, &table[b & 0xf]);
218 gcm_gf_add(&Z, &Z, &table[b >> 4]);
220 memcpy (x->b, Z.b, sizeof(Z));
222 # elif GCM_TABLE_BITS == 8
223 # if HAVE_NATIVE_gcm_hash8
225 #define gcm_hash _nettle_gcm_hash8
227 _nettle_gcm_hash8 (const struct gcm_key *key, union nettle_block16 *x,
228 size_t length, const uint8_t *data);
229 # else /* !HAVE_NATIVE_gcm_hash8 */
230 static const uint16_t
231 shift_table[0x100] = {
232 W(00,00),W(01,c2),W(03,84),W(02,46),W(07,08),W(06,ca),W(04,8c),W(05,4e),
233 W(0e,10),W(0f,d2),W(0d,94),W(0c,56),W(09,18),W(08,da),W(0a,9c),W(0b,5e),
234 W(1c,20),W(1d,e2),W(1f,a4),W(1e,66),W(1b,28),W(1a,ea),W(18,ac),W(19,6e),
235 W(12,30),W(13,f2),W(11,b4),W(10,76),W(15,38),W(14,fa),W(16,bc),W(17,7e),
236 W(38,40),W(39,82),W(3b,c4),W(3a,06),W(3f,48),W(3e,8a),W(3c,cc),W(3d,0e),
237 W(36,50),W(37,92),W(35,d4),W(34,16),W(31,58),W(30,9a),W(32,dc),W(33,1e),
238 W(24,60),W(25,a2),W(27,e4),W(26,26),W(23,68),W(22,aa),W(20,ec),W(21,2e),
239 W(2a,70),W(2b,b2),W(29,f4),W(28,36),W(2d,78),W(2c,ba),W(2e,fc),W(2f,3e),
240 W(70,80),W(71,42),W(73,04),W(72,c6),W(77,88),W(76,4a),W(74,0c),W(75,ce),
241 W(7e,90),W(7f,52),W(7d,14),W(7c,d6),W(79,98),W(78,5a),W(7a,1c),W(7b,de),
242 W(6c,a0),W(6d,62),W(6f,24),W(6e,e6),W(6b,a8),W(6a,6a),W(68,2c),W(69,ee),
243 W(62,b0),W(63,72),W(61,34),W(60,f6),W(65,b8),W(64,7a),W(66,3c),W(67,fe),
244 W(48,c0),W(49,02),W(4b,44),W(4a,86),W(4f,c8),W(4e,0a),W(4c,4c),W(4d,8e),
245 W(46,d0),W(47,12),W(45,54),W(44,96),W(41,d8),W(40,1a),W(42,5c),W(43,9e),
246 W(54,e0),W(55,22),W(57,64),W(56,a6),W(53,e8),W(52,2a),W(50,6c),W(51,ae),
247 W(5a,f0),W(5b,32),W(59,74),W(58,b6),W(5d,f8),W(5c,3a),W(5e,7c),W(5f,be),
248 W(e1,00),W(e0,c2),W(e2,84),W(e3,46),W(e6,08),W(e7,ca),W(e5,8c),W(e4,4e),
249 W(ef,10),W(ee,d2),W(ec,94),W(ed,56),W(e8,18),W(e9,da),W(eb,9c),W(ea,5e),
250 W(fd,20),W(fc,e2),W(fe,a4),W(ff,66),W(fa,28),W(fb,ea),W(f9,ac),W(f8,6e),
251 W(f3,30),W(f2,f2),W(f0,b4),W(f1,76),W(f4,38),W(f5,fa),W(f7,bc),W(f6,7e),
252 W(d9,40),W(d8,82),W(da,c4),W(db,06),W(de,48),W(df,8a),W(dd,cc),W(dc,0e),
253 W(d7,50),W(d6,92),W(d4,d4),W(d5,16),W(d0,58),W(d1,9a),W(d3,dc),W(d2,1e),
254 W(c5,60),W(c4,a2),W(c6,e4),W(c7,26),W(c2,68),W(c3,aa),W(c1,ec),W(c0,2e),
255 W(cb,70),W(ca,b2),W(c8,f4),W(c9,36),W(cc,78),W(cd,ba),W(cf,fc),W(ce,3e),
256 W(91,80),W(90,42),W(92,04),W(93,c6),W(96,88),W(97,4a),W(95,0c),W(94,ce),
257 W(9f,90),W(9e,52),W(9c,14),W(9d,d6),W(98,98),W(99,5a),W(9b,1c),W(9a,de),
258 W(8d,a0),W(8c,62),W(8e,24),W(8f,e6),W(8a,a8),W(8b,6a),W(89,2c),W(88,ee),
259 W(83,b0),W(82,72),W(80,34),W(81,f6),W(84,b8),W(85,7a),W(87,3c),W(86,fe),
260 W(a9,c0),W(a8,02),W(aa,44),W(ab,86),W(ae,c8),W(af,0a),W(ad,4c),W(ac,8e),
261 W(a7,d0),W(a6,12),W(a4,54),W(a5,96),W(a0,d8),W(a1,1a),W(a3,5c),W(a2,9e),
262 W(b5,e0),W(b4,22),W(b6,64),W(b7,a6),W(b2,e8),W(b3,2a),W(b1,6c),W(b0,ae),
263 W(bb,f0),W(ba,32),W(b8,74),W(b9,b6),W(bc,f8),W(bd,3a),W(bf,7c),W(be,be),
267 gcm_gf_shift_8(union nettle_block16 *x)
269 unsigned long *w = x->w;
270 unsigned long reduce;
272 /* Shift uses big-endian representation. */
274 # if SIZEOF_LONG == 4
275 reduce = shift_table[w[3] & 0xff];
276 w[3] = (w[3] >> 8) | ((w[2] & 0xff) << 24);
277 w[2] = (w[2] >> 8) | ((w[1] & 0xff) << 24);
278 w[1] = (w[1] >> 8) | ((w[0] & 0xff) << 24);
279 w[0] = (w[0] >> 8) ^ (reduce << 16);
280 # elif SIZEOF_LONG == 8
281 reduce = shift_table[w[1] & 0xff];
282 w[1] = (w[1] >> 8) | ((w[0] & 0xff) << 56);
283 w[0] = (w[0] >> 8) ^ (reduce << 48);
285 # error Unsupported word size. */
287 #else /* ! WORDS_BIGENDIAN */
288 # if SIZEOF_LONG == 4
289 reduce = shift_table[(w[3] >> 24) & 0xff];
290 w[3] = (w[3] << 8) | (w[2] >> 24);
291 w[2] = (w[2] << 8) | (w[1] >> 24);
292 w[1] = (w[1] << 8) | (w[0] >> 24);
293 w[0] = (w[0] << 8) ^ reduce;
294 # elif SIZEOF_LONG == 8
295 reduce = shift_table[(w[1] >> 56) & 0xff];
296 w[1] = (w[1] << 8) | (w[0] >> 56);
297 w[0] = (w[0] << 8) ^ reduce;
299 # error Unsupported word size. */
301 #endif /* ! WORDS_BIGENDIAN */
305 gcm_gf_mul (union nettle_block16 *x, const union nettle_block16 *table)
307 union nettle_block16 Z;
310 memcpy(Z.b, table[x->b[GCM_BLOCK_SIZE-1]].b, GCM_BLOCK_SIZE);
312 for (i = GCM_BLOCK_SIZE-2; i > 0; i--)
315 gcm_gf_add(&Z, &Z, &table[x->b[i]]);
318 gcm_gf_add(x, &Z, &table[x->b[0]]);
320 # endif /* ! HAVE_NATIVE_gcm_hash8 */
321 # else /* GCM_TABLE_BITS != 8 */
322 # error Unsupported table size.
323 # endif /* GCM_TABLE_BITS != 8 */
327 #endif /* GCM_TABLE_BITS */
/* Increment the rightmost 32 bits, i.e. the CTR-mode counter field of
   a GCM block. */
#define INC32(block) INCREMENT(4, (block.b) + GCM_BLOCK_SIZE - 4)
332 /* Initialization of GCM.
333 * @ctx: The context of GCM
334 * @cipher: The context of the underlying block cipher
335 * @f: The underlying cipher encryption function
338 gcm_set_key(struct gcm_key *key,
339 const void *cipher, nettle_cipher_func *f)
341 /* Middle element if GCM_TABLE_BITS > 0, otherwise the first
343 unsigned i = (1<<GCM_TABLE_BITS)/2;
346 memset(key->h[0].b, 0, GCM_BLOCK_SIZE);
347 f (cipher, GCM_BLOCK_SIZE, key->h[i].b, key->h[0].b);
350 /* Algorithm 3 from the gcm paper. First do powers of two, then do
351 the rest by adding. */
353 gcm_gf_shift(&key->h[i], &key->h[2*i]);
354 for (i = 2; i < 1<<GCM_TABLE_BITS; i *= 2)
357 for (j = 1; j < i; j++)
358 gcm_gf_add(&key->h[i+j], &key->h[i],&key->h[j]);
365 gcm_hash(const struct gcm_key *key, union nettle_block16 *x,
366 size_t length, const uint8_t *data)
368 for (; length >= GCM_BLOCK_SIZE;
369 length -= GCM_BLOCK_SIZE, data += GCM_BLOCK_SIZE)
371 memxor (x->b, data, GCM_BLOCK_SIZE);
372 gcm_gf_mul (x, key->h);
376 memxor (x->b, data, length);
377 gcm_gf_mul (x, key->h);
380 #endif /* !gcm_hash */
383 gcm_hash_sizes(const struct gcm_key *key, union nettle_block16 *x,
384 uint64_t auth_size, uint64_t data_size)
386 uint8_t buffer[GCM_BLOCK_SIZE];
391 WRITE_UINT64 (buffer, auth_size);
392 WRITE_UINT64 (buffer + 8, data_size);
394 gcm_hash(key, x, GCM_BLOCK_SIZE, buffer);
397 /* NOTE: The key is needed only if length != GCM_IV_SIZE */
399 gcm_set_iv(struct gcm_ctx *ctx, const struct gcm_key *key,
400 size_t length, const uint8_t *iv)
402 if (length == GCM_IV_SIZE)
404 memcpy (ctx->iv.b, iv, GCM_BLOCK_SIZE - 4);
405 ctx->iv.b[GCM_BLOCK_SIZE - 4] = 0;
406 ctx->iv.b[GCM_BLOCK_SIZE - 3] = 0;
407 ctx->iv.b[GCM_BLOCK_SIZE - 2] = 0;
408 ctx->iv.b[GCM_BLOCK_SIZE - 1] = 1;
412 memset(ctx->iv.b, 0, GCM_BLOCK_SIZE);
413 gcm_hash(key, &ctx->iv, length, iv);
414 gcm_hash_sizes(key, &ctx->iv, 0, length);
417 memcpy (ctx->ctr.b, ctx->iv.b, GCM_BLOCK_SIZE);
420 /* Reset the rest of the message-dependent state. */
421 memset(ctx->x.b, 0, sizeof(ctx->x));
422 ctx->auth_size = ctx->data_size = 0;
426 gcm_update(struct gcm_ctx *ctx, const struct gcm_key *key,
427 size_t length, const uint8_t *data)
429 assert(ctx->auth_size % GCM_BLOCK_SIZE == 0);
430 assert(ctx->data_size == 0);
432 gcm_hash(key, &ctx->x, length, data);
434 ctx->auth_size += length;
438 gcm_crypt(struct gcm_ctx *ctx, const void *cipher, nettle_cipher_func *f,
439 size_t length, uint8_t *dst, const uint8_t *src)
441 uint8_t buffer[GCM_BLOCK_SIZE];
445 for (; length >= GCM_BLOCK_SIZE;
446 (length -= GCM_BLOCK_SIZE,
447 src += GCM_BLOCK_SIZE, dst += GCM_BLOCK_SIZE))
449 f (cipher, GCM_BLOCK_SIZE, dst, ctx->ctr.b);
450 memxor (dst, src, GCM_BLOCK_SIZE);
456 for (; length >= GCM_BLOCK_SIZE;
457 (length -= GCM_BLOCK_SIZE,
458 src += GCM_BLOCK_SIZE, dst += GCM_BLOCK_SIZE))
460 f (cipher, GCM_BLOCK_SIZE, buffer, ctx->ctr.b);
461 memxor3 (dst, src, buffer, GCM_BLOCK_SIZE);
467 /* A final partial block */
468 f (cipher, GCM_BLOCK_SIZE, buffer, ctx->ctr.b);
469 memxor3 (dst, src, buffer, length);
475 gcm_encrypt (struct gcm_ctx *ctx, const struct gcm_key *key,
476 const void *cipher, nettle_cipher_func *f,
477 size_t length, uint8_t *dst, const uint8_t *src)
479 assert(ctx->data_size % GCM_BLOCK_SIZE == 0);
481 gcm_crypt(ctx, cipher, f, length, dst, src);
482 gcm_hash(key, &ctx->x, length, dst);
484 ctx->data_size += length;
488 gcm_decrypt(struct gcm_ctx *ctx, const struct gcm_key *key,
489 const void *cipher, nettle_cipher_func *f,
490 size_t length, uint8_t *dst, const uint8_t *src)
492 assert(ctx->data_size % GCM_BLOCK_SIZE == 0);
494 gcm_hash(key, &ctx->x, length, src);
495 gcm_crypt(ctx, cipher, f, length, dst, src);
497 ctx->data_size += length;
501 gcm_digest(struct gcm_ctx *ctx, const struct gcm_key *key,
502 const void *cipher, nettle_cipher_func *f,
503 size_t length, uint8_t *digest)
505 uint8_t buffer[GCM_BLOCK_SIZE];
507 assert (length <= GCM_BLOCK_SIZE);
509 gcm_hash_sizes(key, &ctx->x, ctx->auth_size, ctx->data_size);
511 f (cipher, GCM_BLOCK_SIZE, buffer, ctx->iv.b);
512 memxor3 (digest, ctx->x.b, buffer, length);