Diffstat (limited to 'pwmanager/libcrypt/mpi/mpih-mul.c')
-rw-r--r--  pwmanager/libcrypt/mpi/mpih-mul.c  530
1 file changed, 530 insertions, 0 deletions
diff --git a/pwmanager/libcrypt/mpi/mpih-mul.c b/pwmanager/libcrypt/mpi/mpih-mul.c
new file mode 100644
index 0000000..e1f6f58
--- /dev/null
+++ b/pwmanager/libcrypt/mpi/mpih-mul.c
@@ -0,0 +1,530 @@
/* mpih-mul.c  -  MPI helper functions
 * Copyright (C) 1994, 1996, 1998, 1999, 2000,
 *               2001, 2002 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note: This code is heavily based on the GNU MP Library.
 *       Actually it's the same code with only minor changes in the
 *       way the data is stored; this is to support the abstraction
 *       of an optional secure memory allocation which may be used
 *       to avoid revealing sensitive data due to paging etc.
 */

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mpi-internal.h"
#include "longlong.h"
#include "g10lib.h"

#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace)      \
    do {                                                    \
        if( (size) < KARATSUBA_THRESHOLD )                  \
            mul_n_basecase (prodp, up, vp, size);           \
        else                                                \
            mul_n (prodp, up, vp, size, tspace);            \
    } while (0)

#define MPN_SQR_N_RECURSE(prodp, up, size, tspace)           \
    do {                                                     \
        if ((size) < KARATSUBA_THRESHOLD)                    \
            _gcry_mpih_sqr_n_basecase (prodp, up, size);     \
        else                                                 \
            _gcry_mpih_sqr_n (prodp, up, size, tspace);      \
    } while (0)
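
/* Both helpers dispatch on KARATSUBA_THRESHOLD (defined in
 * mpi-internal.h): below the threshold the O(n^2) base case is cheaper
 * than the recursion overhead; above it the Karatsuba routines take
 * over and require a TSPACE scratch area from the caller. */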



/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
 * both with SIZE limbs, and store the result at PRODP.  2 * SIZE limbs are
 * always stored.  Return the most significant limb.
 *
 * Argument constraints:
 * 1. PRODP != UP and PRODP != VP, i.e. the destination
 *    must be distinct from the multiplier and the multiplicand.
 *
 *
 * Handle simple cases with traditional multiplication.
 *
 * This is the most critical code of multiplication.  All multiplies rely
 * on this, both small and huge.  Small ones arrive here immediately.  Huge
 * ones arrive here as this is the base case for Karatsuba's recursive
 * algorithm below.
 */

static mpi_limb_t
mul_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up,
                mpi_ptr_t vp, mpi_size_t size)
{
    mpi_size_t i;
    mpi_limb_t cy;
    mpi_limb_t v_limb;

    /* Multiply by the first limb in V separately, as the result can be
     * stored (not added) to PROD.  We also avoid a loop for zeroing. */
    v_limb = vp[0];
    if( v_limb <= 1 ) {
        if( v_limb == 1 )
            MPN_COPY( prodp, up, size );
        else
            MPN_ZERO( prodp, size );
        cy = 0;
    }
    else
        cy = _gcry_mpih_mul_1( prodp, up, size, v_limb );

    prodp[size] = cy;
    prodp++;

    /* For each iteration of the loop, multiply the whole of U by the
     * next limb of V and add the (limb-shifted) product to PROD. */
    for( i = 1; i < size; i++ ) {
        v_limb = vp[i];
        if( v_limb <= 1 ) {
            cy = 0;
            if( v_limb == 1 )
                cy = _gcry_mpih_add_n(prodp, prodp, up, size);
        }
        else
            cy = _gcry_mpih_addmul_1(prodp, up, size, v_limb);

        prodp[size] = cy;
        prodp++;
    }

    return cy;
}
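
/* Example: for size == 2 the loop above performs plain schoolbook
 * multiplication in base B = 2**BITS_PER_MP_LIMB:
 *
 *     prod  = U * v[0];        // _gcry_mpih_mul_1 stores size+1 limbs
 *     prod += (U * v[1]) * B;  // _gcry_mpih_addmul_1, one limb higher
 *
 * Each pass writes the new high limb (the carry) and advances PRODP. */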


static void
mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
       mpi_size_t size, mpi_ptr_t tspace )
{
    if( size & 1 ) {
        /* The size is odd, and the code below doesn't handle that.
         * Multiply the least significant (size - 1) limbs with a recursive
         * call, and handle the most significant limb of S1 and S2
         * separately.
         * A slightly faster way to do this would be to make the Karatsuba
         * code below behave as if the size were even, and let it check for
         * odd size in the end.  I.e., in essence move this code to the end.
         * Doing so would save us a recursive call, and potentially make the
         * stack grow a lot less.
         */
        mpi_size_t esize = size - 1;        /* even size */
        mpi_limb_t cy_limb;

        MPN_MUL_N_RECURSE( prodp, up, vp, esize, tspace );
        cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, vp[esize] );
        prodp[esize + esize] = cy_limb;
        cy_limb = _gcry_mpih_addmul_1( prodp + esize, vp, size, up[esize] );
        prodp[esize + size] = cy_limb;
    }
    else {
        /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
         *
         * Split U in two pieces, U1 and U0, such that
         *      U = U0 + U1*(B**n),
         * and V in V1 and V0, such that
         *      V = V0 + V1*(B**n).
         *
         * UV is then computed recursively using the identity
         *
         *      UV = (B**(2n) + B**n)*U1*V1
         *         + B**n*(U1 - U0)*(V0 - V1)
         *         + (B**n + 1)*U0*V0
         *
         * where B = 2**BITS_PER_MP_LIMB.
         */
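        /* Worked example (B = 10, n = 1 for readability): U = 47, V = 83
         * give U1 = 4, U0 = 7, V1 = 8, V0 = 3, so
         *   (100 + 10)*4*8 + 10*(4-7)*(3-8) + 11*7*3
         *     = 3520 + 150 + 231 = 3901 = 47*83,
         * using only three half-size multiplications. */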
        mpi_size_t hsize = size >> 1;
        mpi_limb_t cy;
        int negflg;

        /* Product H.      ________________  ________________
         *                |_____U1 x V1____||____U0 x V0_____|
         * Put result in upper part of PROD and pass low part of TSPACE
         * as new TSPACE.
         */
        MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, tspace);

        /* Product M.      ________________
         *                |_(U1-U0)(V0-V1)_|
         */
        if( _gcry_mpih_cmp(up + hsize, up, hsize) >= 0 ) {
            _gcry_mpih_sub_n(prodp, up + hsize, up, hsize);
            negflg = 0;
        }
        else {
            _gcry_mpih_sub_n(prodp, up, up + hsize, hsize);
            negflg = 1;
        }
        if( _gcry_mpih_cmp(vp + hsize, vp, hsize) >= 0 ) {
            _gcry_mpih_sub_n(prodp + hsize, vp + hsize, vp, hsize);
            negflg ^= 1;
        }
        else {
            _gcry_mpih_sub_n(prodp + hsize, vp, vp + hsize, hsize);
            /* No change of NEGFLG. */
        }
        /* Read temporary operands from low part of PROD.
         * Put result in low part of TSPACE using upper part of TSPACE
         * as new TSPACE.
         */
        MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, tspace + size);

        /* Add/copy product H. */
        MPN_COPY (prodp + hsize, prodp + size, hsize);
        cy = _gcry_mpih_add_n( prodp + size, prodp + size,
                               prodp + size + hsize, hsize);

        /* Add product M (if NEGFLG is set, M is negative). */
        if(negflg)
            cy -= _gcry_mpih_sub_n(prodp + hsize, prodp + hsize, tspace, size);
        else
            cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size);

        /* Product L.      ________________  ________________
         *                |________________||____U0 x V0_____|
         * Read temporary operands from low part of PROD.
         * Put result in low part of TSPACE using upper part of TSPACE
         * as new TSPACE.
         */
        MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);

        /* Add/copy product L (twice). */

        cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size);
        if( cy )
            _gcry_mpih_add_1(prodp + hsize + size, prodp + hsize + size, hsize, cy);

        MPN_COPY(prodp, tspace, hsize);
        cy = _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace + hsize, hsize);
        if( cy )
            _gcry_mpih_add_1(prodp + size, prodp + size, size, 1);
    }
}
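
/* Note that each recursion level consumes the low SIZE limbs of TSPACE
 * and passes TSPACE + SIZE further down, so a top-level call on SIZE
 * limbs needs 2 * SIZE limbs of scratch space, which is exactly what
 * _gcry_mpih_mul_n below allocates. */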


void
_gcry_mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size )
{
    mpi_size_t i;
    mpi_limb_t cy_limb;
    mpi_limb_t v_limb;

    /* Multiply by the first limb of U separately, as the result can be
     * stored (not added) to PROD.  We also avoid a loop for zeroing. */
    v_limb = up[0];
    if( v_limb <= 1 ) {
        if( v_limb == 1 )
            MPN_COPY( prodp, up, size );
        else
            MPN_ZERO(prodp, size);
        cy_limb = 0;
    }
    else
        cy_limb = _gcry_mpih_mul_1( prodp, up, size, v_limb );

    prodp[size] = cy_limb;
    prodp++;

    /* For each iteration of the loop, multiply the whole of U by the
     * next limb of U and add the (limb-shifted) product to PROD. */
    for( i=1; i < size; i++) {
        v_limb = up[i];
        if( v_limb <= 1 ) {
            cy_limb = 0;
            if( v_limb == 1 )
                cy_limb = _gcry_mpih_add_n(prodp, prodp, up, size);
        }
        else
            cy_limb = _gcry_mpih_addmul_1(prodp, up, size, v_limb);

        prodp[size] = cy_limb;
        prodp++;
    }
}


void
_gcry_mpih_sqr_n( mpi_ptr_t prodp,
                  mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
{
    if( size & 1 ) {
        /* The size is odd, and the code below doesn't handle that.
         * Square the least significant (size - 1) limbs with a recursive
         * call, and handle the most significant limb separately.
         * A slightly faster way to do this would be to make the Karatsuba
         * code below behave as if the size were even, and let it check for
         * odd size in the end.  I.e., in essence move this code to the end.
         * Doing so would save us a recursive call, and potentially make the
         * stack grow a lot less.
         */
        mpi_size_t esize = size - 1;        /* even size */
        mpi_limb_t cy_limb;

        MPN_SQR_N_RECURSE( prodp, up, esize, tspace );
        cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, up[esize] );
        prodp[esize + esize] = cy_limb;
        cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, size, up[esize] );

        prodp[esize + size] = cy_limb;
    }
    else {
        mpi_size_t hsize = size >> 1;
        mpi_limb_t cy;

        /* Product H.      ________________  ________________
         *                |_____U1 x U1____||____U0 x U0_____|
         * Put result in upper part of PROD and pass low part of TSPACE
         * as new TSPACE.
         */
        MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);

        /* Product M.      ________________
         *                |_(U1-U0)(U0-U1)_|
         */
        if( _gcry_mpih_cmp( up + hsize, up, hsize) >= 0 )
            _gcry_mpih_sub_n( prodp, up + hsize, up, hsize);
        else
            _gcry_mpih_sub_n (prodp, up, up + hsize, hsize);

        /* Read temporary operands from low part of PROD.
         * Put result in low part of TSPACE using upper part of TSPACE
         * as new TSPACE. */
        MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);

        /* Add/copy product H. */
        MPN_COPY(prodp + hsize, prodp + size, hsize);
        cy = _gcry_mpih_add_n(prodp + size, prodp + size,
                              prodp + size + hsize, hsize);

        /* Add product M.  Here M = -(U1-U0)^2, which is never positive,
         * so it is always subtracted (no NEGFLG is needed). */
        cy -= _gcry_mpih_sub_n (prodp + hsize, prodp + hsize, tspace, size);

        /* Product L.      ________________  ________________
         *                |________________||____U0 x U0_____|
         * Read temporary operands from low part of PROD.
         * Put result in low part of TSPACE using upper part of TSPACE
         * as new TSPACE. */
        MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);

        /* Add/copy product L (twice). */
        cy += _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace, size);
        if( cy )
            _gcry_mpih_add_1(prodp + hsize + size, prodp + hsize + size,
                             hsize, cy);

        MPN_COPY(prodp, tspace, hsize);
        cy = _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
        if( cy )
            _gcry_mpih_add_1 (prodp + size, prodp + size, size, 1);
    }
}


/* This should be made into an inline function in gmp.h. */
void
_gcry_mpih_mul_n( mpi_ptr_t prodp,
                  mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
{
    int secure;

    if( up == vp ) {
        if( size < KARATSUBA_THRESHOLD )
            _gcry_mpih_sqr_n_basecase( prodp, up, size );
        else {
            mpi_ptr_t tspace;
            secure = gcry_is_secure( up );
            tspace = mpi_alloc_limb_space( 2 * size, secure );
            _gcry_mpih_sqr_n( prodp, up, size, tspace );
            _gcry_mpi_free_limb_space (tspace, 2 * size );
        }
    }
    else {
        if( size < KARATSUBA_THRESHOLD )
            mul_n_basecase( prodp, up, vp, size );
        else {
            mpi_ptr_t tspace;
            secure = gcry_is_secure( up ) || gcry_is_secure( vp );
            tspace = mpi_alloc_limb_space( 2 * size, secure );
            mul_n (prodp, up, vp, size, tspace);
            _gcry_mpi_free_limb_space (tspace, 2 * size );
        }
    }
}
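
/* Usage sketch (hypothetical caller; the names a, b, n are illustrative).
 * PRODP must provide 2 * SIZE limbs and may not overlap the operands:
 *
 *     mpi_ptr_t prod = mpi_alloc_limb_space( 2 * n, gcry_is_secure( a ) );
 *     _gcry_mpih_mul_n( prod, a, b, n );    // a and b hold n limbs each
 *     ...
 *     _gcry_mpi_free_limb_space( prod, 2 * n );
 */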
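
/* Multiply U (USIZE limbs) by V (VSIZE limbs), USIZE >= VSIZE, by slicing
 * U into VSIZE-limb blocks and combining the partial products; CTX caches
 * the scratch space between calls and, for an oversized tail, chains to a
 * nested context allocated on demand. */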
void
_gcry_mpih_mul_karatsuba_case( mpi_ptr_t prodp,
                               mpi_ptr_t up, mpi_size_t usize,
                               mpi_ptr_t vp, mpi_size_t vsize,
                               struct karatsuba_ctx *ctx )
{
    mpi_limb_t cy;

    if( !ctx->tspace || ctx->tspace_size < vsize ) {
        if( ctx->tspace )
            _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
        ctx->tspace_nlimbs = 2 * vsize;
        ctx->tspace = mpi_alloc_limb_space( 2 * vsize,
                                            (gcry_is_secure( up )
                                             || gcry_is_secure( vp )) );
        ctx->tspace_size = vsize;
    }

    MPN_MUL_N_RECURSE( prodp, up, vp, vsize, ctx->tspace );

    prodp += vsize;
    up += vsize;
    usize -= vsize;
    if( usize >= vsize ) {
        if( !ctx->tp || ctx->tp_size < vsize ) {
            if( ctx->tp )
                _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
            ctx->tp_nlimbs = 2 * vsize;
            ctx->tp = mpi_alloc_limb_space( 2 * vsize,
                                            gcry_is_secure( up )
                                            || gcry_is_secure( vp ) );
            ctx->tp_size = vsize;
        }

        do {
            MPN_MUL_N_RECURSE( ctx->tp, up, vp, vsize, ctx->tspace );
            cy = _gcry_mpih_add_n( prodp, prodp, ctx->tp, vsize );
            _gcry_mpih_add_1( prodp + vsize, ctx->tp + vsize, vsize, cy );
            prodp += vsize;
            up += vsize;
            usize -= vsize;
        } while( usize >= vsize );
    }

    if( usize ) {
        if( usize < KARATSUBA_THRESHOLD ) {
            _gcry_mpih_mul( ctx->tspace, vp, vsize, up, usize );
        }
        else {
            if( !ctx->next ) {
                ctx->next = gcry_xcalloc( 1, sizeof *ctx );
            }
            _gcry_mpih_mul_karatsuba_case( ctx->tspace,
                                           vp, vsize,
                                           up, usize,
                                           ctx->next );
        }

        cy = _gcry_mpih_add_n( prodp, prodp, ctx->tspace, vsize);
        _gcry_mpih_add_1( prodp + vsize, ctx->tspace + vsize, usize, cy );
    }
}


void
_gcry_mpih_release_karatsuba_ctx( struct karatsuba_ctx *ctx )
{
    struct karatsuba_ctx *ctx2;

    if( ctx->tp )
        _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
    if( ctx->tspace )
        _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
    for( ctx = ctx->next; ctx; ctx = ctx2 ) {
        ctx2 = ctx->next;
        if( ctx->tp )
            _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs );
        if( ctx->tspace )
            _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs );
        gcry_free( ctx );
    }
}
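
/* The chain walked here mirrors the on-demand allocation in
 * _gcry_mpih_mul_karatsuba_case above: each level of that recursion may
 * hang one further context off CTX->next, and all of them, together with
 * their scratch buffers, are released in a single pass. */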

/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
 * and v (pointed to by VP, with VSIZE limbs), and store the result at
 * PRODP.  USIZE + VSIZE limbs are always stored; the most significant
 * limb of the result may be zero even if the input operands are
 * normalized.  That limb is returned.
 *
 * NOTE: The space pointed to by PRODP is overwritten before U and V
 * are fully consumed, so it must not overlap either operand.
 *
 * Argument constraints:
 * 1. USIZE >= VSIZE.
 * 2. PRODP != UP and PRODP != VP, i.e. the destination
 *    must be distinct from the multiplier and the multiplicand.
 */

mpi_limb_t
_gcry_mpih_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
                mpi_ptr_t vp, mpi_size_t vsize)
{
    mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
    mpi_limb_t cy;
    struct karatsuba_ctx ctx;

    if( vsize < KARATSUBA_THRESHOLD ) {
        mpi_size_t i;
        mpi_limb_t v_limb;

        if( !vsize )
            return 0;

        /* Multiply by the first limb in V separately, as the result can be
         * stored (not added) to PROD.  We also avoid a loop for zeroing. */
        v_limb = vp[0];
        if( v_limb <= 1 ) {
            if( v_limb == 1 )
                MPN_COPY( prodp, up, usize );
            else
                MPN_ZERO( prodp, usize );
            cy = 0;
        }
        else
            cy = _gcry_mpih_mul_1( prodp, up, usize, v_limb );

        prodp[usize] = cy;
        prodp++;

        /* For each iteration of the loop, multiply the whole of U by the
         * next limb of V and add the (limb-shifted) product to PROD. */
        for( i = 1; i < vsize; i++ ) {
            v_limb = vp[i];
            if( v_limb <= 1 ) {
                cy = 0;
                if( v_limb == 1 )
                    cy = _gcry_mpih_add_n(prodp, prodp, up, usize);
            }
            else
                cy = _gcry_mpih_addmul_1(prodp, up, usize, v_limb);

            prodp[usize] = cy;
            prodp++;
        }

        return cy;
    }

    memset( &ctx, 0, sizeof ctx );
    _gcry_mpih_mul_karatsuba_case( prodp, up, usize, vp, vsize, &ctx );
    _gcry_mpih_release_karatsuba_ctx( &ctx );
    return *prod_endp;
}
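
/* Usage sketch (hypothetical caller; names are illustrative).  PRODP needs
 * USIZE + VSIZE limbs and the longer operand must be passed first:
 *
 *     mpi_size_t psize = usize + vsize;
 *     mpi_ptr_t  prod  = mpi_alloc_limb_space( psize, 0 );
 *     mpi_limb_t msl   = _gcry_mpih_mul( prod, up, usize, vp, vsize );
 *     // msl == prod[psize - 1] and may be zero
 *     _gcry_mpi_free_limb_space( prod, psize );
 */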