/* mpn_mul_n -- Multiply two natural numbers of length n.

Copyright (C) 1991, 1992, 1993, 1994, 1996 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Library General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
License for more details.

You should have received a copy of the GNU Library General Public License
along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA. */
/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
   both with SIZE limbs, and store the result at PRODP.  2 * SIZE limbs are
   always stored.  Return the most significant limb.

   Argument constraints:
   1. PRODP != UP and PRODP != VP, i.e. the destination
      must be distinct from the multiplier and the multiplicand.  */
/* If KARATSUBA_THRESHOLD is not already defined, define it to a
   value which is good on most machines.  */
#ifndef KARATSUBA_THRESHOLD
#define KARATSUBA_THRESHOLD 32
#endif

/* The code can't handle KARATSUBA_THRESHOLD smaller than 2.  */
#if KARATSUBA_THRESHOLD < 2
#undef KARATSUBA_THRESHOLD
#define KARATSUBA_THRESHOLD 2
#endif
46 /* Handle simple cases with traditional multiplication.
48 This is the most critical code of multiplication. All multiplies rely
49 on this, both small and huge. Small ones arrive here immediately. Huge
50 ones arrive here as this is the base case for Karatsuba's recursive
55 impn_mul_n_basecase (mp_ptr prodp, mp_srcptr up, mp_srcptr vp, mp_size_t size)
57 impn_mul_n_basecase (prodp, up, vp, size)
68 /* Multiply by the first limb in V separately, as the result can be
69 stored (not added) to PROD. We also avoid a loop for zeroing. */
74 MPN_COPY (prodp, up, size);
76 MPN_ZERO (prodp, size);
80 cy_limb = mpn_mul_1 (prodp, up, size, v_limb);
82 prodp[size] = cy_limb;
85 /* For each iteration in the outer loop, multiply one limb from
86 U with one limb from V, and add it to PROD. */
87 for (i = 1; i < size; i++)
94 cy_limb = mpn_add_n (prodp, prodp, up, size);
97 cy_limb = mpn_addmul_1 (prodp, up, size, v_limb);
99 prodp[size] = cy_limb;
106 impn_mul_n (mp_ptr prodp,
107 mp_srcptr up, mp_srcptr vp, mp_size_t size, mp_ptr tspace)
109 impn_mul_n (prodp, up, vp, size, tspace)
119 /* The size is odd, the code code below doesn't handle that.
120 Multiply the least significant (size - 1) limbs with a recursive
121 call, and handle the most significant limb of S1 and S2
123 /* A slightly faster way to do this would be to make the Karatsuba
124 code below behave as if the size were even, and let it check for
125 odd size in the end. I.e., in essence move this code to the end.
126 Doing so would save us a recursive call, and potentially make the
127 stack grow a lot less. */
129 mp_size_t esize = size - 1; /* even size */
132 MPN_MUL_N_RECURSE (prodp, up, vp, esize, tspace);
133 cy_limb = mpn_addmul_1 (prodp + esize, up, esize, vp[esize]);
134 prodp[esize + esize] = cy_limb;
135 cy_limb = mpn_addmul_1 (prodp + esize, vp, size, up[esize]);
137 prodp[esize + size] = cy_limb;
141 /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
143 Split U in two pieces, U1 and U0, such that
145 and V in V1 and V0, such that
148 UV is then computed recursively using the identity
151 UV = (B + B )U V + B (U -U )(V -V ) + (B + 1)U V
154 Where B = 2**BITS_PER_MP_LIMB. */
156 mp_size_t hsize = size >> 1;
160 /*** Product H. ________________ ________________
161 |_____U1 x V1____||____U0 x V0_____| */
162 /* Put result in upper part of PROD and pass low part of TSPACE
164 MPN_MUL_N_RECURSE (prodp + size, up + hsize, vp + hsize, hsize, tspace);
166 /*** Product M. ________________
167 |_(U1-U0)(V0-V1)_| */
168 if (mpn_cmp (up + hsize, up, hsize) >= 0)
170 mpn_sub_n (prodp, up + hsize, up, hsize);
175 mpn_sub_n (prodp, up, up + hsize, hsize);
178 if (mpn_cmp (vp + hsize, vp, hsize) >= 0)
180 mpn_sub_n (prodp + hsize, vp + hsize, vp, hsize);
185 mpn_sub_n (prodp + hsize, vp, vp + hsize, hsize);
186 /* No change of NEGFLG. */
188 /* Read temporary operands from low part of PROD.
189 Put result in low part of TSPACE using upper part of TSPACE
191 MPN_MUL_N_RECURSE (tspace, prodp, prodp + hsize, hsize, tspace + size);
193 /*** Add/copy product H. */
194 MPN_COPY (prodp + hsize, prodp + size, hsize);
195 cy = mpn_add_n (prodp + size, prodp + size, prodp + size + hsize, hsize);
197 /*** Add product M (if NEGFLG M is a negative number). */
199 cy -= mpn_sub_n (prodp + hsize, prodp + hsize, tspace, size);
201 cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
203 /*** Product L. ________________ ________________
204 |________________||____U0 x V0_____| */
205 /* Read temporary operands from low part of PROD.
206 Put result in low part of TSPACE using upper part of TSPACE
208 MPN_MUL_N_RECURSE (tspace, up, vp, hsize, tspace + size);
210 /*** Add/copy Product L (twice). */
212 cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
214 mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
216 MPN_COPY (prodp, tspace, hsize);
217 cy = mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
219 mpn_add_1 (prodp + size, prodp + size, size, 1);
225 impn_sqr_n_basecase (mp_ptr prodp, mp_srcptr up, mp_size_t size)
227 impn_sqr_n_basecase (prodp, up, size)
237 /* Multiply by the first limb in V separately, as the result can be
238 stored (not added) to PROD. We also avoid a loop for zeroing. */
243 MPN_COPY (prodp, up, size);
245 MPN_ZERO (prodp, size);
249 cy_limb = mpn_mul_1 (prodp, up, size, v_limb);
251 prodp[size] = cy_limb;
254 /* For each iteration in the outer loop, multiply one limb from
255 U with one limb from V, and add it to PROD. */
256 for (i = 1; i < size; i++)
263 cy_limb = mpn_add_n (prodp, prodp, up, size);
266 cy_limb = mpn_addmul_1 (prodp, up, size, v_limb);
268 prodp[size] = cy_limb;
275 impn_sqr_n (mp_ptr prodp,
276 mp_srcptr up, mp_size_t size, mp_ptr tspace)
278 impn_sqr_n (prodp, up, size, tspace)
287 /* The size is odd, the code code below doesn't handle that.
288 Multiply the least significant (size - 1) limbs with a recursive
289 call, and handle the most significant limb of S1 and S2
291 /* A slightly faster way to do this would be to make the Karatsuba
292 code below behave as if the size were even, and let it check for
293 odd size in the end. I.e., in essence move this code to the end.
294 Doing so would save us a recursive call, and potentially make the
295 stack grow a lot less. */
297 mp_size_t esize = size - 1; /* even size */
300 MPN_SQR_N_RECURSE (prodp, up, esize, tspace);
301 cy_limb = mpn_addmul_1 (prodp + esize, up, esize, up[esize]);
302 prodp[esize + esize] = cy_limb;
303 cy_limb = mpn_addmul_1 (prodp + esize, up, size, up[esize]);
305 prodp[esize + size] = cy_limb;
309 mp_size_t hsize = size >> 1;
312 /*** Product H. ________________ ________________
313 |_____U1 x U1____||____U0 x U0_____| */
314 /* Put result in upper part of PROD and pass low part of TSPACE
316 MPN_SQR_N_RECURSE (prodp + size, up + hsize, hsize, tspace);
318 /*** Product M. ________________
319 |_(U1-U0)(U0-U1)_| */
320 if (mpn_cmp (up + hsize, up, hsize) >= 0)
322 mpn_sub_n (prodp, up + hsize, up, hsize);
326 mpn_sub_n (prodp, up, up + hsize, hsize);
329 /* Read temporary operands from low part of PROD.
330 Put result in low part of TSPACE using upper part of TSPACE
332 MPN_SQR_N_RECURSE (tspace, prodp, hsize, tspace + size);
334 /*** Add/copy product H. */
335 MPN_COPY (prodp + hsize, prodp + size, hsize);
336 cy = mpn_add_n (prodp + size, prodp + size, prodp + size + hsize, hsize);
338 /*** Add product M (if NEGFLG M is a negative number). */
339 cy -= mpn_sub_n (prodp + hsize, prodp + hsize, tspace, size);
341 /*** Product L. ________________ ________________
342 |________________||____U0 x U0_____| */
343 /* Read temporary operands from low part of PROD.
344 Put result in low part of TSPACE using upper part of TSPACE
346 MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size);
348 /*** Add/copy Product L (twice). */
350 cy += mpn_add_n (prodp + hsize, prodp + hsize, tspace, size);
352 mpn_add_1 (prodp + hsize + size, prodp + hsize + size, hsize, cy);
354 MPN_COPY (prodp, tspace, hsize);
355 cy = mpn_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize);
357 mpn_add_1 (prodp + size, prodp + size, size, 1);
361 /* This should be made into an inline function in gmp.h. */
364 mpn_mul_n (mp_ptr prodp, mp_srcptr up, mp_srcptr vp, mp_size_t size)
366 mpn_mul_n (prodp, up, vp, size)
377 if (size < KARATSUBA_THRESHOLD)
379 impn_sqr_n_basecase (prodp, up, size);
384 tspace = (mp_ptr) TMP_ALLOC (2 * size * BYTES_PER_MP_LIMB);
385 impn_sqr_n (prodp, up, size, tspace);
390 if (size < KARATSUBA_THRESHOLD)
392 impn_mul_n_basecase (prodp, up, vp, size);
397 tspace = (mp_ptr) TMP_ALLOC (2 * size * BYTES_PER_MP_LIMB);
398 impn_mul_n (prodp, up, vp, size, tspace);