author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:54:16 +0000 |
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:54:16 +0000 |
commit | 485f6ecd453d8a2fd8b9b9fadea03159d8b50797 (patch) | |
tree | 32451fa3cdd9321fb2591fada9891b2cb70a9cd1 /grub-core/lib/libgcrypt-grub/mpi | |
parent | Initial commit. (diff) | |
Adding upstream version 2.06. (upstream/2.06, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'grub-core/lib/libgcrypt-grub/mpi')
37 files changed, 7827 insertions, 0 deletions
diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/Manifest b/grub-core/lib/libgcrypt-grub/mpi/generic/Manifest new file mode 100644 index 0000000..c429fde --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/Manifest @@ -0,0 +1,29 @@ +# Manifest - checksums +# Copyright 2003 Free Software Foundation, Inc. +# +# This file is part of Libgcrypt. +# +# Libgcrypt is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser general Public License as +# published by the Free Software Foundation; either version 2.1 of +# the License, or (at your option) any later version. +# +# Libgcrypt is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + +mpih-add1.c iQCVAwUAP+Lj2DEAnp832S/7AQKn/AQAwQLWggl6zNQ5EZ+lE+jKV8W3FsogW3/6tp9T5rrSR5JnlWyoHQ9/Pu4knOcLjS6nIfVOiAEifu3nuIysQr9jDSSSJA2LylSUBSXKLKDamPsOCwXOLxiZODslJT3CCGAUtLvXJrWDbTZQrkEuwnLnjQFDzuA7iY9JLrG9kAoXD6Q==WoWm +mpih-mul1.c iQCVAwUAP+LkCTEAnp832S/7AQKFVQP+MhBNjcY73JtnsHZfnaVZq3TiKwN151cWV51nDc1RnTaMhSIFeuNlj3vNML2W0Gn8n+GnyiWE2XXdQEaik6BL02eekUn9aq7I/rdpnTHuOjQPK1uwjuNl8RuJ9YrERBAxq4oB71f+iwMab8dsMSUlVC+NdeAocRqLLgnR/efkdLc==2Tkb +mpih-mul2.c iQCVAwUAP+LkMjEAnp832S/7AQLPeAQAqmRzxFe/mDqTdZr/pTXT8RVyB1vKB0Ei2THV05BxmI4OPv39uysfFpLMt/INsX7AGqdOlj4jOZ/qNaFXR1ceMrlSXvo8u/epk6rCXFp82kM7Qs983LjoP//PrMCkYkXwblaVrgUGiBUCbuPMliWTK6qKkxxXtEfqZ7nVbEWdBx8==Kwhl +mpih-mul3.c iQCVAwUAP+LkVDEAnp832S/7AQL91gP/Qd5iZWxRiN5DdEIVHAedoNvl23NPrT2UUdXvnSK49DpplTxkLiMBj0WqCayG/YIET2NpMRCeLvAZNcSt6lOm0bSZDYo1Hv/N+UoqD3V1McjY16REBv/nnPaMWMZcx7rl5yKTVZiX2PgV6oQOL7Yfrt5ZIOlrHBRs9S2/zcCaVz0==9BQe +mpih-lshift.c iQCVAwUAP+LlATEAnp832S/7AQIACAQAhMrpx0SRXE/LN1NkjMO9n74nMrvmzYJyru0gw2O4BYrUPvD/LWGju2FZaggKV0IBjmi0cDoCrNeK9EGjKOO1lfgODbX2IZ1LUhr9jDuMj0QRqj6T9YkAFYTNUk4GfpwIf7T6Ybo7c78Jx93PidCJt7d39eMMEalooC7LZ4IU3NM==nZ4k +mpih-rshift.c iQCVAwUAP+LlIjEAnp832S/7AQKiuAP/eYC2ZScd+taBx/kNzRvGjA0eAXvORMkMLV6Ot+OXVzVUi04eoP2yXdxSNFKwUj12p8GWXkdoMG3aOGBKg2a7bY5Q5RUho3hUWb9UsVYVUfXLf7IOTt/3a6MLh2CmV5dFPWJmSlbCyQRcn6n/fLDeJ3A2bWTS/BhqGfpOXUIU1ws==jCf8 +mpih-sub1.c iQCVAwUAP+LlZzEAnp832S/7AQIEPgP/dLHTDRbPrYJhsLp9SjGstU1M8/IC5XytcDtO3NQeu4mx6vaXjpujtsTvKIbX4QL5IahNntVVKv1xFLEm2yFg7L2ns0uD/mfwGgOhCG1j2o/SaTAWP5KxP7ae5UDcZl2w6NWvEuMj9t32zmziAZjP8W73A37FUspeRDYiL9sQzkI==QQzk +udiv-w-sdiv.c iQCVAwUAP+Lk0TEAnp832S/7AQICXAQAsxe1SQD4+xZaZTqBC0V9Cyuo0mrdccnRFzthOtm0ARwKFXU2cuLW/ZBOkmeWOVmOFhBp22/I8dEGYnMA3gcfmOMCpNu9i9zk/XHfptdunA1MnOe3GsoWgfHL0rhpAyPhp/X043ICB41NElnnuxADuQQlD4Z1fca5ygYxMr2crJg==EI/6 +mpi-asm-defs.h iQCVAwUAP+LkgDEAnp832S/7AQK0FgQAxJZ7xvXhoZa33GWe23LRb3asrno/loZSyAIXrntqtVH8M3pEsCY0OyW4ry4hX2RnxpuhRCM/PdRNLG3xXyMSVIhkHU8WVRLqzF2LLjEkyU3cAmHnnTQ9aO/XpUWtJGTZ8q2bv7ZsAEi4aPl0p6KhPXcPgM9vQ2XcyOPn3Dl0d6Q==xpjI +$names$ iQCVAwUAP+LmNDEAnp832S/7AQJa+gP+KQNJpbNOgc+s2UX+Ya2gDaOFcAROImIllhg3ej8EaBF8xxdHmWT1zaKwTwi3moEEleykMR104YAGWyQeMbFYiuPPBW+ohrT6KxRBVJpIA9auOOqqJMyglZyoR3Hv7gduVYUW1h/DebnqiKXKEfzQDFqYuT0ayuteoOR4B5NICbE==nLSh diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/distfiles b/grub-core/lib/libgcrypt-grub/mpi/generic/distfiles new file mode 100644 index 0000000..9810eef --- /dev/null +++ 
b/grub-core/lib/libgcrypt-grub/mpi/generic/distfiles @@ -0,0 +1,11 @@ +Manifest +mpih-add1.c +mpih-mul1.c +mpih-mul2.c +mpih-mul3.c +mpih-lshift.c +mpih-rshift.c +mpih-sub1.c +udiv-w-sdiv.c +mpi-asm-defs.h + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/mpi-asm-defs.h b/grub-core/lib/libgcrypt-grub/mpi/generic/mpi-asm-defs.h new file mode 100644 index 0000000..13424e2 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/mpi-asm-defs.h @@ -0,0 +1,10 @@ +/* This file defines some basic constants for the MPI machinery. We + * need to define the types on a per-CPU basis, so it is done with + * this file here. */ +#define BYTES_PER_MPI_LIMB (SIZEOF_UNSIGNED_LONG) + + + + + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-add1.c b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-add1.c new file mode 100644 index 0000000..4a84df6 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-add1.c @@ -0,0 +1,65 @@ +/* mpihelp-add_1.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, + * 2000, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + +mpi_limb_t +_gcry_mpih_add_n (mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_ptr_t s2_ptr, mpi_size_t size) +{ + mpi_limb_t x, y, cy; + mpi_size_t j; + + /* The loop counter and index J goes from -SIZE to -1. This way + the loop becomes faster. */ + j = -size; + + /* Offset the base pointers to compensate for the negative indices. */ + s1_ptr -= j; + s2_ptr -= j; + res_ptr -= j; + + cy = 0; + do + { + y = s2_ptr[j]; + x = s1_ptr[j]; + y += cy; /* add previous carry to one addend */ + cy = y < cy; /* get out carry from that addition */ + y += x; /* add other addend */ + cy += y < x; /* get out carry from that add, combine */ + res_ptr[j] = y; + } + while ( ++j ); + + return cy; +} + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-lshift.c b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-lshift.c new file mode 100644 index 0000000..f48c12c --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-lshift.c @@ -0,0 +1,68 @@ +/* mpi-lshift.c - MPI helper functions + * Copyright (C) 1994, 1996, 1998, 2001, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. 
+ * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" + +/* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left + * and store the USIZE least significant digits of the result at WP. + * Return the bits shifted out from the most significant digit. + * + * Argument constraints: + * 1. 0 < CNT < BITS_PER_MP_LIMB + * 2. If the result is to be written over the input, WP must be >= UP. + */ + +mpi_limb_t +_gcry_mpih_lshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, + unsigned int cnt) +{ + mpi_limb_t high_limb, low_limb; + unsigned sh_1, sh_2; + mpi_size_t i; + mpi_limb_t retval; + + sh_1 = cnt; + wp += 1; + sh_2 = BITS_PER_MPI_LIMB - sh_1; + i = usize - 1; + low_limb = up[i]; + retval = low_limb >> sh_2; + high_limb = low_limb; + while ( --i >= 0 ) + { + low_limb = up[i]; + wp[i] = (high_limb << sh_1) | (low_limb >> sh_2); + high_limb = low_limb; + } + wp[i] = high_limb << sh_1; + + return retval; +} + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul1.c b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul1.c new file mode 100644 index 0000000..0e8197d --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul1.c @@ -0,0 +1,62 @@ +/* mpihelp-mul_1.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, 2001, + * 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. 
+ */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + +mpi_limb_t +_gcry_mpih_mul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, + mpi_limb_t s2_limb) +{ + mpi_limb_t cy_limb; + mpi_size_t j; + mpi_limb_t prod_high, prod_low; + + /* The loop counter and index J goes from -S1_SIZE to -1. This way + * the loop becomes faster. */ + j = -s1_size; + + /* Offset the base pointers to compensate for the negative indices. */ + s1_ptr -= j; + res_ptr -= j; + + cy_limb = 0; + do + { + umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb ); + prod_low += cy_limb; + cy_limb = (prod_low < cy_limb?1:0) + prod_high; + res_ptr[j] = prod_low; + } + while( ++j ); + + return cy_limb; +} + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul2.c b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul2.c new file mode 100644 index 0000000..3b75496 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul2.c @@ -0,0 +1,68 @@ +/* mpih-mul2.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, 2001, + * 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + + +mpi_limb_t +_gcry_mpih_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb) +{ + mpi_limb_t cy_limb; + mpi_size_t j; + mpi_limb_t prod_high, prod_low; + mpi_limb_t x; + + /* The loop counter and index J goes from -SIZE to -1. This way + * the loop becomes faster. */ + j = -s1_size; + res_ptr -= j; + s1_ptr -= j; + + cy_limb = 0; + do + { + umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb ); + + prod_low += cy_limb; + cy_limb = (prod_low < cy_limb?1:0) + prod_high; + + x = res_ptr[j]; + prod_low = x + prod_low; + cy_limb += prod_low < x?1:0; + res_ptr[j] = prod_low; + } + while ( ++j ); + + return cy_limb; +} + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul3.c b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul3.c new file mode 100644 index 0000000..5e84f94 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-mul3.c @@ -0,0 +1,68 @@ +/* mpih-mul3.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, 2001, + * 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. 
+ * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + + +mpi_limb_t +_gcry_mpih_submul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb) +{ + mpi_limb_t cy_limb; + mpi_size_t j; + mpi_limb_t prod_high, prod_low; + mpi_limb_t x; + + /* The loop counter and index J goes from -SIZE to -1. This way + * the loop becomes faster. */ + j = -s1_size; + res_ptr -= j; + s1_ptr -= j; + + cy_limb = 0; + do + { + umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb); + + prod_low += cy_limb; + cy_limb = (prod_low < cy_limb?1:0) + prod_high; + + x = res_ptr[j]; + prod_low = x - prod_low; + cy_limb += prod_low > x?1:0; + res_ptr[j] = prod_low; + } + while( ++j ); + + return cy_limb; +} + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-rshift.c b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-rshift.c new file mode 100644 index 0000000..e40794f --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-rshift.c @@ -0,0 +1,67 @@ +/* mpih-rshift.c - MPI helper functions + * Copyright (C) 1994, 1996, 1998, 1999, + * 2000, 2001, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. 
+ */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" + + +/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right + * and store the USIZE least significant limbs of the result at WP. + * The bits shifted out to the right are returned. + * + * Argument constraints: + * 1. 0 < CNT < BITS_PER_MP_LIMB + * 2. If the result is to be written over the input, WP must be <= UP. + */ + +mpi_limb_t +_gcry_mpih_rshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt) +{ + mpi_limb_t high_limb, low_limb; + unsigned sh_1, sh_2; + mpi_size_t i; + mpi_limb_t retval; + + sh_1 = cnt; + wp -= 1; + sh_2 = BITS_PER_MPI_LIMB - sh_1; + high_limb = up[0]; + retval = high_limb << sh_2; + low_limb = high_limb; + for (i=1; i < usize; i++) + { + high_limb = up[i]; + wp[i] = (low_limb >> sh_1) | (high_limb << sh_2); + low_limb = high_limb; + } + wp[i] = low_limb >> sh_1; + + return retval; +} + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-sub1.c b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-sub1.c new file mode 100644 index 0000000..e88821b --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/mpih-sub1.c @@ -0,0 +1,66 @@ +/* mpihelp-add_2.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, 2001, + * 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + +mpi_limb_t +_gcry_mpih_sub_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_ptr_t s2_ptr, mpi_size_t size) +{ + mpi_limb_t x, y, cy; + mpi_size_t j; + + /* The loop counter and index J goes from -SIZE to -1. This way + the loop becomes faster. */ + j = -size; + + /* Offset the base pointers to compensate for the negative indices. 
*/ + s1_ptr -= j; + s2_ptr -= j; + res_ptr -= j; + + cy = 0; + do + { + y = s2_ptr[j]; + x = s1_ptr[j]; + y += cy; /* add previous carry to subtrahend */ + cy = y < cy; /* get out carry from that addition */ + y = x - y; /* main subtract */ + cy += y > x; /* get out carry from the subtract, combine */ + res_ptr[j] = y; + } + while( ++j ); + + return cy; +} + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/generic/udiv-w-sdiv.c b/grub-core/lib/libgcrypt-grub/mpi/generic/udiv-w-sdiv.c new file mode 100644 index 0000000..e80d98b --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/generic/udiv-w-sdiv.c @@ -0,0 +1,133 @@ +/* mpih-w-sdiv -- implement udiv_qrnnd on machines with only signed + * division. + * Copyright (C) 1992, 1994, 1996, 1998, 2002 Free Software Foundation, Inc. + * Contributed by Peter L. Montgomery. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + + +#if 0 /* not yet ported to MPI */ + +mpi_limb_t +mpihelp_udiv_w_sdiv( mpi_limp_t *rp, + mpi_limp_t *a1, + mpi_limp_t *a0, + mpi_limp_t *d ) +{ + mp_limb_t q, r; + mp_limb_t c0, c1, b1; + + if ((mpi_limb_signed_t) d >= 0) + { + if (a1 < d - a1 - (a0 >> (BITS_PER_MP_LIMB - 1))) + { + /* dividend, divisor, and quotient are nonnegative */ + sdiv_qrnnd (q, r, a1, a0, d); + } + else + { + /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */ + sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (BITS_PER_MP_LIMB - 1)); + /* Divide (c1*2^32 + c0) by d */ + sdiv_qrnnd (q, r, c1, c0, d); + /* Add 2^31 to quotient */ + q += (mp_limb_t) 1 << (BITS_PER_MP_LIMB - 1); + } + } + else + { + b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */ + c1 = a1 >> 1; /* A/2 */ + c0 = (a1 << (BITS_PER_MP_LIMB - 1)) + (a0 >> 1); + + if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */ + { + sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */ + + r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */ + if ((d & 1) != 0) + { + if (r >= q) + r = r - q; + else if (q - r <= d) + { + r = r - q + d; + q--; + } + else + { + r = r - q + 2*d; + q -= 2; + } + } + } + else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */ + { + c1 = (b1 - 1) - c1; + c0 = ~c0; /* logical NOT */ + + sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */ + + q = ~q; /* (A/2)/b1 */ + r = (b1 - 1) - r; + + r = 2*r + (a0 & 1); /* A/(2*b1) */ + + if ((d & 1) != 0) + { + if (r >= q) + r = r - q; + else if (q - r <= d) + { + r = r - q + d; + q--; + } + else + { + r = r - q + 2*d; + q -= 2; + } + } + } + else /* Implies c1 = b1 */ + { /* Hence a1 = d - 1 = 2*b1 - 1 */ + if (a0 >= -d) + { + q = -1; + r = a0 + d; + } + else + { + q = -2; + r = a0 + 2*d; + } + } + } + + *rp = r; + return q; +} + +#endif + diff --git a/grub-core/lib/libgcrypt-grub/mpi/longlong.h 
b/grub-core/lib/libgcrypt-grub/mpi/longlong.h new file mode 100644 index 0000000..37a9b9a --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/longlong.h @@ -0,0 +1,1615 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* longlong.h -- definitions for mixed size 32/64 bit arithmetic. + Note: I added some stuff for use with gnupg + +Copyright (C) 1991, 1992, 1993, 1994, 1996, 1998, + 2000, 2001, 2002, 2003, 2004, 2011 Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 2.1 of the License, or (at your +option) any later version. + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +License for more details. + +You should have received a copy of the GNU Library General Public License +along with this file; see the file COPYING.LIB. If not, write to +the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, +MA 02111-1307, USA. */ + +/* You have to define the following before including this file: + + UWtype -- An unsigned type, default type for operations (typically a "word") + UHWtype -- An unsigned type, at least half the size of UWtype. + UDWtype -- An unsigned type, at least twice as large a UWtype + W_TYPE_SIZE -- size in bits of UWtype + + SItype, USItype -- Signed and unsigned 32 bit types. + DItype, UDItype -- Signed and unsigned 64 bit types. + + On a 32 bit machine UWtype should typically be USItype; + on a 64 bit machine, UWtype should typically be UDItype. +*/ + +#define __BITS4 (W_TYPE_SIZE / 4) +#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2)) +#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1)) +#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2)) + +/* This is used to make sure no undesirable sharing between different libraries + that use this file takes place. */ +#ifndef __MPN +#define __MPN(x) __##x +#endif + +/* Define auxiliary asm macros. + + 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two + UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype + word product in HIGH_PROD and LOW_PROD. + + 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a + UDWtype product. This is just a variant of umul_ppmm. + + 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + denominator) divides a UDWtype, composed by the UWtype integers + HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient + in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less + than DENOMINATOR for correct operation. If, in addition, the most + significant bit of DENOMINATOR must be 1, then the pre-processor symbol + UDIV_NEEDS_NORMALIZATION is defined to 1. + + 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + denominator). Like udiv_qrnnd but the numbers are signed. The quotient + is rounded towards 0. + + 5) count_leading_zeros(count, x) counts the number of zero-bits from the + msb to the first non-zero bit in the UWtype X. This is the number of + steps X needs to be shifted left to set the msb. Undefined for X == 0, + unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value. + + 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts + from the least significant end. 
+ + 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1, + high_addend_2, low_addend_2) adds two UWtype integers, composed by + HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2 + respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow + (i.e. carry out) is not stored anywhere, and is lost. + + 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend, + high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers, + composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and + LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE + and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere, + and is lost. + + If any of these macros are left undefined for a particular CPU, + C macros are used. */ + +/* The CPUs come in alphabetical order below. + + Please add support for more CPUs here, or improve the current support + for the CPUs below! */ + +#ifdef __riscos__ +#pragma continue_after_hash_error +#else /* !__riscos__ */ +#if defined (__GNUC__) && !defined (NO_ASM) + +/* We sometimes need to clobber "cc" with gcc2, but that would not be + understood by gcc1. Use cpp to avoid major code duplication. */ +#if __GNUC__ < 2 +#define __CLOBBER_CC +#define __AND_CLOBBER_CC +#else /* __GNUC__ >= 2 */ +#define __CLOBBER_CC : "cc" +#define __AND_CLOBBER_CC , "cc" +#endif /* __GNUC__ < 2 */ + + +/*************************************** + ************** A29K ***************** + ***************************************/ +#if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add %1,%4,%5\n" \ + "addc %0,%2,%3" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%r" ((USItype)(ah)), \ + "rI" ((USItype)(bh)), \ + "%r" ((USItype)(al)), \ + "rI" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub %1,%4,%5\n" \ + "subc %0,%2,%3" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "r" ((USItype)(ah)), \ + "rI" ((USItype)(bh)), \ + "r" ((USItype)(al)), \ + "rI" ((USItype)(bl))) +#define umul_ppmm(xh, xl, m0, m1) \ + do { \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("multiplu %0,%1,%2" \ + : "=r" ((USItype)(xl)) \ + : "r" (__m0), \ + "r" (__m1)); \ + __asm__ ("multmu %0,%1,%2" \ + : "=r" ((USItype)(xh)) \ + : "r" (__m0), \ + "r" (__m1)); \ + } while (0) +#define udiv_qrnnd(q, r, n1, n0, d) \ + __asm__ ("dividu %0,%3,%4" \ + : "=r" ((USItype)(q)), \ + "=q" ((USItype)(r)) \ + : "1" ((USItype)(n1)), \ + "r" ((USItype)(n0)), \ + "r" ((USItype)(d))) +#define count_leading_zeros(count, x) \ + __asm__ ("clz %0,%1" \ + : "=r" ((USItype)(count)) \ + : "r" ((USItype)(x))) +#define COUNT_LEADING_ZEROS_0 32 +#endif /* __a29k__ */ + + +#if defined (__alpha) && W_TYPE_SIZE == 64 +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + UDItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("umulh %r1,%2,%0" \ + : "=r" ((UDItype) ph) \ + : "%rJ" (__m0), \ + "rI" (__m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define UMUL_TIME 46 +#ifndef LONGLONG_STANDALONE +#define udiv_qrnnd(q, r, n1, n0, d) \ + do { UDItype __r; \ + (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ + (r) = __r; \ + } while (0) +extern UDItype __udiv_qrnnd (); +#define UDIV_TIME 220 +#endif /* LONGLONG_STANDALONE */ +#endif /* __alpha */ + +/*************************************** + ************** ARM ****************** + ***************************************/ +#if defined (__arm__) && W_TYPE_SIZE == 32 && \ + (!defined (__thumb__) || defined 
(__thumb2__)) +/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */ +#ifndef __ARM_ARCH +# ifdef __ARM_ARCH_2__ +# define __ARM_ARCH 2 +# elif defined (__ARM_ARCH_3__) || defined (__ARM_ARCH_3M__) +# define __ARM_ARCH 3 +# elif defined (__ARM_ARCH_4__) || defined (__ARM_ARCH_4T__) +# define __ARM_ARCH 4 +# elif defined (__ARM_ARCH_5__) || defined (__ARM_ARCH_5E__) \ + || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \ + || defined(__ARM_ARCH_5TEJ__) +# define __ARM_ARCH 5 +# elif defined (__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined (__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \ + || defined (__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) +# define __ARM_ARCH 6 +# elif defined (__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7EM__) +# define __ARM_ARCH 7 +# else + /* could not detect? */ +# endif +#endif +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("adds %1, %4, %5\n" \ + "adc %0, %2, %3" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "%r" ((USItype)(ah)), \ + "rI" ((USItype)(bh)), \ + "%r" ((USItype)(al)), \ + "rI" ((USItype)(bl)) __CLOBBER_CC) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subs %1, %4, %5\n" \ + "sbc %0, %2, %3" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "r" ((USItype)(ah)), \ + "rI" ((USItype)(bh)), \ + "r" ((USItype)(al)), \ + "rI" ((USItype)(bl)) __CLOBBER_CC) +#if (defined __ARM_ARCH && __ARM_ARCH <= 3) +#define umul_ppmm(xh, xl, a, b) \ + __asm__ ("@ Inlined umul_ppmm\n" \ + "mov %|r0, %2, lsr #16 @ AAAA\n" \ + "mov %|r2, %3, lsr #16 @ BBBB\n" \ + "bic %|r1, %2, %|r0, lsl #16 @ aaaa\n" \ + "bic %0, %3, %|r2, lsl #16 @ bbbb\n" \ + "mul %1, %|r1, %|r2 @ aaaa * BBBB\n" \ + "mul %|r2, %|r0, %|r2 @ AAAA * BBBB\n" \ + "mul %|r1, %0, %|r1 @ aaaa * bbbb\n" \ + "mul %0, %|r0, %0 @ AAAA * bbbb\n" \ + "adds %|r0, %1, %0 @ central sum\n" \ + "addcs %|r2, %|r2, #65536\n" \ + "adds %1, %|r1, %|r0, lsl #16\n" \ + "adc %0, %|r2, %|r0, lsr #16" \ + : "=&r" ((xh)), \ + "=r" ((xl)) \ + : "r" ((USItype)(a)), \ + "r" ((USItype)(b)) \ + : "r0", "r1", "r2" __AND_CLOBBER_CC) +#else /* __ARM_ARCH >= 4 */ +#define umul_ppmm(xh, xl, a, b) \ + __asm__ ("@ Inlined umul_ppmm\n" \ + "umull %1, %0, %2, %3" \ + : "=&r" ((xh)), \ + "=r" ((xl)) \ + : "r" ((USItype)(a)), \ + "r" ((USItype)(b))) +#endif /* __ARM_ARCH >= 4 */ +#define UMUL_TIME 20 +#define UDIV_TIME 100 +#if (defined __ARM_ARCH && __ARM_ARCH >= 5) +#define count_leading_zeros(count, x) \ + __asm__ ("clz %0, %1" \ + : "=r" ((count)) \ + : "r" ((USItype)(x))) +#endif /* __ARM_ARCH >= 5 */ +#endif /* __arm__ */ + +/*************************************** + ************** CLIPPER ************** + ***************************************/ +#if defined (__clipper__) && W_TYPE_SIZE == 32 +#define umul_ppmm(w1, w0, u, v) \ + ({union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __xx; \ + __asm__ ("mulwux %2,%0" \ + : "=r" (__xx.__ll) \ + : "%0" ((USItype)(u)), \ + "r" ((USItype)(v))); \ + (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;}) +#define smul_ppmm(w1, w0, u, v) \ + ({union {DItype __ll; \ + struct {SItype __l, __h;} __i; \ + } __xx; \ + __asm__ ("mulwx %2,%0" \ + : "=r" (__xx.__ll) \ + : "%0" ((SItype)(u)), \ + "r" ((SItype)(v))); \ + (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;}) +#define __umulsidi3(u, v) \ + ({UDItype __w; \ + __asm__ ("mulwux %2,%0" \ + : "=r" (__w) \ + : "%0" ((USItype)(u)), \ + "r" ((USItype)(v))); \ + __w; }) +#endif /* __clipper__ */ + + 
+/*************************************** + ************** GMICRO *************** + ***************************************/ +#if defined (__gmicro__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add.w %5,%1\n" \ + "addx %3,%0" \ + : "=g" ((USItype)(sh)), \ + "=&g" ((USItype)(sl)) \ + : "%0" ((USItype)(ah)), \ + "g" ((USItype)(bh)), \ + "%1" ((USItype)(al)), \ + "g" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub.w %5,%1\n" \ + "subx %3,%0" \ + : "=g" ((USItype)(sh)), \ + "=&g" ((USItype)(sl)) \ + : "0" ((USItype)(ah)), \ + "g" ((USItype)(bh)), \ + "1" ((USItype)(al)), \ + "g" ((USItype)(bl))) +#define umul_ppmm(ph, pl, m0, m1) \ + __asm__ ("mulx %3,%0,%1" \ + : "=g" ((USItype)(ph)), \ + "=r" ((USItype)(pl)) \ + : "%0" ((USItype)(m0)), \ + "g" ((USItype)(m1))) +#define udiv_qrnnd(q, r, nh, nl, d) \ + __asm__ ("divx %4,%0,%1" \ + : "=g" ((USItype)(q)), \ + "=r" ((USItype)(r)) \ + : "1" ((USItype)(nh)), \ + "0" ((USItype)(nl)), \ + "g" ((USItype)(d))) +#define count_leading_zeros(count, x) \ + __asm__ ("bsch/1 %1,%0" \ + : "=g" (count) \ + : "g" ((USItype)(x)), \ + "0" ((USItype)0)) +#endif + + +/*************************************** + ************** HPPA ***************** + ***************************************/ +#if defined (__hppa) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ (" add %4,%5,%1\n" \ + " addc %2,%3,%0" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%rM" ((USItype)(ah)), \ + "rM" ((USItype)(bh)), \ + "%rM" ((USItype)(al)), \ + "rM" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ (" sub %4,%5,%1\n" \ + " subb %2,%3,%0" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "rM" ((USItype)(ah)), \ + "rM" ((USItype)(bh)), \ + "rM" ((USItype)(al)), \ + "rM" ((USItype)(bl))) +#if defined (_PA_RISC1_1) +#define umul_ppmm(wh, wl, u, v) \ + do { \ + union {UDItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __xx; \ + __asm__ (" xmpyu %1,%2,%0" \ + : "=*f" (__xx.__ll) \ + : "*f" ((USItype)(u)), \ + "*f" ((USItype)(v))); \ + (wh) = __xx.__i.__h; \ + (wl) = __xx.__i.__l; \ + } while (0) +#define UMUL_TIME 8 +#define UDIV_TIME 60 +#else +#define UMUL_TIME 40 +#define UDIV_TIME 80 +#endif +#ifndef LONGLONG_STANDALONE +#define udiv_qrnnd(q, r, n1, n0, d) \ + do { USItype __r; \ + (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ + (r) = __r; \ + } while (0) +extern USItype __udiv_qrnnd (); +#endif /* LONGLONG_STANDALONE */ +#define count_leading_zeros(count, x) \ + do { \ + USItype __tmp; \ + __asm__ ( \ + " ldi 1,%0 \n" \ + " extru,= %1,15,16,%%r0 ; Bits 31..16 zero? \n" \ + " extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n" \ + " ldo 16(%0),%0 ; Yes. Perform add. \n" \ + " extru,= %1,23,8,%%r0 ; Bits 15..8 zero? \n" \ + " extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n" \ + " ldo 8(%0),%0 ; Yes. Perform add. \n" \ + " extru,= %1,27,4,%%r0 ; Bits 7..4 zero? \n" \ + " extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n" \ + " ldo 4(%0),%0 ; Yes. Perform add. \n" \ + " extru,= %1,29,2,%%r0 ; Bits 3..2 zero? \n" \ + " extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n" \ + " ldo 2(%0),%0 ; Yes. Perform add. \n" \ + " extru %1,30,1,%1 ; Extract bit 1. \n" \ + " sub %0,%1,%0 ; Subtract it. 
" \ + : "=r" (count), "=r" (__tmp) : "1" (x)); \ + } while (0) +#endif /* hppa */ + + +/*************************************** + ************** I370 ***************** + ***************************************/ +#if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32 +#define umul_ppmm(xh, xl, m0, m1) \ + do { \ + union {UDItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __xx; \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mr %0,%3" \ + : "=r" (__xx.__i.__h), \ + "=r" (__xx.__i.__l) \ + : "%1" (__m0), \ + "r" (__m1)); \ + (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ + (xh) += ((((SItype) __m0 >> 31) & __m1) \ + + (((SItype) __m1 >> 31) & __m0)); \ + } while (0) +#define smul_ppmm(xh, xl, m0, m1) \ + do { \ + union {DItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __xx; \ + __asm__ ("mr %0,%3" \ + : "=r" (__xx.__i.__h), \ + "=r" (__xx.__i.__l) \ + : "%1" (m0), \ + "r" (m1)); \ + (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ + } while (0) +#define sdiv_qrnnd(q, r, n1, n0, d) \ + do { \ + union {DItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __xx; \ + __xx.__i.__h = n1; __xx.__i.__l = n0; \ + __asm__ ("dr %0,%2" \ + : "=r" (__xx.__ll) \ + : "0" (__xx.__ll), "r" (d)); \ + (q) = __xx.__i.__l; (r) = __xx.__i.__h; \ + } while (0) +#endif + + +/*************************************** + ************** I386 ***************** + ***************************************/ +#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("addl %5,%1\n" \ + "adcl %3,%0" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "%0" ((USItype)(ah)), \ + "g" ((USItype)(bh)), \ + "%1" ((USItype)(al)), \ + "g" ((USItype)(bl)) \ + __CLOBBER_CC) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subl %5,%1\n" \ + "sbbl %3,%0" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "0" ((USItype)(ah)), \ + "g" ((USItype)(bh)), \ + "1" ((USItype)(al)), \ + "g" ((USItype)(bl)) \ + __CLOBBER_CC) +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("mull %3" \ + : "=a" ((w0)), \ + "=d" ((w1)) \ + : "%0" ((USItype)(u)), \ + "rm" ((USItype)(v)) \ + __CLOBBER_CC) +#define udiv_qrnnd(q, r, n1, n0, d) \ + __asm__ ("divl %4" \ + : "=a" ((q)), \ + "=d" ((r)) \ + : "0" ((USItype)(n0)), \ + "1" ((USItype)(n1)), \ + "rm" ((USItype)(d)) \ + __CLOBBER_CC) +#define count_leading_zeros(count, x) \ + do { \ + USItype __cbtmp; \ + __asm__ ("bsrl %1,%0" \ + : "=r" (__cbtmp) : "rm" ((USItype)(x)) \ + __CLOBBER_CC); \ + (count) = __cbtmp ^ 31; \ + } while (0) +#define count_trailing_zeros(count, x) \ + __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)) __CLOBBER_CC) +#ifndef UMUL_TIME +#define UMUL_TIME 40 +#endif +#ifndef UDIV_TIME +#define UDIV_TIME 40 +#endif +#endif /* 80x86 */ + + +/*************************************** + ************** I860 ***************** + ***************************************/ +#if defined (__i860__) && W_TYPE_SIZE == 32 +#define rshift_rhlc(r,h,l,c) \ + __asm__ ("shr %3,r0,r0\n" \ + "shrd %1,%2,%0" \ + "=r" (r) : "r" (h), "r" (l), "rn" (c)) +#endif /* i860 */ + +/*************************************** + ************** I960 ***************** + ***************************************/ +#if defined (__i960__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("cmpo 1,0\n" \ + "addc %5,%4,%1\n" \ + "addc %3,%2,%0" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%dI" ((USItype)(ah)), \ + "dI" ((USItype)(bh)), \ + "%dI" ((USItype)(al)), \ + "dI" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, 
bh, bl) \ + __asm__ ("cmpo 0,0\n" \ + "subc %5,%4,%1\n" \ + "subc %3,%2,%0" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "dI" ((USItype)(ah)), \ + "dI" ((USItype)(bh)), \ + "dI" ((USItype)(al)), \ + "dI" ((USItype)(bl))) +#define umul_ppmm(w1, w0, u, v) \ + ({union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __xx; \ + __asm__ ("emul %2,%1,%0" \ + : "=d" (__xx.__ll) \ + : "%dI" ((USItype)(u)), \ + "dI" ((USItype)(v))); \ + (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;}) +#define __umulsidi3(u, v) \ + ({UDItype __w; \ + __asm__ ("emul %2,%1,%0" \ + : "=d" (__w) \ + : "%dI" ((USItype)(u)), \ + "dI" ((USItype)(v))); \ + __w; }) +#define udiv_qrnnd(q, r, nh, nl, d) \ + do { \ + union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __nn; \ + __nn.__i.__h = (nh); __nn.__i.__l = (nl); \ + __asm__ ("ediv %d,%n,%0" \ + : "=d" (__rq.__ll) \ + : "dI" (__nn.__ll), \ + "dI" ((USItype)(d))); \ + (r) = __rq.__i.__l; (q) = __rq.__i.__h; \ + } while (0) +#define count_leading_zeros(count, x) \ + do { \ + USItype __cbtmp; \ + __asm__ ("scanbit %1,%0" \ + : "=r" (__cbtmp) \ + : "r" ((USItype)(x))); \ + (count) = __cbtmp ^ 31; \ + } while (0) +#define COUNT_LEADING_ZEROS_0 (-32) /* sic */ +#if defined (__i960mx) /* what is the proper symbol to test??? */ +#define rshift_rhlc(r,h,l,c) \ + do { \ + union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __nn; \ + __nn.__i.__h = (h); __nn.__i.__l = (l); \ + __asm__ ("shre %2,%1,%0" \ + : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \ + } +#endif /* i960mx */ +#endif /* i960 */ + + +/*************************************** + ************** 68000 **************** + ***************************************/ +#if (defined (__mc68000__) || defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add%.l %5,%1\n" \ + "addx%.l %3,%0" \ + : "=d" ((USItype)(sh)), \ + "=&d" ((USItype)(sl)) \ + : "%0" ((USItype)(ah)), \ + "d" ((USItype)(bh)), \ + "%1" ((USItype)(al)), \ + "g" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub%.l %5,%1\n" \ + "subx%.l %3,%0" \ + : "=d" ((USItype)(sh)), \ + "=&d" ((USItype)(sl)) \ + : "0" ((USItype)(ah)), \ + "d" ((USItype)(bh)), \ + "1" ((USItype)(al)), \ + "g" ((USItype)(bl))) +#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("mulu%.l %3,%1:%0" \ + : "=d" ((USItype)(w0)), \ + "=d" ((USItype)(w1)) \ + : "%0" ((USItype)(u)), \ + "dmi" ((USItype)(v))) +#define UMUL_TIME 45 +#define udiv_qrnnd(q, r, n1, n0, d) \ + __asm__ ("divu%.l %4,%1:%0" \ + : "=d" ((USItype)(q)), \ + "=d" ((USItype)(r)) \ + : "0" ((USItype)(n0)), \ + "1" ((USItype)(n1)), \ + "dmi" ((USItype)(d))) +#define UDIV_TIME 90 +#define sdiv_qrnnd(q, r, n1, n0, d) \ + __asm__ ("divs%.l %4,%1:%0" \ + : "=d" ((USItype)(q)), \ + "=d" ((USItype)(r)) \ + : "0" ((USItype)(n0)), \ + "1" ((USItype)(n1)), \ + "dmi" ((USItype)(d))) +#define count_leading_zeros(count, x) \ + __asm__ ("bfffo %1{%b2:%b2},%0" \ + : "=d" ((USItype)(count)) \ + : "od" ((USItype)(x)), "n" (0)) +#define COUNT_LEADING_ZEROS_0 32 +#else /* not mc68020 */ +#define umul_ppmm(xh, xl, a, b) \ + do { USItype __umul_tmp1, __umul_tmp2; \ + __asm__ ("| Inlined umul_ppmm \n" \ + " move%.l %5,%3 \n" \ + " move%.l %2,%0 \n" \ + " move%.w %3,%1 \n" \ + " swap %3 \n" \ + " swap %0 \n" \ + " mulu %2,%1 \n" \ + " mulu %3,%0 \n" \ + " mulu %2,%3 \n" \ + " swap %2 \n" \ + " mulu %5,%2 \n" \ + " add%.l %3,%2 \n" \ + " jcc 1f 
\n" \ + " add%.l %#0x10000,%0 \n" \ + "1: move%.l %2,%3 \n" \ + " clr%.w %2 \n" \ + " swap %2 \n" \ + " swap %3 \n" \ + " clr%.w %3 \n" \ + " add%.l %3,%1 \n" \ + " addx%.l %2,%0 \n" \ + " | End inlined umul_ppmm" \ + : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \ + "=d" (__umul_tmp1), "=&d" (__umul_tmp2) \ + : "%2" ((USItype)(a)), "d" ((USItype)(b))); \ + } while (0) +#define UMUL_TIME 100 +#define UDIV_TIME 400 +#endif /* not mc68020 */ +#endif /* mc68000 */ + + +/*************************************** + ************** 88000 **************** + ***************************************/ +#if defined (__m88000__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("addu.co %1,%r4,%r5\n" \ + "addu.ci %0,%r2,%r3" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%rJ" ((USItype)(ah)), \ + "rJ" ((USItype)(bh)), \ + "%rJ" ((USItype)(al)), \ + "rJ" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subu.co %1,%r4,%r5\n" \ + "subu.ci %0,%r2,%r3" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "rJ" ((USItype)(ah)), \ + "rJ" ((USItype)(bh)), \ + "rJ" ((USItype)(al)), \ + "rJ" ((USItype)(bl))) +#define count_leading_zeros(count, x) \ + do { \ + USItype __cbtmp; \ + __asm__ ("ff1 %0,%1" \ + : "=r" (__cbtmp) \ + : "r" ((USItype)(x))); \ + (count) = __cbtmp ^ 31; \ + } while (0) +#define COUNT_LEADING_ZEROS_0 63 /* sic */ +#if defined (__m88110__) +#define umul_ppmm(wh, wl, u, v) \ + do { \ + union {UDItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __x; \ + __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \ + (wh) = __x.__i.__h; \ + (wl) = __x.__i.__l; \ + } while (0) +#define udiv_qrnnd(q, r, n1, n0, d) \ + ({union {UDItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __x, __q; \ + __x.__i.__h = (n1); __x.__i.__l = (n0); \ + __asm__ ("divu.d %0,%1,%2" \ + : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \ + (r) = (n0) - __q.__l * (d); (q) = __q.__l; }) +#define UMUL_TIME 5 +#define UDIV_TIME 25 +#else +#define UMUL_TIME 17 +#define UDIV_TIME 150 +#endif /* __m88110__ */ +#endif /* __m88000__ */ + +/*************************************** + ************** MIPS ***************** + ***************************************/ +#if defined (__mips__) && W_TYPE_SIZE == 32 +#if defined (__clang__) || (__GNUC__ >= 5) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4) +#define umul_ppmm(w1, w0, u, v) \ + do { \ + UDItype _r; \ + _r = (UDItype) u * v; \ + (w1) = _r >> 32; \ + (w0) = (USItype) _r; \ + } while (0) +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("multu %2,%3" \ + : "=l" ((USItype)(w0)), \ + "=h" ((USItype)(w1)) \ + : "d" ((USItype)(u)), \ + "d" ((USItype)(v))) +#else +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("multu %2,%3 \n" \ + "mflo %0 \n" \ + "mfhi %1" \ + : "=d" ((USItype)(w0)), \ + "=d" ((USItype)(w1)) \ + : "d" ((USItype)(u)), \ + "d" ((USItype)(v))) +#endif +#define UMUL_TIME 10 +#define UDIV_TIME 100 +#endif /* __mips__ */ + +/*************************************** + ************** MIPS/64 ************** + ***************************************/ +#if (defined (__mips) && __mips >= 3) && W_TYPE_SIZE == 64 +#if (__GNUC__ >= 5) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4) +typedef unsigned int UTItype __attribute__ ((mode (TI))); +#define umul_ppmm(w1, w0, u, v) \ + do { \ + UTItype _r; \ + _r = (UTItype) u * v; \ + (w1) = _r >> 64; \ + (w0) = (UDItype) _r; \ + } while (0) +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 +#define umul_ppmm(w1, w0, u, v) \ + __asm__ 
("dmultu %2,%3" \ + : "=l" ((UDItype)(w0)), \ + "=h" ((UDItype)(w1)) \ + : "d" ((UDItype)(u)), \ + "d" ((UDItype)(v))) +#else +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("dmultu %2,%3 \n" \ + "mflo %0 \n" \ + "mfhi %1" \ + : "=d" ((UDItype)(w0)), \ + "=d" ((UDItype)(w1)) \ + : "d" ((UDItype)(u)), \ + "d" ((UDItype)(v))) +#endif +#define UMUL_TIME 20 +#define UDIV_TIME 140 +#endif /* __mips__ */ + + +/*************************************** + ************** 32000 **************** + ***************************************/ +#if defined (__ns32000__) && W_TYPE_SIZE == 32 +#define umul_ppmm(w1, w0, u, v) \ + ({union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __xx; \ + __asm__ ("meid %2,%0" \ + : "=g" (__xx.__ll) \ + : "%0" ((USItype)(u)), \ + "g" ((USItype)(v))); \ + (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;}) +#define __umulsidi3(u, v) \ + ({UDItype __w; \ + __asm__ ("meid %2,%0" \ + : "=g" (__w) \ + : "%0" ((USItype)(u)), \ + "g" ((USItype)(v))); \ + __w; }) +#define udiv_qrnnd(q, r, n1, n0, d) \ + ({union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __xx; \ + __xx.__i.__h = (n1); __xx.__i.__l = (n0); \ + __asm__ ("deid %2,%0" \ + : "=g" (__xx.__ll) \ + : "0" (__xx.__ll), \ + "g" ((USItype)(d))); \ + (r) = __xx.__i.__l; (q) = __xx.__i.__h; }) +#define count_trailing_zeros(count,x) \ + do { + __asm__ ("ffsd %2,%0" \ + : "=r" ((USItype) (count)) \ + : "0" ((USItype) 0), \ + "r" ((USItype) (x))); \ + } while (0) +#endif /* __ns32000__ */ + + +/*************************************** + ************** PPC ****************** + ***************************************/ +#if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "%r" ((USItype)(ah)), \ + "%r" ((USItype)(al)), \ + "rI" ((USItype)(bl))); \ + else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "%r" ((USItype)(ah)), \ + "%r" ((USItype)(al)), \ + "rI" ((USItype)(bl))); \ + else \ + __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "%r" ((USItype)(ah)), \ + "r" ((USItype)(bh)), \ + "%r" ((USItype)(al)), \ + "rI" ((USItype)(bl))); \ + } while (0) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (ah) && (ah) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "r" ((USItype)(bh)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "r" ((USItype)(bh)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + else if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "r" ((USItype)(ah)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "r" ((USItype)(ah)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + else \ + __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ + : "=r" ((sh)), \ + "=&r" ((sl)) \ + : "r" 
((USItype)(ah)), \ + "r" ((USItype)(bh)), \ + "rI" ((USItype)(al)), \ + "r" ((USItype)(bl))); \ + } while (0) +#define count_leading_zeros(count, x) \ + __asm__ ("{cntlz|cntlzw} %0,%1" \ + : "=r" ((count)) \ + : "r" ((USItype)(x))) +#define COUNT_LEADING_ZEROS_0 32 +#if defined (_ARCH_PPC) +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhwu %0,%1,%2" \ + : "=r" (ph) \ + : "%r" (__m0), \ + "r" (__m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define UMUL_TIME 15 +#define smul_ppmm(ph, pl, m0, m1) \ + do { \ + SItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhw %0,%1,%2" \ + : "=r" ((SItype) ph) \ + : "%r" (__m0), \ + "r" (__m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define SMUL_TIME 14 +#define UDIV_TIME 120 +#else +#define umul_ppmm(xh, xl, m0, m1) \ + do { \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mul %0,%2,%3" \ + : "=r" ((xh)), \ + "=q" ((xl)) \ + : "r" (__m0), \ + "r" (__m1)); \ + (xh) += ((((SItype) __m0 >> 31) & __m1) \ + + (((SItype) __m1 >> 31) & __m0)); \ + } while (0) +#define UMUL_TIME 8 +#define smul_ppmm(xh, xl, m0, m1) \ + __asm__ ("mul %0,%2,%3" \ + : "=r" ((SItype)(xh)), \ + "=q" ((SItype)(xl)) \ + : "r" (m0), \ + "r" (m1)) +#define SMUL_TIME 4 +#define sdiv_qrnnd(q, r, nh, nl, d) \ + __asm__ ("div %0,%2,%4" \ + : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \ + : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d))) +#define UDIV_TIME 100 +#endif +#endif /* Power architecture variants. */ + +/* Powerpc 64 bit support taken from gmp-4.1.2. */ +/* We should test _IBMR2 here when we add assembly support for the system + vendor compilers. */ +#if 0 /* Not yet enabled because we don't have hardware for a test. */ +#if (defined (_ARCH_PPC) || defined (__powerpc__)) && W_TYPE_SIZE == 64 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \ + __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ + else \ + __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ + : "=r" (sh), "=&r" (sl) \ + : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \ + } while (0) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + if (__builtin_constant_p (ah) && (ah) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ + else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \ + __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ + : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ + else \ + __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ + : "=r" (sh), "=&r" (sl) \ + : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \ + } while (0) +#define count_leading_zeros(count, x) \ + __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x)) +#define COUNT_LEADING_ZEROS_0 64 +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + UDItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhdu %0,%1,%2" : 
"=r" (ph) : "%r" (m0), "r" (m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define UMUL_TIME 15 +#define smul_ppmm(ph, pl, m0, m1) \ + do { \ + DItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \ + (pl) = __m0 * __m1; \ + } while (0) +#define SMUL_TIME 14 /* ??? */ +#define UDIV_TIME 120 /* ??? */ +#endif /* 64-bit PowerPC. */ +#endif /* if 0 */ + +/*************************************** + ************** PYR ****************** + ***************************************/ +#if defined (__pyr__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("addw %5,%1 \n" \ + "addwc %3,%0" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%0" ((USItype)(ah)), \ + "g" ((USItype)(bh)), \ + "%1" ((USItype)(al)), \ + "g" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subw %5,%1 \n" \ + "subwb %3,%0" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "0" ((USItype)(ah)), \ + "g" ((USItype)(bh)), \ + "1" ((USItype)(al)), \ + "g" ((USItype)(bl))) +/* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */ +#define umul_ppmm(w1, w0, u, v) \ + ({union {UDItype __ll; \ + struct {USItype __h, __l;} __i; \ + } __xx; \ + __asm__ ("movw %1,%R0 \n" \ + "uemul %2,%0" \ + : "=&r" (__xx.__ll) \ + : "g" ((USItype) (u)), \ + "g" ((USItype)(v))); \ + (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;}) +#endif /* __pyr__ */ + + +/*************************************** + ************** RT/ROMP ************** + ***************************************/ +#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("a %1,%5 \n" \ + "ae %0,%3" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%0" ((USItype)(ah)), \ + "r" ((USItype)(bh)), \ + "%1" ((USItype)(al)), \ + "r" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("s %1,%5\n" \ + "se %0,%3" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "0" ((USItype)(ah)), \ + "r" ((USItype)(bh)), \ + "1" ((USItype)(al)), \ + "r" ((USItype)(bl))) +#define umul_ppmm(ph, pl, m0, m1) \ + do { \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ( \ + "s r2,r2 \n" \ + "mts r10,%2 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "m r2,%3 \n" \ + "cas %0,r2,r0 \n" \ + "mfs r10,%1" \ + : "=r" ((USItype)(ph)), \ + "=r" ((USItype)(pl)) \ + : "%r" (__m0), \ + "r" (__m1) \ + : "r2"); \ + (ph) += ((((SItype) __m0 >> 31) & __m1) \ + + (((SItype) __m1 >> 31) & __m0)); \ + } while (0) +#define UMUL_TIME 20 +#define UDIV_TIME 200 +#define count_leading_zeros(count, x) \ + do { \ + if ((x) >= 0x10000) \ + __asm__ ("clz %0,%1" \ + : "=r" ((USItype)(count)) \ + : "r" ((USItype)(x) >> 16)); \ + else \ + { \ + __asm__ ("clz %0,%1" \ + : "=r" ((USItype)(count)) \ + : "r" ((USItype)(x))); \ + (count) += 16; \ + } \ + } while (0) +#endif /* RT/ROMP */ + + +/*************************************** + ************** SH2 ****************** + ***************************************/ +#if (defined (__sh2__) || defined(__sh3__) || defined(__SH4__) ) \ + && W_TYPE_SIZE == 32 +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ( \ + "dmulu.l %2,%3\n" \ + "sts macl,%1\n" \ + "sts mach,%0" \ + : "=r" ((USItype)(w1)), \ + "=r" ((USItype)(w0)) \ + : "r" ((USItype)(u)), \ + "r" ((USItype)(v)) \ 
+ : "macl", "mach") +#define UMUL_TIME 5 +#endif + +/*************************************** + ************** SPARC **************** + ***************************************/ +#if defined (__sparc__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("addcc %r4,%5,%1\n" \ + "addx %r2,%3,%0" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "%rJ" ((USItype)(ah)), \ + "rI" ((USItype)(bh)), \ + "%rJ" ((USItype)(al)), \ + "rI" ((USItype)(bl)) \ + __CLOBBER_CC) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subcc %r4,%5,%1\n" \ + "subx %r2,%3,%0" \ + : "=r" ((USItype)(sh)), \ + "=&r" ((USItype)(sl)) \ + : "rJ" ((USItype)(ah)), \ + "rI" ((USItype)(bh)), \ + "rJ" ((USItype)(al)), \ + "rI" ((USItype)(bl)) \ + __CLOBBER_CC) +#if defined (__sparc_v8__) +/* Don't match immediate range because, 1) it is not often useful, + 2) the 'I' flag thinks of the range as a 13 bit signed interval, + while we want to match a 13 bit interval, sign extended to 32 bits, + but INTERPRETED AS UNSIGNED. */ +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("umul %2,%3,%1;rd %%y,%0" \ + : "=r" ((USItype)(w1)), \ + "=r" ((USItype)(w0)) \ + : "r" ((USItype)(u)), \ + "r" ((USItype)(v))) +#define UMUL_TIME 5 +#ifndef SUPERSPARC /* SuperSPARC's udiv only handles 53 bit dividends */ +#define udiv_qrnnd(q, r, n1, n0, d) \ + do { \ + USItype __q; \ + __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \ + : "=r" ((USItype)(__q)) \ + : "r" ((USItype)(n1)), \ + "r" ((USItype)(n0)), \ + "r" ((USItype)(d))); \ + (r) = (n0) - __q * (d); \ + (q) = __q; \ + } while (0) +#define UDIV_TIME 25 +#endif /* SUPERSPARC */ +#else /* ! __sparc_v8__ */ +#if defined (__sparclite__) +/* This has hardware multiply but not divide. It also has two additional + instructions scan (ffs from high bit) and divscc. */ +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("umul %2,%3,%1;rd %%y,%0" \ + : "=r" ((USItype)(w1)), \ + "=r" ((USItype)(w0)) \ + : "r" ((USItype)(u)), \ + "r" ((USItype)(v))) +#define UMUL_TIME 5 +#define udiv_qrnnd(q, r, n1, n0, d) \ + __asm__ ("! Inlined udiv_qrnnd \n" \ + " wr %%g0,%2,%%y ! Not a delayed write for sparclite \n" \ + " tst %%g0 \n" \ + " divscc %3,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%%g1 \n" \ + " divscc %%g1,%4,%0 \n" \ + " rd %%y,%1 \n" \ + " bl,a 1f \n" \ + " add %1,%4,%1 \n" \ + "1: ! 
End of inline udiv_qrnnd" \ + : "=r" ((USItype)(q)), \ + "=r" ((USItype)(r)) \ + : "r" ((USItype)(n1)), \ + "r" ((USItype)(n0)), \ + "rI" ((USItype)(d)) \ + : "%g1" __AND_CLOBBER_CC) +#define UDIV_TIME 37 +#define count_leading_zeros(count, x) \ + __asm__ ("scan %1,0,%0" \ + : "=r" ((USItype)(x)) \ + : "r" ((USItype)(count))) +/* Early sparclites return 63 for an argument of 0, but they warn that future + implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0 + undefined. */ +#endif /* __sparclite__ */ +#endif /* __sparc_v8__ */ +/* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */ +#ifndef umul_ppmm +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("! Inlined umul_ppmm \n" \ + " wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr \n" \ + " sra %3,31,%%g2 ! Don't move this insn \n" \ + " and %2,%%g2,%%g2 ! Don't move this insn \n" \ + " andcc %%g0,0,%%g1 ! Don't move this insn \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,%3,%%g1 \n" \ + " mulscc %%g1,0,%%g1 \n" \ + " add %%g1,%%g2,%0 \n" \ + " rd %%y,%1" \ + : "=r" ((USItype)(w1)), \ + "=r" ((USItype)(w0)) \ + : "%rI" ((USItype)(u)), \ + "r" ((USItype)(v)) \ + : "%g1", "%g2" __AND_CLOBBER_CC) +#define UMUL_TIME 39 /* 39 instructions */ +#endif +#ifndef udiv_qrnnd +#ifndef LONGLONG_STANDALONE +#define udiv_qrnnd(q, r, n1, n0, d) \ + do { USItype __r; \ + (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ + (r) = __r; \ + } while (0) +extern USItype __udiv_qrnnd (); +#define UDIV_TIME 140 +#endif /* LONGLONG_STANDALONE */ +#endif /* udiv_qrnnd */ +#endif /* __sparc__ */ + + +/*************************************** + ************** VAX ****************** + ***************************************/ +#if defined (__vax__) && W_TYPE_SIZE == 32 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("addl2 %5,%1\n" \ + "adwc %3,%0" \ + : "=g" ((USItype)(sh)), \ + "=&g" ((USItype)(sl)) \ + : "%0" ((USItype)(ah)), \ + "g" ((USItype)(bh)), \ + "%1" ((USItype)(al)), \ + "g" ((USItype)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subl2 %5,%1\n" \ + "sbwc %3,%0" \ + : "=g" ((USItype)(sh)), \ + "=&g" ((USItype)(sl)) \ + : "0" ((USItype)(ah)), \ + "g" ((USItype)(bh)), \ + "1" ((USItype)(al)), \ + "g" ((USItype)(bl))) +#define umul_ppmm(xh, xl, m0, m1) \ + do { \ + union {UDItype __ll; \ + struct {USItype __l, __h;} __i; \ + } __xx; \ + USItype __m0 = (m0), __m1 = (m1); \ + __asm__ ("emul %1,%2,$0,%0" \ + : "=g" (__xx.__ll) \ + : "g" (__m0), \ + "g" (__m1)); \ + (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ + (xh) += ((((SItype) __m0 >> 31) & __m1) \ + + (((SItype) __m1 >> 31) & __m0)); \ + } while (0) +#define sdiv_qrnnd(q, 
r, n1, n0, d) \ + do { \ + union {DItype __ll; \ + struct {SItype __l, __h;} __i; \ + } __xx; \ + __xx.__i.__h = n1; __xx.__i.__l = n0; \ + __asm__ ("ediv %3,%2,%0,%1" \ + : "=g" (q), "=g" (r) \ + : "g" (__xx.__ll), "g" (d)); \ + } while (0) +#endif /* __vax__ */ + + +/*************************************** + ************** Z8000 **************** + ***************************************/ +#if defined (__z8000__) && W_TYPE_SIZE == 16 +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \ + : "=r" ((unsigned int)(sh)), \ + "=&r" ((unsigned int)(sl)) \ + : "%0" ((unsigned int)(ah)), \ + "r" ((unsigned int)(bh)), \ + "%1" ((unsigned int)(al)), \ + "rQR" ((unsigned int)(bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \ + : "=r" ((unsigned int)(sh)), \ + "=&r" ((unsigned int)(sl)) \ + : "0" ((unsigned int)(ah)), \ + "r" ((unsigned int)(bh)), \ + "1" ((unsigned int)(al)), \ + "rQR" ((unsigned int)(bl))) +#define umul_ppmm(xh, xl, m0, m1) \ + do { \ + union {long int __ll; \ + struct {unsigned int __h, __l;} __i; \ + } __xx; \ + unsigned int __m0 = (m0), __m1 = (m1); \ + __asm__ ("mult %S0,%H3" \ + : "=r" (__xx.__i.__h), \ + "=r" (__xx.__i.__l) \ + : "%1" (__m0), \ + "rQR" (__m1)); \ + (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ + (xh) += ((((signed int) __m0 >> 15) & __m1) \ + + (((signed int) __m1 >> 15) & __m0)); \ + } while (0) +#endif /* __z8000__ */ + +#endif /* __GNUC__ */ +#endif /* !__riscos__ */ + + +/*************************************** + *********** Generic Versions ******** + ***************************************/ +#if !defined (umul_ppmm) && defined (__umulsidi3) +#define umul_ppmm(ph, pl, m0, m1) \ + { \ + UDWtype __ll = __umulsidi3 (m0, m1); \ + ph = (UWtype) (__ll >> W_TYPE_SIZE); \ + pl = (UWtype) __ll; \ + } +#endif + +#if !defined (__umulsidi3) +#define __umulsidi3(u, v) \ + ({UWtype __hi, __lo; \ + umul_ppmm (__hi, __lo, u, v); \ + ((UDWtype) __hi << W_TYPE_SIZE) | __lo; }) +#endif + +/* If this machine has no inline assembler, use C macros. */ + +#if !defined (add_ssaaaa) +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + do { \ + UWtype __x; \ + __x = (al) + (bl); \ + (sh) = (ah) + (bh) + (__x < (al)); \ + (sl) = __x; \ + } while (0) +#endif + +#if !defined (sub_ddmmss) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + do { \ + UWtype __x; \ + __x = (al) - (bl); \ + (sh) = (ah) - (bh) - (__x > (al)); \ + (sl) = __x; \ + } while (0) +#endif + +#if !defined (umul_ppmm) +#define umul_ppmm(w1, w0, u, v) \ + do { \ + UWtype __x0, __x1, __x2, __x3; \ + UHWtype __ul, __vl, __uh, __vh; \ + UWtype __u = (u), __v = (v); \ + \ + __ul = __ll_lowpart (__u); \ + __uh = __ll_highpart (__u); \ + __vl = __ll_lowpart (__v); \ + __vh = __ll_highpart (__v); \ + \ + __x0 = (UWtype) __ul * __vl; \ + __x1 = (UWtype) __ul * __vh; \ + __x2 = (UWtype) __uh * __vl; \ + __x3 = (UWtype) __uh * __vh; \ + \ + __x1 += __ll_highpart (__x0);/* this can't give carry */ \ + __x1 += __x2; /* but this indeed can */ \ + if (__x1 < __x2) /* did we get it? */ \ + __x3 += __ll_B; /* yes, add it in the proper pos. 
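+      (each half-word factor is at most B-1 with B = __ll_B, so __x1 is
+      at most (B-1)^2 and __ll_highpart (__x0) is at most B-1; their sum,
+      at most B*(B-1), still fits in one word, while the subsequent
+      addition of __x2 may wrap, which the preceding test detects and
+      this addition repairs)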
*/ \ + \ + (w1) = __x3 + __ll_highpart (__x1); \ + (w0) = (__ll_lowpart (__x1) << W_TYPE_SIZE/2) + __ll_lowpart (__x0);\ + } while (0) +#endif + +#if !defined (umul_ppmm) +#define smul_ppmm(w1, w0, u, v) \ + do { \ + UWtype __w1; \ + UWtype __m0 = (u), __m1 = (v); \ + umul_ppmm (__w1, w0, __m0, __m1); \ + (w1) = __w1 - (-(__m0 >> (W_TYPE_SIZE - 1)) & __m1) \ + - (-(__m1 >> (W_TYPE_SIZE - 1)) & __m0); \ + } while (0) +#endif + +/* Define this unconditionally, so it can be used for debugging. */ +#define __udiv_qrnnd_c(q, r, n1, n0, d) \ + do { \ + UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \ + __d1 = __ll_highpart (d); \ + __d0 = __ll_lowpart (d); \ + \ + __r1 = (n1) % __d1; \ + __q1 = (n1) / __d1; \ + __m = (UWtype) __q1 * __d0; \ + __r1 = __r1 * __ll_B | __ll_highpart (n0); \ + if (__r1 < __m) \ + { \ + __q1--, __r1 += (d); \ + if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\ + if (__r1 < __m) \ + __q1--, __r1 += (d); \ + } \ + __r1 -= __m; \ + \ + __r0 = __r1 % __d1; \ + __q0 = __r1 / __d1; \ + __m = (UWtype) __q0 * __d0; \ + __r0 = __r0 * __ll_B | __ll_lowpart (n0); \ + if (__r0 < __m) \ + { \ + __q0--, __r0 += (d); \ + if (__r0 >= (d)) \ + if (__r0 < __m) \ + __q0--, __r0 += (d); \ + } \ + __r0 -= __m; \ + \ + (q) = (UWtype) __q1 * __ll_B | __q0; \ + (r) = __r0; \ + } while (0) + +/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through + __udiv_w_sdiv (defined in libgcc or elsewhere). */ +#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd) +#define udiv_qrnnd(q, r, nh, nl, d) \ + do { \ + UWtype __r; \ + (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \ + (r) = __r; \ + } while (0) +#endif + +/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */ +#if !defined (udiv_qrnnd) +#define UDIV_NEEDS_NORMALIZATION 1 +#define udiv_qrnnd __udiv_qrnnd_c +#endif + +#if !defined (count_leading_zeros) +extern +#ifdef __STDC__ +const +#endif +unsigned char _gcry_clz_tab[]; +#define MPI_INTERNAL_NEED_CLZ_TAB 1 +#define count_leading_zeros(count, x) \ + do { \ + UWtype __xr = (x); \ + UWtype __a; \ + \ + if (W_TYPE_SIZE <= 32) \ + { \ + __a = __xr < ((UWtype) 1 << 2*__BITS4) \ + ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4) \ + : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 : 3*__BITS4);\ + } \ + else \ + { \ + for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \ + if (((__xr >> __a) & 0xff) != 0) \ + break; \ + } \ + \ + (count) = W_TYPE_SIZE - (_gcry_clz_tab[__xr >> __a] + __a); \ + } while (0) +/* This version gives a well-defined value for zero. */ +#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE +#endif + +#if !defined (count_trailing_zeros) +/* Define count_trailing_zeros using count_leading_zeros. The latter might be + defined in asm, but if it is not, the C version above is good enough. */ +#define count_trailing_zeros(count, x) \ + do { \ + UWtype __ctz_x = (x); \ + UWtype __ctz_c; \ + count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \ + (count) = W_TYPE_SIZE - 1 - __ctz_c; \ + } while (0) +#endif + +#ifndef UDIV_NEEDS_NORMALIZATION +#define UDIV_NEEDS_NORMALIZATION 0 +#endif diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-add.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-add.c new file mode 100644 index 0000000..f8f6094 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-add.c @@ -0,0 +1,237 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-add.c - MPI functions + * Copyright (C) 1994, 1996, 1998, 2001, 2002, 2003 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. 
+ * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> + +#include "mpi-internal.h" + + +/**************** + * Add the unsigned integer V to the mpi-integer U and store the + * result in W. U and V may be the same. + */ +void +gcry_mpi_add_ui(gcry_mpi_t w, gcry_mpi_t u, unsigned long v ) +{ + mpi_ptr_t wp, up; + mpi_size_t usize, wsize; + int usign, wsign; + + usize = u->nlimbs; + usign = u->sign; + wsign = 0; + + /* If not space for W (and possible carry), increase space. */ + wsize = usize + 1; + if( w->alloced < wsize ) + mpi_resize(w, wsize); + + /* These must be after realloc (U may be the same as W). */ + up = u->d; + wp = w->d; + + if( !usize ) { /* simple */ + wp[0] = v; + wsize = v? 1:0; + } + else if( !usign ) { /* mpi is not negative */ + mpi_limb_t cy; + cy = _gcry_mpih_add_1(wp, up, usize, v); + wp[usize] = cy; + wsize = usize + cy; + } + else { /* The signs are different. Need exact comparison to determine + * which operand to subtract from which. */ + if( usize == 1 && up[0] < v ) { + wp[0] = v - up[0]; + wsize = 1; + } + else { + _gcry_mpih_sub_1(wp, up, usize, v); + /* Size can decrease with at most one limb. */ + wsize = usize - (wp[usize-1]==0); + wsign = 1; + } + } + + w->nlimbs = wsize; + w->sign = wsign; +} + + +void +gcry_mpi_add(gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v) +{ + mpi_ptr_t wp, up, vp; + mpi_size_t usize, vsize, wsize; + int usign, vsign, wsign; + + if( u->nlimbs < v->nlimbs ) { /* Swap U and V. */ + usize = v->nlimbs; + usign = v->sign; + vsize = u->nlimbs; + vsign = u->sign; + wsize = usize + 1; + RESIZE_IF_NEEDED(w, wsize); + /* These must be after realloc (u or v may be the same as w). */ + up = v->d; + vp = u->d; + } + else { + usize = u->nlimbs; + usign = u->sign; + vsize = v->nlimbs; + vsign = v->sign; + wsize = usize + 1; + RESIZE_IF_NEEDED(w, wsize); + /* These must be after realloc (u or v may be the same as w). 
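+       If w aliases u or v, the mpi_resize above may have reallocated
+       and moved the limb array, so limb pointers fetched any earlier
+       would dangle.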
*/ + up = u->d; + vp = v->d; + } + wp = w->d; + wsign = 0; + + if( !vsize ) { /* simple */ + MPN_COPY(wp, up, usize ); + wsize = usize; + wsign = usign; + } + else if( usign != vsign ) { /* different sign */ + /* This test is right since USIZE >= VSIZE */ + if( usize != vsize ) { + _gcry_mpih_sub(wp, up, usize, vp, vsize); + wsize = usize; + MPN_NORMALIZE(wp, wsize); + wsign = usign; + } + else if( _gcry_mpih_cmp(up, vp, usize) < 0 ) { + _gcry_mpih_sub_n(wp, vp, up, usize); + wsize = usize; + MPN_NORMALIZE(wp, wsize); + if( !usign ) + wsign = 1; + } + else { + _gcry_mpih_sub_n(wp, up, vp, usize); + wsize = usize; + MPN_NORMALIZE(wp, wsize); + if( usign ) + wsign = 1; + } + } + else { /* U and V have same sign. Add them. */ + mpi_limb_t cy = _gcry_mpih_add(wp, up, usize, vp, vsize); + wp[usize] = cy; + wsize = usize + cy; + if( usign ) + wsign = 1; + } + + w->nlimbs = wsize; + w->sign = wsign; +} + + +/**************** + * Subtract the unsigned integer V from the mpi-integer U and store the + * result in W. + */ +void +gcry_mpi_sub_ui(gcry_mpi_t w, gcry_mpi_t u, unsigned long v ) +{ + mpi_ptr_t wp, up; + mpi_size_t usize, wsize; + int usign, wsign; + + usize = u->nlimbs; + usign = u->sign; + wsign = 0; + + /* If not space for W (and possible carry), increase space. */ + wsize = usize + 1; + if( w->alloced < wsize ) + mpi_resize(w, wsize); + + /* These must be after realloc (U may be the same as W). */ + up = u->d; + wp = w->d; + + if( !usize ) { /* simple */ + wp[0] = v; + wsize = v? 1:0; + wsign = 1; + } + else if( usign ) { /* mpi and v are negative */ + mpi_limb_t cy; + cy = _gcry_mpih_add_1(wp, up, usize, v); + wp[usize] = cy; + wsize = usize + cy; + } + else { /* The signs are different. Need exact comparison to determine + * which operand to subtract from which. */ + if( usize == 1 && up[0] < v ) { + wp[0] = v - up[0]; + wsize = 1; + wsign = 1; + } + else { + _gcry_mpih_sub_1(wp, up, usize, v); + /* Size can decrease with at most one limb. */ + wsize = usize - (wp[usize-1]==0); + } + } + + w->nlimbs = wsize; + w->sign = wsign; +} + +void +gcry_mpi_sub(gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v) +{ + gcry_mpi_t vv = mpi_copy (v); + vv->sign = ! vv->sign; + gcry_mpi_add (w, u, vv); + mpi_free (vv); +} + + +void +gcry_mpi_addm( gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v, gcry_mpi_t m) +{ + gcry_mpi_add(w, u, v); + _gcry_mpi_fdiv_r( w, w, m ); +} + +void +gcry_mpi_subm( gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v, gcry_mpi_t m) +{ + gcry_mpi_sub(w, u, v); + _gcry_mpi_fdiv_r( w, w, m ); +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-asm-defs.h b/grub-core/lib/libgcrypt-grub/mpi/mpi-asm-defs.h new file mode 100644 index 0000000..13424e2 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-asm-defs.h @@ -0,0 +1,10 @@ +/* This file defines some basic constants for the MPI machinery. We + * need to define the types on a per-CPU basis, so it is done with + * this file here. */ +#define BYTES_PER_MPI_LIMB (SIZEOF_UNSIGNED_LONG) + + + + + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-bit.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-bit.c new file mode 100644 index 0000000..05b9f12 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-bit.c @@ -0,0 +1,366 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-bit.c - MPI bit level functions + * Copyright (C) 1998, 1999, 2001, 2002, 2006 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. 
+ * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + + +#ifdef MPI_INTERNAL_NEED_CLZ_TAB +#ifdef __STDC__ +const +#endif +unsigned char +_gcry_clz_tab[] = +{ + 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, +}; +#endif + + +#define A_LIMB_1 ((mpi_limb_t)1) + + +/**************** + * Sometimes we have MSL (most significant limbs) which are 0; + * this is for some reasons not good, so this function removes them. + */ +void +_gcry_mpi_normalize( gcry_mpi_t a ) +{ + if( mpi_is_opaque(a) ) + return; + + for( ; a->nlimbs && !a->d[a->nlimbs-1]; a->nlimbs-- ) + ; +} + + + +/**************** + * Return the number of bits in A. + */ +unsigned int +gcry_mpi_get_nbits( gcry_mpi_t a ) +{ + unsigned n; + + if( mpi_is_opaque(a) ) { + return a->sign; /* which holds the number of bits */ + } + + _gcry_mpi_normalize( a ); + if( a->nlimbs ) { + mpi_limb_t alimb = a->d[a->nlimbs-1]; + if( alimb ) + count_leading_zeros( n, alimb ); + else + n = BITS_PER_MPI_LIMB; + n = BITS_PER_MPI_LIMB - n + (a->nlimbs-1) * BITS_PER_MPI_LIMB; + } + else + n = 0; + return n; +} + + +/**************** + * Test whether bit N is set. + */ +int +gcry_mpi_test_bit( gcry_mpi_t a, unsigned int n ) +{ + unsigned int limbno, bitno; + mpi_limb_t limb; + + limbno = n / BITS_PER_MPI_LIMB; + bitno = n % BITS_PER_MPI_LIMB; + + if( limbno >= a->nlimbs ) + return 0; /* too far left: this is a 0 */ + limb = a->d[limbno]; + return (limb & (A_LIMB_1 << bitno))? 1: 0; +} + + +/**************** + * Set bit N of A. + */ +void +gcry_mpi_set_bit( gcry_mpi_t a, unsigned int n ) +{ + unsigned int limbno, bitno; + + limbno = n / BITS_PER_MPI_LIMB; + bitno = n % BITS_PER_MPI_LIMB; + + if ( limbno >= a->nlimbs ) + { + mpi_resize (a, limbno+1 ); + a->nlimbs = limbno+1; + } + a->d[limbno] |= (A_LIMB_1<<bitno); +} + +/**************** + * Set bit N of A. 
and clear all bits above + */ +void +gcry_mpi_set_highbit( gcry_mpi_t a, unsigned int n ) +{ + unsigned int limbno, bitno; + + limbno = n / BITS_PER_MPI_LIMB; + bitno = n % BITS_PER_MPI_LIMB; + + if ( limbno >= a->nlimbs ) + { + mpi_resize (a, limbno+1 ); + a->nlimbs = limbno+1; + } + a->d[limbno] |= (A_LIMB_1<<bitno); + for ( bitno++; bitno < BITS_PER_MPI_LIMB; bitno++ ) + a->d[limbno] &= ~(A_LIMB_1 << bitno); + a->nlimbs = limbno+1; +} + +/**************** + * clear bit N of A and all bits above + */ +void +gcry_mpi_clear_highbit( gcry_mpi_t a, unsigned int n ) +{ + unsigned int limbno, bitno; + + limbno = n / BITS_PER_MPI_LIMB; + bitno = n % BITS_PER_MPI_LIMB; + + if( limbno >= a->nlimbs ) + return; /* not allocated, therefore no need to clear bits + :-) */ + + for( ; bitno < BITS_PER_MPI_LIMB; bitno++ ) + a->d[limbno] &= ~(A_LIMB_1 << bitno); + a->nlimbs = limbno+1; +} + +/**************** + * Clear bit N of A. + */ +void +gcry_mpi_clear_bit( gcry_mpi_t a, unsigned int n ) +{ + unsigned int limbno, bitno; + + limbno = n / BITS_PER_MPI_LIMB; + bitno = n % BITS_PER_MPI_LIMB; + + if( limbno >= a->nlimbs ) + return; /* don't need to clear this bit, it's to far to left */ + a->d[limbno] &= ~(A_LIMB_1 << bitno); +} + + +/**************** + * Shift A by COUNT limbs to the right + * This is used only within the MPI library + */ +void +_gcry_mpi_rshift_limbs( gcry_mpi_t a, unsigned int count ) +{ + mpi_ptr_t ap = a->d; + mpi_size_t n = a->nlimbs; + unsigned int i; + + if( count >= n ) { + a->nlimbs = 0; + return; + } + + for( i = 0; i < n - count; i++ ) + ap[i] = ap[i+count]; + ap[i] = 0; + a->nlimbs -= count; +} + + +/* + * Shift A by N bits to the right. + */ +void +gcry_mpi_rshift ( gcry_mpi_t x, gcry_mpi_t a, unsigned int n ) +{ + mpi_size_t xsize; + unsigned int i; + unsigned int nlimbs = (n/BITS_PER_MPI_LIMB); + unsigned int nbits = (n%BITS_PER_MPI_LIMB); + + if ( x == a ) + { + /* In-place operation. */ + if ( nlimbs >= x->nlimbs ) + { + x->nlimbs = 0; + return; + } + + if (nlimbs) + { + for (i=0; i < x->nlimbs - nlimbs; i++ ) + x->d[i] = x->d[i+nlimbs]; + x->d[i] = 0; + x->nlimbs -= nlimbs; + + } + if ( x->nlimbs && nbits ) + _gcry_mpih_rshift ( x->d, x->d, x->nlimbs, nbits ); + } + else if ( nlimbs ) + { + /* Copy and shift by more or equal bits than in a limb. */ + xsize = a->nlimbs; + x->sign = a->sign; + RESIZE_IF_NEEDED (x, xsize); + x->nlimbs = xsize; + for (i=0; i < a->nlimbs; i++ ) + x->d[i] = a->d[i]; + x->nlimbs = i; + + if ( nlimbs >= x->nlimbs ) + { + x->nlimbs = 0; + return; + } + + if (nlimbs) + { + for (i=0; i < x->nlimbs - nlimbs; i++ ) + x->d[i] = x->d[i+nlimbs]; + x->d[i] = 0; + x->nlimbs -= nlimbs; + } + + if ( x->nlimbs && nbits ) + _gcry_mpih_rshift ( x->d, x->d, x->nlimbs, nbits ); + } + else + { + /* Copy and shift by less than bits in a limb. */ + xsize = a->nlimbs; + x->sign = a->sign; + RESIZE_IF_NEEDED (x, xsize); + x->nlimbs = xsize; + + if ( xsize ) + { + if (nbits ) + _gcry_mpih_rshift (x->d, a->d, x->nlimbs, nbits ); + else + { + /* The rshift helper function is not specified for + NBITS==0, thus we do a plain copy here. 
*/ + for (i=0; i < x->nlimbs; i++ ) + x->d[i] = a->d[i]; + } + } + } + MPN_NORMALIZE (x->d, x->nlimbs); +} + + +/**************** + * Shift A by COUNT limbs to the left + * This is used only within the MPI library + */ +void +_gcry_mpi_lshift_limbs (gcry_mpi_t a, unsigned int count) +{ + mpi_ptr_t ap; + int n = a->nlimbs; + int i; + + if (!count || !n) + return; + + RESIZE_IF_NEEDED (a, n+count); + + ap = a->d; + for (i = n-1; i >= 0; i--) + ap[i+count] = ap[i]; + for (i=0; i < count; i++ ) + ap[i] = 0; + a->nlimbs += count; +} + + +/* + * Shift A by N bits to the left. + */ +void +gcry_mpi_lshift ( gcry_mpi_t x, gcry_mpi_t a, unsigned int n ) +{ + unsigned int nlimbs = (n/BITS_PER_MPI_LIMB); + unsigned int nbits = (n%BITS_PER_MPI_LIMB); + + if (x == a && !n) + return; /* In-place shift with an amount of zero. */ + + if ( x != a ) + { + /* Copy A to X. */ + unsigned int alimbs = a->nlimbs; + int asign = a->sign; + mpi_ptr_t xp, ap; + + RESIZE_IF_NEEDED (x, alimbs+nlimbs+1); + xp = x->d; + ap = a->d; + MPN_COPY (xp, ap, alimbs); + x->nlimbs = alimbs; + x->flags = a->flags; + x->sign = asign; + } + + if (nlimbs && !nbits) + { + /* Shift a full number of limbs. */ + _gcry_mpi_lshift_limbs (x, nlimbs); + } + else if (n) + { + /* We use a very dump approach: Shift left by the number of + limbs plus one and than fix it up by an rshift. */ + _gcry_mpi_lshift_limbs (x, nlimbs+1); + gcry_mpi_rshift (x, x, BITS_PER_MPI_LIMB - nbits); + } + + MPN_NORMALIZE (x->d, x->nlimbs); +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-cmp.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-cmp.c new file mode 100644 index 0000000..b0ebbc1 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-cmp.c @@ -0,0 +1,109 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-cmp.c - MPI functions + * Copyright (C) 1998, 1999, 2001, 2002, 2005 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" + +int +gcry_mpi_cmp_ui (gcry_mpi_t u, unsigned long v) +{ + mpi_limb_t limb = v; + + _gcry_mpi_normalize (u); + + /* Handle the case that U contains no limb. */ + if (u->nlimbs == 0) + return -(limb != 0); + + /* Handle the case that U is negative. */ + if (u->sign) + return -1; + + if (u->nlimbs == 1) + { + /* Handle the case that U contains exactly one limb. */ + + if (u->d[0] > limb) + return 1; + if (u->d[0] < limb) + return -1; + return 0; + } + else + /* Handle the case that U contains more than one limb. 
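+       Since u has been normalized its top limb is non-zero, so two or
+       more limbs mean u >= 2^BITS_PER_MPI_LIMB, which exceeds any value
+       the single-limb LIMB can hold; u is also known to be non-negative
+       at this point, hence the result is 1.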
*/ + return 1; +} + + +int +gcry_mpi_cmp (gcry_mpi_t u, gcry_mpi_t v) +{ + mpi_size_t usize; + mpi_size_t vsize; + int cmp; + + if (mpi_is_opaque (u) || mpi_is_opaque (v)) + { + if (mpi_is_opaque (u) && !mpi_is_opaque (v)) + return -1; + if (!mpi_is_opaque (u) && mpi_is_opaque (v)) + return 1; + if (!u->sign && !v->sign) + return 0; /* Empty buffers are identical. */ + if (u->sign < v->sign) + return -1; + if (u->sign > v->sign) + return 1; + return memcmp (u->d, v->d, (u->sign+7)/8); + } + else + { + _gcry_mpi_normalize (u); + _gcry_mpi_normalize (v); + + usize = u->nlimbs; + vsize = v->nlimbs; + + /* Compare sign bits. */ + + if (!u->sign && v->sign) + return 1; + if (u->sign && !v->sign) + return -1; + + /* U and V are either both positive or both negative. */ + + if (usize != vsize && !u->sign && !v->sign) + return usize - vsize; + if (usize != vsize && u->sign && v->sign) + return vsize + usize; + if (!usize ) + return 0; + if (!(cmp = _gcry_mpih_cmp (u->d, v->d, usize))) + return 0; + if ((cmp < 0?1:0) == (u->sign?1:0)) + return 1; + } + return -1; +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-div.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-div.c new file mode 100644 index 0000000..5218caa --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-div.c @@ -0,0 +1,357 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-div.c - MPI functions + * Copyright (C) 1994, 1996, 1998, 2001, 2002, + * 2003 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" +#include "g10lib.h" + + +void +_gcry_mpi_fdiv_r( gcry_mpi_t rem, gcry_mpi_t dividend, gcry_mpi_t divisor ) +{ + int divisor_sign = divisor->sign; + gcry_mpi_t temp_divisor = NULL; + + /* We need the original value of the divisor after the remainder has been + * preliminary calculated. We have to copy it to temporary space if it's + * the same variable as REM. */ + if( rem == divisor ) { + temp_divisor = mpi_copy( divisor ); + divisor = temp_divisor; + } + + _gcry_mpi_tdiv_r( rem, dividend, divisor ); + + if( ((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs ) + gcry_mpi_add( rem, rem, divisor); + + if( temp_divisor ) + mpi_free(temp_divisor); +} + + + +/**************** + * Division rounding the quotient towards -infinity. + * The remainder gets the same sign as the denominator. 
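+ * Example: dividing -7 by 3, the remainder of the magnitudes is 1, and
+ * because the dividend is negative the code below returns 3 - 1 = 2,
+ * so that -7 = 3*(-3) + 2 holds with a remainder carrying the
+ * (positive) sign of the divisor.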
+ * rem is optional + */ + +ulong +_gcry_mpi_fdiv_r_ui( gcry_mpi_t rem, gcry_mpi_t dividend, ulong divisor ) +{ + mpi_limb_t rlimb; + + rlimb = _gcry_mpih_mod_1( dividend->d, dividend->nlimbs, divisor ); + if( rlimb && dividend->sign ) + rlimb = divisor - rlimb; + + if( rem ) { + rem->d[0] = rlimb; + rem->nlimbs = rlimb? 1:0; + } + return rlimb; +} + + +void +_gcry_mpi_fdiv_q( gcry_mpi_t quot, gcry_mpi_t dividend, gcry_mpi_t divisor ) +{ + gcry_mpi_t tmp = mpi_alloc( mpi_get_nlimbs(quot) ); + _gcry_mpi_fdiv_qr( quot, tmp, dividend, divisor); + mpi_free(tmp); +} + +void +_gcry_mpi_fdiv_qr( gcry_mpi_t quot, gcry_mpi_t rem, gcry_mpi_t dividend, gcry_mpi_t divisor ) +{ + int divisor_sign = divisor->sign; + gcry_mpi_t temp_divisor = NULL; + + if( quot == divisor || rem == divisor ) { + temp_divisor = mpi_copy( divisor ); + divisor = temp_divisor; + } + + _gcry_mpi_tdiv_qr( quot, rem, dividend, divisor ); + + if( (divisor_sign ^ dividend->sign) && rem->nlimbs ) { + gcry_mpi_sub_ui( quot, quot, 1 ); + gcry_mpi_add( rem, rem, divisor); + } + + if( temp_divisor ) + mpi_free(temp_divisor); +} + + +/* If den == quot, den needs temporary storage. + * If den == rem, den needs temporary storage. + * If num == quot, num needs temporary storage. + * If den has temporary storage, it can be normalized while being copied, + * i.e no extra storage should be allocated. + */ + +void +_gcry_mpi_tdiv_r( gcry_mpi_t rem, gcry_mpi_t num, gcry_mpi_t den) +{ + _gcry_mpi_tdiv_qr(NULL, rem, num, den ); +} + +void +_gcry_mpi_tdiv_qr( gcry_mpi_t quot, gcry_mpi_t rem, gcry_mpi_t num, gcry_mpi_t den) +{ + mpi_ptr_t np, dp; + mpi_ptr_t qp, rp; + mpi_size_t nsize = num->nlimbs; + mpi_size_t dsize = den->nlimbs; + mpi_size_t qsize, rsize; + mpi_size_t sign_remainder = num->sign; + mpi_size_t sign_quotient = num->sign ^ den->sign; + unsigned normalization_steps; + mpi_limb_t q_limb; + mpi_ptr_t marker[5]; + unsigned int marker_nlimbs[5]; + int markidx=0; + + /* Ensure space is enough for quotient and remainder. + * We need space for an extra limb in the remainder, because it's + * up-shifted (normalized) below. */ + rsize = nsize + 1; + mpi_resize( rem, rsize); + + qsize = rsize - dsize; /* qsize cannot be bigger than this. */ + if( qsize <= 0 ) { + if( num != rem ) { + rem->nlimbs = num->nlimbs; + rem->sign = num->sign; + MPN_COPY(rem->d, num->d, nsize); + } + if( quot ) { + /* This needs to follow the assignment to rem, in case the + * numerator and quotient are the same. */ + quot->nlimbs = 0; + quot->sign = 0; + } + return; + } + + if( quot ) + mpi_resize( quot, qsize); + + /* Read pointers here, when reallocation is finished. */ + np = num->d; + dp = den->d; + rp = rem->d; + + /* Optimize division by a single-limb divisor. */ + if( dsize == 1 ) { + mpi_limb_t rlimb; + if( quot ) { + qp = quot->d; + rlimb = _gcry_mpih_divmod_1( qp, np, nsize, dp[0] ); + qsize -= qp[qsize - 1] == 0; + quot->nlimbs = qsize; + quot->sign = sign_quotient; + } + else + rlimb = _gcry_mpih_mod_1( np, nsize, dp[0] ); + rp[0] = rlimb; + rsize = rlimb != 0?1:0; + rem->nlimbs = rsize; + rem->sign = sign_remainder; + return; + } + + + if( quot ) { + qp = quot->d; + /* Make sure QP and NP point to different objects. Otherwise the + * numerator would be gradually overwritten by the quotient limbs. */ + if(qp == np) { /* Copy NP object to temporary space. */ + marker_nlimbs[markidx] = nsize; + np = marker[markidx++] = mpi_alloc_limb_space(nsize, + mpi_is_secure(quot)); + MPN_COPY(np, qp, nsize); + } + } + else /* Put quotient at top of remainder. 
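+            The remainder was resized to nsize + 1 limbs above, so its
+            upper limbs can hold the quotient the caller did not ask
+            for, avoiding a separate allocation.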
*/ + qp = rp + dsize; + + count_leading_zeros( normalization_steps, dp[dsize - 1] ); + + /* Normalize the denominator, i.e. make its most significant bit set by + * shifting it NORMALIZATION_STEPS bits to the left. Also shift the + * numerator the same number of steps (to keep the quotient the same!). + */ + if( normalization_steps ) { + mpi_ptr_t tp; + mpi_limb_t nlimb; + + /* Shift up the denominator setting the most significant bit of + * the most significant word. Use temporary storage not to clobber + * the original contents of the denominator. */ + marker_nlimbs[markidx] = dsize; + tp = marker[markidx++] = mpi_alloc_limb_space(dsize,mpi_is_secure(den)); + _gcry_mpih_lshift( tp, dp, dsize, normalization_steps ); + dp = tp; + + /* Shift up the numerator, possibly introducing a new most + * significant word. Move the shifted numerator in the remainder + * meanwhile. */ + nlimb = _gcry_mpih_lshift(rp, np, nsize, normalization_steps); + if( nlimb ) { + rp[nsize] = nlimb; + rsize = nsize + 1; + } + else + rsize = nsize; + } + else { + /* The denominator is already normalized, as required. Copy it to + * temporary space if it overlaps with the quotient or remainder. */ + if( dp == rp || (quot && (dp == qp))) { + mpi_ptr_t tp; + + marker_nlimbs[markidx] = dsize; + tp = marker[markidx++] = mpi_alloc_limb_space(dsize, + mpi_is_secure(den)); + MPN_COPY( tp, dp, dsize ); + dp = tp; + } + + /* Move the numerator to the remainder. */ + if( rp != np ) + MPN_COPY(rp, np, nsize); + + rsize = nsize; + } + + q_limb = _gcry_mpih_divrem( qp, 0, rp, rsize, dp, dsize ); + + if( quot ) { + qsize = rsize - dsize; + if(q_limb) { + qp[qsize] = q_limb; + qsize += 1; + } + + quot->nlimbs = qsize; + quot->sign = sign_quotient; + } + + rsize = dsize; + MPN_NORMALIZE (rp, rsize); + + if( normalization_steps && rsize ) { + _gcry_mpih_rshift(rp, rp, rsize, normalization_steps); + rsize -= rp[rsize - 1] == 0?1:0; + } + + rem->nlimbs = rsize; + rem->sign = sign_remainder; + while( markidx ) + { + markidx--; + _gcry_mpi_free_limb_space (marker[markidx], marker_nlimbs[markidx]); + } +} + +void +_gcry_mpi_tdiv_q_2exp( gcry_mpi_t w, gcry_mpi_t u, unsigned int count ) +{ + mpi_size_t usize, wsize; + mpi_size_t limb_cnt; + + usize = u->nlimbs; + limb_cnt = count / BITS_PER_MPI_LIMB; + wsize = usize - limb_cnt; + if( limb_cnt >= usize ) + w->nlimbs = 0; + else { + mpi_ptr_t wp; + mpi_ptr_t up; + + RESIZE_IF_NEEDED( w, wsize ); + wp = w->d; + up = u->d; + + count %= BITS_PER_MPI_LIMB; + if( count ) { + _gcry_mpih_rshift( wp, up + limb_cnt, wsize, count ); + wsize -= !wp[wsize - 1]; + } + else { + MPN_COPY_INCR( wp, up + limb_cnt, wsize); + } + + w->nlimbs = wsize; + } +} + +/**************** + * Check whether dividend is divisible by divisor + * (note: divisor must fit into a limb) + */ +int +_gcry_mpi_divisible_ui(gcry_mpi_t dividend, ulong divisor ) +{ + return !_gcry_mpih_mod_1( dividend->d, dividend->nlimbs, divisor ); +} + + +void +gcry_mpi_div (gcry_mpi_t quot, gcry_mpi_t rem, gcry_mpi_t dividend, gcry_mpi_t divisor, int round) +{ + if (!round) + { + if (!rem) + { + gcry_mpi_t tmp = mpi_alloc (mpi_get_nlimbs(quot)); + _gcry_mpi_tdiv_qr (quot, tmp, dividend, divisor); + mpi_free (tmp); + } + else + _gcry_mpi_tdiv_qr (quot, rem, dividend, divisor); + } + else if (round < 0) + { + if (!rem) + _gcry_mpi_fdiv_q (quot, dividend, divisor); + else if (!quot) + _gcry_mpi_fdiv_r (rem, dividend, divisor); + else + _gcry_mpi_fdiv_qr (quot, rem, dividend, divisor); + } + else + log_bug ("mpi rounding to ceiling not yet 
implemented\n"); +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-gcd.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-gcd.c new file mode 100644 index 0000000..34bd249 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-gcd.c @@ -0,0 +1,53 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-gcd.c - MPI functions + * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" + +/**************** + * Find the greatest common divisor G of A and B. + * Return: true if this 1, false in all other cases + */ +int +gcry_mpi_gcd( gcry_mpi_t g, gcry_mpi_t xa, gcry_mpi_t xb ) +{ + gcry_mpi_t a, b; + + a = mpi_copy(xa); + b = mpi_copy(xb); + + /* TAOCP Vol II, 4.5.2, Algorithm A */ + a->sign = 0; + b->sign = 0; + while( gcry_mpi_cmp_ui( b, 0 ) ) { + _gcry_mpi_fdiv_r( g, a, b ); /* g used as temorary variable */ + mpi_set(a,b); + mpi_set(b,g); + } + mpi_set(g, a); + + mpi_free(a); + mpi_free(b); + return !gcry_mpi_cmp_ui( g, 1); +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-inline.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-inline.c new file mode 100644 index 0000000..4e3e8f3 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-inline.c @@ -0,0 +1,37 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-inline.c + * Copyright (C) 1999, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> + +/* put the inline functions as real functions into the lib */ +#define G10_MPI_INLINE_DECL + +#include "mpi-internal.h" + +/* always include the header because it is only + * included by mpi-internal if __GCC__ is defined but we + * need it here in all cases and the above definition of + * of the macro allows us to do so + */ +#include "mpi-inline.h" diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-inline.h b/grub-core/lib/libgcrypt-grub/mpi/mpi-inline.h new file mode 100644 index 0000000..0818d20 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-inline.h @@ -0,0 +1,163 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-inline.h - Internal to the Multi Precision Integers + * Copyright (C) 1994, 1996, 1998, 1999, + * 2001, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#ifndef G10_MPI_INLINE_H +#define G10_MPI_INLINE_H + +/* Starting with gcc 4.3 "extern inline" conforms in c99 mode to the + c99 semantics. To keep the useful old semantics we use an + attribute. 
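+   Under the GNU89 semantics selected here an "extern inline" definition
+   is used for inlining only and emits no out-of-line symbol; the real,
+   callable functions are emitted once by mpi-inline.c, which defines
+   G10_MPI_INLINE_DECL to be empty before including this header.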
*/ +#ifndef G10_MPI_INLINE_DECL +# ifdef __GNUC_STDC_INLINE__ +# define G10_MPI_INLINE_DECL extern inline __attribute__ ((__gnu_inline__)) +# else +# define G10_MPI_INLINE_DECL extern __inline__ +# endif +#endif + +G10_MPI_INLINE_DECL mpi_limb_t +_gcry_mpih_add_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb) +{ + mpi_limb_t x; + + x = *s1_ptr++; + s2_limb += x; + *res_ptr++ = s2_limb; + if( s2_limb < x ) { /* sum is less than the left operand: handle carry */ + while( --s1_size ) { + x = *s1_ptr++ + 1; /* add carry */ + *res_ptr++ = x; /* and store */ + if( x ) /* not 0 (no overflow): we can stop */ + goto leave; + } + return 1; /* return carry (size of s1 to small) */ + } + + leave: + if( res_ptr != s1_ptr ) { /* not the same variable */ + mpi_size_t i; /* copy the rest */ + for( i=0; i < s1_size-1; i++ ) + res_ptr[i] = s1_ptr[i]; + } + return 0; /* no carry */ +} + + + +G10_MPI_INLINE_DECL mpi_limb_t +_gcry_mpih_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, + mpi_ptr_t s2_ptr, mpi_size_t s2_size) +{ + mpi_limb_t cy = 0; + + if( s2_size ) + cy = _gcry_mpih_add_n( res_ptr, s1_ptr, s2_ptr, s2_size ); + + if( s1_size - s2_size ) + cy = _gcry_mpih_add_1( res_ptr + s2_size, s1_ptr + s2_size, + s1_size - s2_size, cy); + return cy; +} + + +G10_MPI_INLINE_DECL mpi_limb_t +_gcry_mpih_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb ) +{ + mpi_limb_t x; + + x = *s1_ptr++; + s2_limb = x - s2_limb; + *res_ptr++ = s2_limb; + if( s2_limb > x ) { + while( --s1_size ) { + x = *s1_ptr++; + *res_ptr++ = x - 1; + if( x ) + goto leave; + } + return 1; + } + + leave: + if( res_ptr != s1_ptr ) { + mpi_size_t i; + for( i=0; i < s1_size-1; i++ ) + res_ptr[i] = s1_ptr[i]; + } + return 0; +} + + + +G10_MPI_INLINE_DECL mpi_limb_t +_gcry_mpih_sub( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, + mpi_ptr_t s2_ptr, mpi_size_t s2_size) +{ + mpi_limb_t cy = 0; + + if( s2_size ) + cy = _gcry_mpih_sub_n(res_ptr, s1_ptr, s2_ptr, s2_size); + + if( s1_size - s2_size ) + cy = _gcry_mpih_sub_1(res_ptr + s2_size, s1_ptr + s2_size, + s1_size - s2_size, cy); + return cy; +} + +/**************** + * Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE. + * There are no restrictions on the relative sizes of + * the two arguments. + * Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2. + */ +G10_MPI_INLINE_DECL int +_gcry_mpih_cmp( mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size ) +{ + mpi_size_t i; + mpi_limb_t op1_word, op2_word; + + for( i = size - 1; i >= 0 ; i--) { + op1_word = op1_ptr[i]; + op2_word = op2_ptr[i]; + if( op1_word != op2_word ) + goto diff; + } + return 0; + + diff: + /* This can *not* be simplified to + * op2_word - op2_word + * since that expression might give signed overflow. */ + return (op1_word > op2_word) ? 1 : -1; +} + + +#endif /*G10_MPI_INLINE_H*/ diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-internal.h b/grub-core/lib/libgcrypt-grub/mpi/mpi-internal.h new file mode 100644 index 0000000..c9684f2 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-internal.h @@ -0,0 +1,279 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-internal.h - Internal to the Multi Precision Integers + * Copyright (C) 1994, 1996, 1998, 2000, 2002, + * 2003 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. 
+ * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#ifndef G10_MPI_INTERNAL_H +#define G10_MPI_INTERNAL_H + +#include "mpi-asm-defs.h" + +#ifndef BITS_PER_MPI_LIMB +#if BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_INT + typedef unsigned int mpi_limb_t; + typedef signed int mpi_limb_signed_t; +#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_LONG + typedef unsigned long int mpi_limb_t; + typedef signed long int mpi_limb_signed_t; +#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_LONG_LONG + typedef unsigned long long int mpi_limb_t; + typedef signed long long int mpi_limb_signed_t; +#elif BYTES_PER_MPI_LIMB == SIZEOF_UNSIGNED_SHORT + typedef unsigned short int mpi_limb_t; + typedef signed short int mpi_limb_signed_t; +#else +#error BYTES_PER_MPI_LIMB does not match any C type +#endif +#define BITS_PER_MPI_LIMB (8*BYTES_PER_MPI_LIMB) +#endif /*BITS_PER_MPI_LIMB*/ + +#include "mpi.h" + +/* If KARATSUBA_THRESHOLD is not already defined, define it to a + * value which is good on most machines. */ + +/* tested 4, 16, 32 and 64, where 16 gave the best performance when + * checking a 768 and a 1024 bit ElGamal signature. + * (wk 22.12.97) */ +#ifndef KARATSUBA_THRESHOLD +#define KARATSUBA_THRESHOLD 16 +#endif + +/* The code can't handle KARATSUBA_THRESHOLD smaller than 2. */ +#if KARATSUBA_THRESHOLD < 2 +#undef KARATSUBA_THRESHOLD +#define KARATSUBA_THRESHOLD 2 +#endif + + +typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */ +typedef int mpi_size_t; /* (must be a signed type) */ + +#define ABS(x) (x >= 0 ? x : -x) +#define MIN(l,o) ((l) < (o) ? (l) : (o)) +#define MAX(h,i) ((h) > (i) ? (h) : (i)) +#define RESIZE_IF_NEEDED(a,b) \ + do { \ + if( (a)->alloced < (b) ) \ + mpi_resize((a), (b)); \ + } while(0) + +/* Copy N limbs from S to D. 
*/ +#define MPN_COPY( d, s, n) \ + do { \ + mpi_size_t _i; \ + for( _i = 0; _i < (n); _i++ ) \ + (d)[_i] = (s)[_i]; \ + } while(0) + +#define MPN_COPY_INCR( d, s, n) \ + do { \ + mpi_size_t _i; \ + for( _i = 0; _i < (n); _i++ ) \ + (d)[_i] = (d)[_i]; \ + } while (0) + +#define MPN_COPY_DECR( d, s, n ) \ + do { \ + mpi_size_t _i; \ + for( _i = (n)-1; _i >= 0; _i--) \ + (d)[_i] = (s)[_i]; \ + } while(0) + +/* Zero N limbs at D */ +#define MPN_ZERO(d, n) \ + do { \ + int _i; \ + for( _i = 0; _i < (n); _i++ ) \ + (d)[_i] = 0; \ + } while (0) + +#define MPN_NORMALIZE(d, n) \ + do { \ + while( (n) > 0 ) { \ + if( (d)[(n)-1] ) \ + break; \ + (n)--; \ + } \ + } while(0) + +#define MPN_NORMALIZE_NOT_ZERO(d, n) \ + do { \ + for(;;) { \ + if( (d)[(n)-1] ) \ + break; \ + (n)--; \ + } \ + } while(0) + +#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \ + do { \ + if( (size) < KARATSUBA_THRESHOLD ) \ + mul_n_basecase (prodp, up, vp, size); \ + else \ + mul_n (prodp, up, vp, size, tspace); \ + } while (0); + + +/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest + * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB). + * If this would yield overflow, DI should be the largest possible number + * (i.e., only ones). For correct operation, the most significant bit of D + * has to be set. Put the quotient in Q and the remainder in R. + */ +#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \ + do { \ + mpi_limb_t _q, _ql, _r; \ + mpi_limb_t _xh, _xl; \ + umul_ppmm (_q, _ql, (nh), (di)); \ + _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \ + umul_ppmm (_xh, _xl, _q, (d)); \ + sub_ddmmss (_xh, _r, (nh), (nl), _xh, _xl); \ + if( _xh ) { \ + sub_ddmmss (_xh, _r, _xh, _r, 0, (d)); \ + _q++; \ + if( _xh) { \ + sub_ddmmss (_xh, _r, _xh, _r, 0, (d)); \ + _q++; \ + } \ + } \ + if( _r >= (d) ) { \ + _r -= (d); \ + _q++; \ + } \ + (r) = _r; \ + (q) = _q; \ + } while (0) + + +/*-- mpiutil.c --*/ +#define mpi_alloc_limb_space(n,f) _gcry_mpi_alloc_limb_space((n),(f)) +mpi_ptr_t _gcry_mpi_alloc_limb_space( unsigned nlimbs, int sec ); +void _gcry_mpi_free_limb_space( mpi_ptr_t a, unsigned int nlimbs ); +void _gcry_mpi_assign_limb_space( gcry_mpi_t a, mpi_ptr_t ap, unsigned nlimbs ); + +/*-- mpi-bit.c --*/ +#define mpi_rshift_limbs(a,n) _gcry_mpi_rshift_limbs ((a), (n)) +#define mpi_lshift_limbs(a,n) _gcry_mpi_lshift_limbs ((a), (n)) + +void _gcry_mpi_rshift_limbs( gcry_mpi_t a, unsigned int count ); +void _gcry_mpi_lshift_limbs( gcry_mpi_t a, unsigned int count ); + + +/*-- mpih-add.c --*/ +mpi_limb_t _gcry_mpih_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb ); +mpi_limb_t _gcry_mpih_add_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_ptr_t s2_ptr, mpi_size_t size); +mpi_limb_t _gcry_mpih_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, + mpi_ptr_t s2_ptr, mpi_size_t s2_size); + +/*-- mpih-sub.c --*/ +mpi_limb_t _gcry_mpih_sub_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb ); +mpi_limb_t _gcry_mpih_sub_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_ptr_t s2_ptr, mpi_size_t size); +mpi_limb_t _gcry_mpih_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, + mpi_ptr_t s2_ptr, mpi_size_t s2_size); + +/*-- mpih-cmp.c --*/ +int _gcry_mpih_cmp( mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size ); + +/*-- mpih-mul.c --*/ + +struct karatsuba_ctx { + struct karatsuba_ctx *next; + mpi_ptr_t tspace; + unsigned int tspace_nlimbs; + mpi_size_t tspace_size; + mpi_ptr_t tp; + unsigned int 
tp_nlimbs; + mpi_size_t tp_size; +}; + +void _gcry_mpih_release_karatsuba_ctx( struct karatsuba_ctx *ctx ); + +mpi_limb_t _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb); +mpi_limb_t _gcry_mpih_submul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb); +void _gcry_mpih_mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, + mpi_size_t size); +mpi_limb_t _gcry_mpih_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, + mpi_ptr_t vp, mpi_size_t vsize); +void _gcry_mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size ); +void _gcry_mpih_sqr_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, + mpi_ptr_t tspace); + +void _gcry_mpih_mul_karatsuba_case( mpi_ptr_t prodp, + mpi_ptr_t up, mpi_size_t usize, + mpi_ptr_t vp, mpi_size_t vsize, + struct karatsuba_ctx *ctx ); + + +/*-- mpih-mul_1.c (or xxx/cpu/ *.S) --*/ +mpi_limb_t _gcry_mpih_mul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb); + +/*-- mpih-div.c --*/ +mpi_limb_t _gcry_mpih_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, + mpi_limb_t divisor_limb); +mpi_limb_t _gcry_mpih_divrem( mpi_ptr_t qp, mpi_size_t qextra_limbs, + mpi_ptr_t np, mpi_size_t nsize, + mpi_ptr_t dp, mpi_size_t dsize); +mpi_limb_t _gcry_mpih_divmod_1( mpi_ptr_t quot_ptr, + mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, + mpi_limb_t divisor_limb); + +/*-- mpih-shift.c --*/ +mpi_limb_t _gcry_mpih_lshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, + unsigned cnt); +mpi_limb_t _gcry_mpih_rshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, + unsigned cnt); + + +/* Define stuff for longlong.h. */ +#define W_TYPE_SIZE BITS_PER_MPI_LIMB + typedef mpi_limb_t UWtype; + typedef unsigned int UHWtype; +#if defined (__GNUC__) + typedef unsigned int UQItype __attribute__ ((mode (QI))); + typedef int SItype __attribute__ ((mode (SI))); + typedef unsigned int USItype __attribute__ ((mode (SI))); + typedef int DItype __attribute__ ((mode (DI))); + typedef unsigned int UDItype __attribute__ ((mode (DI))); +#else + typedef unsigned char UQItype; + typedef long SItype; + typedef unsigned long USItype; +#endif + +#ifdef __GNUC__ +#include "mpi-inline.h" +#endif + +#endif /*G10_MPI_INTERNAL_H*/ diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-inv.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-inv.c new file mode 100644 index 0000000..684384f --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-inv.c @@ -0,0 +1,269 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-inv.c - MPI functions + * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "g10lib.h" + +/**************** + * Calculate the multiplicative inverse X of A mod N + * That is: Find the solution x for + * 1 = (a*x) mod n + */ +int +gcry_mpi_invm( gcry_mpi_t x, gcry_mpi_t a, gcry_mpi_t n ) +{ +#if 0 + gcry_mpi_t u, v, u1, u2, u3, v1, v2, v3, q, t1, t2, t3; + gcry_mpi_t ta, tb, tc; + + u = mpi_copy(a); + v = mpi_copy(n); + u1 = mpi_alloc_set_ui(1); + u2 = mpi_alloc_set_ui(0); + u3 = mpi_copy(u); + v1 = mpi_alloc_set_ui(0); + v2 = mpi_alloc_set_ui(1); + v3 = mpi_copy(v); + q = mpi_alloc( mpi_get_nlimbs(u)+1 ); + t1 = mpi_alloc( mpi_get_nlimbs(u)+1 ); + t2 = mpi_alloc( mpi_get_nlimbs(u)+1 ); + t3 = mpi_alloc( mpi_get_nlimbs(u)+1 ); + while( mpi_cmp_ui( v3, 0 ) ) { + mpi_fdiv_q( q, u3, v3 ); + mpi_mul(t1, v1, q); mpi_mul(t2, v2, q); mpi_mul(t3, v3, q); + mpi_sub(t1, u1, t1); mpi_sub(t2, u2, t2); mpi_sub(t3, u3, t3); + mpi_set(u1, v1); mpi_set(u2, v2); mpi_set(u3, v3); + mpi_set(v1, t1); mpi_set(v2, t2); mpi_set(v3, t3); + } + /* log_debug("result:\n"); + log_mpidump("q =", q ); + log_mpidump("u1=", u1); + log_mpidump("u2=", u2); + log_mpidump("u3=", u3); + log_mpidump("v1=", v1); + log_mpidump("v2=", v2); */ + mpi_set(x, u1); + + mpi_free(u1); + mpi_free(u2); + mpi_free(u3); + mpi_free(v1); + mpi_free(v2); + mpi_free(v3); + mpi_free(q); + mpi_free(t1); + mpi_free(t2); + mpi_free(t3); + mpi_free(u); + mpi_free(v); +#elif 0 + /* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X) + * modified according to Michael Penk's solution for Exercise 35 */ + + /* FIXME: we can simplify this in most cases (see Knuth) */ + gcry_mpi_t u, v, u1, u2, u3, v1, v2, v3, t1, t2, t3; + unsigned k; + int sign; + + u = mpi_copy(a); + v = mpi_copy(n); + for(k=0; !mpi_test_bit(u,0) && !mpi_test_bit(v,0); k++ ) { + mpi_rshift(u, u, 1); + mpi_rshift(v, v, 1); + } + + + u1 = mpi_alloc_set_ui(1); + u2 = mpi_alloc_set_ui(0); + u3 = mpi_copy(u); + v1 = mpi_copy(v); /* !-- used as const 1 */ + v2 = mpi_alloc( mpi_get_nlimbs(u) ); mpi_sub( v2, u1, u ); + v3 = mpi_copy(v); + if( mpi_test_bit(u, 0) ) { /* u is odd */ + t1 = mpi_alloc_set_ui(0); + t2 = mpi_alloc_set_ui(1); t2->sign = 1; + t3 = mpi_copy(v); t3->sign = !t3->sign; + goto Y4; + } + else { + t1 = mpi_alloc_set_ui(1); + t2 = mpi_alloc_set_ui(0); + t3 = mpi_copy(u); + } + do { + do { + if( mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0) ) { /* one is odd */ + mpi_add(t1, t1, v); + mpi_sub(t2, t2, u); + } + mpi_rshift(t1, t1, 1); + mpi_rshift(t2, t2, 1); + mpi_rshift(t3, t3, 1); + Y4: + ; + } while( !mpi_test_bit( t3, 0 ) ); /* while t3 is even */ + + if( !t3->sign ) { + mpi_set(u1, t1); + mpi_set(u2, t2); + mpi_set(u3, t3); + } + else { + mpi_sub(v1, v, t1); + sign = u->sign; u->sign = !u->sign; + mpi_sub(v2, u, t2); + u->sign = sign; + sign = t3->sign; t3->sign = !t3->sign; + mpi_set(v3, t3); + t3->sign = sign; + } + mpi_sub(t1, u1, v1); + mpi_sub(t2, u2, v2); + mpi_sub(t3, u3, v3); + if( t1->sign ) { + mpi_add(t1, t1, v); + mpi_sub(t2, t2, u); + } + } while( mpi_cmp_ui( t3, 0 ) ); /* while t3 != 0 */ + /* mpi_lshift( u3, k ); */ + mpi_set(x, u1); + + mpi_free(u1); + mpi_free(u2); + mpi_free(u3); + mpi_free(v1); + mpi_free(v2); + mpi_free(v3); + mpi_free(t1); + mpi_free(t2); + mpi_free(t3); +#else + /* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X) + * modified according to Michael Penk's solution for Exercise 35 + * with further enhancement */ + gcry_mpi_t u, v, u1, u2=NULL, u3, v1, v2=NULL, v3, t1, t2=NULL, t3; + unsigned 
k; + int sign; + int odd ; + + u = mpi_copy(a); + v = mpi_copy(n); + + for(k=0; !mpi_test_bit(u,0) && !mpi_test_bit(v,0); k++ ) { + mpi_rshift(u, u, 1); + mpi_rshift(v, v, 1); + } + odd = mpi_test_bit(v,0); + + u1 = mpi_alloc_set_ui(1); + if( !odd ) + u2 = mpi_alloc_set_ui(0); + u3 = mpi_copy(u); + v1 = mpi_copy(v); + if( !odd ) { + v2 = mpi_alloc( mpi_get_nlimbs(u) ); + mpi_sub( v2, u1, u ); /* U is used as const 1 */ + } + v3 = mpi_copy(v); + if( mpi_test_bit(u, 0) ) { /* u is odd */ + t1 = mpi_alloc_set_ui(0); + if( !odd ) { + t2 = mpi_alloc_set_ui(1); t2->sign = 1; + } + t3 = mpi_copy(v); t3->sign = !t3->sign; + goto Y4; + } + else { + t1 = mpi_alloc_set_ui(1); + if( !odd ) + t2 = mpi_alloc_set_ui(0); + t3 = mpi_copy(u); + } + do { + do { + if( !odd ) { + if( mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0) ) { /* one is odd */ + mpi_add(t1, t1, v); + mpi_sub(t2, t2, u); + } + mpi_rshift(t1, t1, 1); + mpi_rshift(t2, t2, 1); + mpi_rshift(t3, t3, 1); + } + else { + if( mpi_test_bit(t1, 0) ) + mpi_add(t1, t1, v); + mpi_rshift(t1, t1, 1); + mpi_rshift(t3, t3, 1); + } + Y4: + ; + } while( !mpi_test_bit( t3, 0 ) ); /* while t3 is even */ + + if( !t3->sign ) { + mpi_set(u1, t1); + if( !odd ) + mpi_set(u2, t2); + mpi_set(u3, t3); + } + else { + mpi_sub(v1, v, t1); + sign = u->sign; u->sign = !u->sign; + if( !odd ) + mpi_sub(v2, u, t2); + u->sign = sign; + sign = t3->sign; t3->sign = !t3->sign; + mpi_set(v3, t3); + t3->sign = sign; + } + mpi_sub(t1, u1, v1); + if( !odd ) + mpi_sub(t2, u2, v2); + mpi_sub(t3, u3, v3); + if( t1->sign ) { + mpi_add(t1, t1, v); + if( !odd ) + mpi_sub(t2, t2, u); + } + } while( mpi_cmp_ui( t3, 0 ) ); /* while t3 != 0 */ + /* mpi_lshift( u3, k ); */ + mpi_set(x, u1); + + mpi_free(u1); + mpi_free(v1); + mpi_free(t1); + if( !odd ) { + mpi_free(u2); + mpi_free(v2); + mpi_free(t2); + } + mpi_free(u3); + mpi_free(v3); + mpi_free(t3); + + mpi_free(u); + mpi_free(v); +#endif + return 1; +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-mod.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-mod.c new file mode 100644 index 0000000..6d9a6a3 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-mod.c @@ -0,0 +1,186 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-mod.c - Modular reduction + Copyright (C) 1998, 1999, 2001, 2002, 2003, + 2007 Free Software Foundation, Inc. + + This file is part of Libgcrypt. + + Libgcrypt is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as + published by the Free Software Foundation; either version 2.1 of + the License, or (at your option) any later version. + + Libgcrypt is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + USA. */ + + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> + +#include "mpi-internal.h" +#include "longlong.h" +#include "g10lib.h" + + +/* Context used with Barrett reduction. */ +struct barrett_ctx_s +{ + gcry_mpi_t m; /* The modulus - may not be modified. */ + int m_copied; /* If true, M needs to be released. */ + int k; + gcry_mpi_t y; + gcry_mpi_t r1; /* Helper MPI. */ + gcry_mpi_t r2; /* Helper MPI. 
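gcry_mpi_invm above looks for x with 1 = (a*x) mod n using the binary (Penk) variant of the extended Euclidean algorithm. A minimal sketch of the same relation on machine words, using the plain iterative extended Euclid instead of the binary form; the helper name inv_mod and the operands are made up for the sketch.

#include <stdint.h>
#include <stdio.h>

/* The relation gcry_mpi_invm establishes, 1 == (a*x) mod n, computed here
 * with the plain extended Euclidean algorithm on machine words instead of
 * the binary/Penk variant used on limb arrays above. */
static int64_t
inv_mod (int64_t a, int64_t n)
{
  int64_t r0 = n, r1 = a % n, x0 = 0, x1 = 1;

  while (r1)
    {
      int64_t q = r0 / r1, t;
      t = r0 - q * r1; r0 = r1; r1 = t;
      t = x0 - q * x1; x0 = x1; x1 = t;
    }
  if (r0 != 1)
    return 0;                       /* a is not invertible mod n */
  return x0 < 0 ? x0 + n : x0;
}

int
main (void)
{
  int64_t a = 17, n = 3120, x = inv_mod (a, n);

  printf ("x = %lld, a*x mod n = %lld\n",
          (long long) x, (long long) (a * x % n));   /* x = 2753, 1 */
  return 0;
}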
*/ + gcry_mpi_t r3; /* Helper MPI allocated on demand. */ +}; + + + +void +_gcry_mpi_mod (gcry_mpi_t rem, gcry_mpi_t dividend, gcry_mpi_t divisor) +{ + _gcry_mpi_fdiv_r (rem, dividend, divisor); + rem->sign = 0; +} + + +/* This function returns a new context for Barrett based operations on + the modulus M. This context needs to be released using + _gcry_mpi_barrett_free. If COPY is true M will be transferred to + the context and the user may change M. If COPY is false, M may not + be changed until gcry_mpi_barrett_free has been called. */ +mpi_barrett_t +_gcry_mpi_barrett_init (gcry_mpi_t m, int copy) +{ + mpi_barrett_t ctx; + gcry_mpi_t tmp; + + mpi_normalize (m); + ctx = gcry_xcalloc (1, sizeof *ctx); + + if (copy) + { + ctx->m = mpi_copy (m); + ctx->m_copied = 1; + } + else + ctx->m = m; + + ctx->k = mpi_get_nlimbs (m); + tmp = mpi_alloc (ctx->k + 1); + + /* Barrett precalculation: y = floor(b^(2k) / m). */ + mpi_set_ui (tmp, 1); + mpi_lshift_limbs (tmp, 2 * ctx->k); + mpi_fdiv_q (tmp, tmp, m); + + ctx->y = tmp; + ctx->r1 = mpi_alloc ( 2 * ctx->k + 1 ); + ctx->r2 = mpi_alloc ( 2 * ctx->k + 1 ); + + return ctx; +} + +void +_gcry_mpi_barrett_free (mpi_barrett_t ctx) +{ + if (ctx) + { + mpi_free (ctx->y); + mpi_free (ctx->r1); + mpi_free (ctx->r2); + if (ctx->r3) + mpi_free (ctx->r3); + if (ctx->m_copied) + mpi_free (ctx->m); + gcry_free (ctx); + } +} + + +/* R = X mod M + + Using Barrett reduction. Before using this function + _gcry_mpi_barrett_init must have been called to do the + precalculations. CTX is the context created by this precalculation + and also conveys M. If the Barret reduction could no be done a + starightforward reduction method is used. + + We assume that these conditions are met: + Input: x =(x_2k-1 ...x_0)_b + m =(m_k-1 ....m_0)_b with m_k-1 != 0 + Output: r = x mod m + */ +void +_gcry_mpi_mod_barrett (gcry_mpi_t r, gcry_mpi_t x, mpi_barrett_t ctx) +{ + gcry_mpi_t m = ctx->m; + int k = ctx->k; + gcry_mpi_t y = ctx->y; + gcry_mpi_t r1 = ctx->r1; + gcry_mpi_t r2 = ctx->r2; + + mpi_normalize (x); + if (mpi_get_nlimbs (x) > 2*k ) + { + mpi_mod (r, x, m); + return; + } + + /* 1. q1 = floor( x / b^k-1) + * q2 = q1 * y + * q3 = floor( q2 / b^k+1 ) + * Actually, we don't need qx, we can work direct on r2 + */ + mpi_set ( r2, x ); + mpi_rshift_limbs ( r2, k-1 ); + mpi_mul ( r2, r2, y ); + mpi_rshift_limbs ( r2, k+1 ); + + /* 2. r1 = x mod b^k+1 + * r2 = q3 * m mod b^k+1 + * r = r1 - r2 + * 3. if r < 0 then r = r + b^k+1 + */ + mpi_set ( r1, x ); + if ( r1->nlimbs > k+1 ) /* Quick modulo operation. */ + r1->nlimbs = k+1; + mpi_mul ( r2, r2, m ); + if ( r2->nlimbs > k+1 ) /* Quick modulo operation. */ + r2->nlimbs = k+1; + mpi_sub ( r, r1, r2 ); + + if ( mpi_is_neg( r ) ) + { + if (!ctx->r3) + { + ctx->r3 = mpi_alloc ( k + 2 ); + mpi_set_ui (ctx->r3, 1); + mpi_lshift_limbs (ctx->r3, k + 1 ); + } + mpi_add ( r, r, ctx->r3 ); + } + + /* 4. while r >= m do r = r - m */ + while ( mpi_cmp( r, m ) >= 0 ) + mpi_sub ( r, r, m ); + +} + + +void +_gcry_mpi_mul_barrett (gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v, + mpi_barrett_t ctx) +{ + gcry_mpi_mul (w, u, v); + mpi_mod_barrett (w, w, ctx); +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-mpow.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-mpow.c new file mode 100644 index 0000000..afef433 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-mpow.c @@ -0,0 +1,225 @@ +/* This file was automatically imported with + import_gcry.py. 
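_gcry_mpi_barrett_init and _gcry_mpi_mod_barrett above implement the textbook reduction: precompute y = floor(b^(2k)/m) once per modulus, estimate the quotient from the high part of x, then correct with a few subtractions. A minimal sketch with a single 32-bit limb (k = 1, b = 2^32); the function name barrett_mod, the operands, and the use of __uint128_t are assumptions of the sketch.

#include <stdint.h>
#include <stdio.h>

/* Barrett reduction with a single 32-bit limb (k = 1, base b = 2^32):
 * precompute y = floor(b^(2k)/m), estimate q3 = floor(x*y / b^(k+1)),
 * subtract q3*m, then fix up with at most a few subtractions -- the same
 * steps numbered 1-4 in _gcry_mpi_mod_barrett. */
static uint32_t
barrett_mod (uint64_t x, uint32_t m, uint64_t y)
{
  uint64_t q3 = (uint64_t) (((__uint128_t) x * y) >> 64);  /* quotient estimate */
  uint64_t r = x - q3 * (uint64_t) m;                      /* r is in [0, 3m)   */

  while (r >= m)                                           /* step 4            */
    r -= m;
  return (uint32_t) r;
}

int
main (void)
{
  uint32_t m = 0x87654321u;            /* modulus, top bit set               */
  uint64_t y = ~(uint64_t) 0 / m;      /* floor(b^2/m); exact since m is odd */
  uint64_t x = 0x123456789abcdef0ULL;

  printf ("barrett %08x == direct %08x\n",
          (unsigned) barrett_mod (x, m, y), (unsigned) (x % m));
  return 0;
}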
Please don't modify it */ +/* mpi-mpow.c - MPI functions + * Copyright (C) 1998, 1999, 2001, 2002, 2003 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> + +#include "mpi-internal.h" +#include "longlong.h" +#include "g10lib.h" + + +/* Barrett is slower than the classical way. It can be tweaked by + * using partial multiplications + */ +/*#define USE_BARRETT*/ + + + +#ifdef USE_BARRETT +static void barrett_mulm( gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v, gcry_mpi_t m, gcry_mpi_t y, int k, gcry_mpi_t r1, gcry_mpi_t r2 ); +static gcry_mpi_t init_barrett( gcry_mpi_t m, int *k, gcry_mpi_t *r1, gcry_mpi_t *r2 ); +static int calc_barrett( gcry_mpi_t r, gcry_mpi_t x, gcry_mpi_t m, gcry_mpi_t y, int k, gcry_mpi_t r1, gcry_mpi_t r2 ); +#else +#define barrett_mulm( w, u, v, m, y, k, r1, r2 ) gcry_mpi_mulm( (w), (u), (v), (m) ) +#endif + + +static int +build_index( gcry_mpi_t *exparray, int k, int i, int t ) +{ + int j, bitno; + int idx = 0; + + bitno = t-i; + for(j=k-1; j >= 0; j-- ) { + idx <<= 1; + if( mpi_test_bit( exparray[j], bitno ) ) + idx |= 1; + } + /*log_debug("t=%d i=%d idx=%d\n", t, i, idx );*/ + return idx; +} + +/**************** + * RES = (BASE[0] ^ EXP[0]) * (BASE[1] ^ EXP[1]) * ... 
* mod M + */ +void +_gcry_mpi_mulpowm( gcry_mpi_t res, gcry_mpi_t *basearray, gcry_mpi_t *exparray, gcry_mpi_t m) +{ + int k; /* number of elements */ + int t; /* bit size of largest exponent */ + int i, j, idx; + gcry_mpi_t *G; /* table with precomputed values of size 2^k */ + gcry_mpi_t tmp; +#ifdef USE_BARRETT + gcry_mpi_t barrett_y, barrett_r1, barrett_r2; + int barrett_k; +#endif + + for(k=0; basearray[k]; k++ ) + ; + gcry_assert(k); + for(t=0, i=0; (tmp=exparray[i]); i++ ) { + /*log_mpidump("exp: ", tmp );*/ + j = mpi_get_nbits(tmp); + if( j > t ) + t = j; + } + /*log_mpidump("mod: ", m );*/ + gcry_assert (i==k); + gcry_assert (t); + gcry_assert (k < 10); + + G = gcry_xcalloc( (1<<k) , sizeof *G ); +#ifdef USE_BARRETT + barrett_y = init_barrett( m, &barrett_k, &barrett_r1, &barrett_r2 ); +#endif + /* and calculate */ + tmp = mpi_alloc( mpi_get_nlimbs(m)+1 ); + mpi_set_ui( res, 1 ); + for(i = 1; i <= t; i++ ) { + barrett_mulm(tmp, res, res, m, barrett_y, barrett_k, + barrett_r1, barrett_r2 ); + idx = build_index( exparray, k, i, t ); + gcry_assert (idx >= 0 && idx < (1<<k)); + if( !G[idx] ) { + if( !idx ) + G[0] = mpi_alloc_set_ui( 1 ); + else { + for(j=0; j < k; j++ ) { + if( (idx & (1<<j) ) ) { + if( !G[idx] ) + G[idx] = mpi_copy( basearray[j] ); + else + barrett_mulm( G[idx], G[idx], basearray[j], + m, barrett_y, barrett_k, barrett_r1, barrett_r2 ); + } + } + if( !G[idx] ) + G[idx] = mpi_alloc(0); + } + } + barrett_mulm(res, tmp, G[idx], m, barrett_y, barrett_k, barrett_r1, barrett_r2 ); + } + + /* cleanup */ + mpi_free(tmp); +#ifdef USE_BARRETT + mpi_free(barrett_y); + mpi_free(barrett_r1); + mpi_free(barrett_r2); +#endif + for(i=0; i < (1<<k); i++ ) + mpi_free(G[i]); + gcry_free(G); +} + + + +#ifdef USE_BARRETT +static void +barrett_mulm( gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v, gcry_mpi_t m, gcry_mpi_t y, int k, gcry_mpi_t r1, gcry_mpi_t r2 ) +{ + mpi_mul(w, u, v); + if( calc_barrett( w, w, m, y, k, r1, r2 ) ) + mpi_fdiv_r( w, w, m ); +} + +/**************** + * Barrett precalculation: y = floor(b^(2k) / m) + */ +static gcry_mpi_t +init_barrett( gcry_mpi_t m, int *k, gcry_mpi_t *r1, gcry_mpi_t *r2 ) +{ + gcry_mpi_t tmp; + + mpi_normalize( m ); + *k = mpi_get_nlimbs( m ); + tmp = mpi_alloc( *k + 1 ); + mpi_set_ui( tmp, 1 ); + mpi_lshift_limbs( tmp, 2 * *k ); + mpi_fdiv_q( tmp, tmp, m ); + *r1 = mpi_alloc( 2* *k + 1 ); + *r2 = mpi_alloc( 2* *k + 1 ); + return tmp; +} + +/**************** + * Barrett reduction: We assume that these conditions are met: + * Given x =(x_2k-1 ...x_0)_b + * m =(m_k-1 ....m_0)_b with m_k-1 != 0 + * Output r = x mod m + * Before using this function init_barret must be used to calucalte y and k. + * Returns: false = no error + * true = can't perform barret reduction + */ +static int +calc_barrett( gcry_mpi_t r, gcry_mpi_t x, gcry_mpi_t m, gcry_mpi_t y, int k, gcry_mpi_t r1, gcry_mpi_t r2 ) +{ + int xx = k > 3 ? k-3:0; + + mpi_normalize( x ); + if( mpi_get_nlimbs(x) > 2*k ) + return 1; /* can't do it */ + + /* 1. q1 = floor( x / b^k-1) + * q2 = q1 * y + * q3 = floor( q2 / b^k+1 ) + * Actually, we don't need qx, we can work direct on r2 + */ + mpi_set( r2, x ); + mpi_rshift_limbs( r2, k-1 ); + mpi_mul( r2, r2, y ); + mpi_rshift_limbs( r2, k+1 ); + + /* 2. r1 = x mod b^k+1 + * r2 = q3 * m mod b^k+1 + * r = r1 - r2 + * 3. 
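_gcry_mpi_mulpowm above evaluates prod(base[j]^exp[j]) mod m by walking one bit column of all exponents per step (build_index) and multiplying by a precomputed table entry G[idx]. A minimal sketch of the same idea for two bases on 64-bit words; the modulus, operands, and the mulmod helper are made up, and __uint128_t is assumed.

#include <stdint.h>
#include <stdio.h>

/* Simultaneous exponentiation for two bases: G[idx] caches
 * base[0]^(idx&1) * base[1]^(idx>>1) mod m; each step squares the
 * accumulator and multiplies by the entry selected by the current bits
 * of both exponents, as build_index does for k exponents. */
static uint64_t
mulmod (uint64_t a, uint64_t b, uint64_t m)
{
  return (uint64_t) ((__uint128_t) a * b % m);
}

int
main (void)
{
  uint64_t m = 1000000007ULL;
  uint64_t base[2] = { 12345, 67890 };
  uint64_t expo[2] = { 0xdeadbeefULL, 0xcafe1234ULL };
  uint64_t G[4], res = 1;
  int t = 32, i;                                 /* bits of the larger exponent */

  G[0] = 1;
  G[1] = base[0] % m;
  G[2] = base[1] % m;
  G[3] = mulmod (G[1], G[2], m);

  for (i = t - 1; i >= 0; i--)
    {
      int idx = (int) (((expo[0] >> i) & 1) | (((expo[1] >> i) & 1) << 1));
      res = mulmod (res, res, m);                /* square                      */
      res = mulmod (res, G[idx], m);             /* multiply by selected entry  */
    }
  printf ("%llu\n", (unsigned long long) res);   /* base0^e0 * base1^e1 mod m   */
  return 0;
}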
if r < 0 then r = r + b^k+1 + */ + mpi_set( r1, x ); + if( r1->nlimbs > k+1 ) /* quick modulo operation */ + r1->nlimbs = k+1; + mpi_mul( r2, r2, m ); + if( r2->nlimbs > k+1 ) /* quick modulo operation */ + r2->nlimbs = k+1; + mpi_sub( r, r1, r2 ); + + if( mpi_is_neg( r ) ) { + gcry_mpi_t tmp; + + tmp = mpi_alloc( k + 2 ); + mpi_set_ui( tmp, 1 ); + mpi_lshift_limbs( tmp, k+1 ); + mpi_add( r, r, tmp ); + mpi_free(tmp); + } + + /* 4. while r >= m do r = r - m */ + while( mpi_cmp( r, m ) >= 0 ) + mpi_sub( r, r, m ); + + return 0; +} +#endif /* USE_BARRETT */ diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-mul.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-mul.c new file mode 100644 index 0000000..d305547 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-mul.c @@ -0,0 +1,214 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-mul.c - MPI functions + * Copyright (C) 1994, 1996, 1998, 2001, 2002, 2003 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" + + +void +gcry_mpi_mul_ui( gcry_mpi_t prod, gcry_mpi_t mult, unsigned long small_mult ) +{ + mpi_size_t size, prod_size; + mpi_ptr_t prod_ptr; + mpi_limb_t cy; + int sign; + + size = mult->nlimbs; + sign = mult->sign; + + if( !size || !small_mult ) { + prod->nlimbs = 0; + prod->sign = 0; + return; + } + + prod_size = size + 1; + if( prod->alloced < prod_size ) + mpi_resize( prod, prod_size ); + prod_ptr = prod->d; + + cy = _gcry_mpih_mul_1( prod_ptr, mult->d, size, (mpi_limb_t)small_mult ); + if( cy ) + prod_ptr[size++] = cy; + prod->nlimbs = size; + prod->sign = sign; +} + + +void +gcry_mpi_mul_2exp( gcry_mpi_t w, gcry_mpi_t u, unsigned long cnt) +{ + mpi_size_t usize, wsize, limb_cnt; + mpi_ptr_t wp; + mpi_limb_t wlimb; + int usign, wsign; + + usize = u->nlimbs; + usign = u->sign; + + if( !usize ) { + w->nlimbs = 0; + w->sign = 0; + return; + } + + limb_cnt = cnt / BITS_PER_MPI_LIMB; + wsize = usize + limb_cnt + 1; + if( w->alloced < wsize ) + mpi_resize(w, wsize ); + wp = w->d; + wsize = usize + limb_cnt; + wsign = usign; + + cnt %= BITS_PER_MPI_LIMB; + if( cnt ) { + wlimb = _gcry_mpih_lshift( wp + limb_cnt, u->d, usize, cnt ); + if( wlimb ) { + wp[wsize] = wlimb; + wsize++; + } + } + else { + MPN_COPY_DECR( wp + limb_cnt, u->d, usize ); + } + + /* Zero all whole limbs at low end. 
Do it here and not before calling + * mpn_lshift, not to lose for U == W. */ + MPN_ZERO( wp, limb_cnt ); + + w->nlimbs = wsize; + w->sign = wsign; +} + + +void +gcry_mpi_mul( gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v) +{ + mpi_size_t usize, vsize, wsize; + mpi_ptr_t up, vp, wp; + mpi_limb_t cy; + int usign, vsign, usecure, vsecure, sign_product; + int assign_wp=0; + mpi_ptr_t tmp_limb=NULL; + unsigned int tmp_limb_nlimbs = 0; + + if( u->nlimbs < v->nlimbs ) { /* Swap U and V. */ + usize = v->nlimbs; + usign = v->sign; + usecure = mpi_is_secure(v); + up = v->d; + vsize = u->nlimbs; + vsign = u->sign; + vsecure = mpi_is_secure(u); + vp = u->d; + } + else { + usize = u->nlimbs; + usign = u->sign; + usecure = mpi_is_secure(u); + up = u->d; + vsize = v->nlimbs; + vsign = v->sign; + vsecure = mpi_is_secure(v); + vp = v->d; + } + sign_product = usign ^ vsign; + wp = w->d; + + /* Ensure W has space enough to store the result. */ + wsize = usize + vsize; + if ( !mpi_is_secure (w) && (mpi_is_secure (u) || mpi_is_secure (v)) ) { + /* w is not allocated in secure space but u or v is. To make sure + * that no temporray results are stored in w, we temporary use + * a newly allocated limb space for w */ + wp = mpi_alloc_limb_space( wsize, 1 ); + assign_wp = 2; /* mark it as 2 so that we can later copy it back to + * mormal memory */ + } + else if( w->alloced < wsize ) { + if( wp == up || wp == vp ) { + wp = mpi_alloc_limb_space( wsize, mpi_is_secure(w) ); + assign_wp = 1; + } + else { + mpi_resize(w, wsize ); + wp = w->d; + } + } + else { /* Make U and V not overlap with W. */ + if( wp == up ) { + /* W and U are identical. Allocate temporary space for U. */ + tmp_limb_nlimbs = usize; + up = tmp_limb = mpi_alloc_limb_space( usize, usecure ); + /* Is V identical too? Keep it identical with U. */ + if( wp == vp ) + vp = up; + /* Copy to the temporary space. */ + MPN_COPY( up, wp, usize ); + } + else if( wp == vp ) { + /* W and V are identical. Allocate temporary space for V. */ + tmp_limb_nlimbs = vsize; + vp = tmp_limb = mpi_alloc_limb_space( vsize, vsecure ); + /* Copy to the temporary space. */ + MPN_COPY( vp, wp, vsize ); + } + } + + if( !vsize ) + wsize = 0; + else { + cy = _gcry_mpih_mul( wp, up, usize, vp, vsize ); + wsize -= cy? 0:1; + } + + if( assign_wp ) { + if (assign_wp == 2) { + /* copy the temp wp from secure memory back to normal memory */ + mpi_ptr_t tmp_wp = mpi_alloc_limb_space (wsize, 0); + MPN_COPY (tmp_wp, wp, wsize); + _gcry_mpi_free_limb_space (wp, 0); + wp = tmp_wp; + } + _gcry_mpi_assign_limb_space( w, wp, wsize ); + } + w->nlimbs = wsize; + w->sign = sign_product; + if( tmp_limb ) + _gcry_mpi_free_limb_space (tmp_limb, tmp_limb_nlimbs); +} + + +void +gcry_mpi_mulm( gcry_mpi_t w, gcry_mpi_t u, gcry_mpi_t v, gcry_mpi_t m) +{ + gcry_mpi_mul(w, u, v); + _gcry_mpi_fdiv_r( w, w, m ); +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpi-pow.c b/grub-core/lib/libgcrypt-grub/mpi/mpi-pow.c new file mode 100644 index 0000000..f6b3f58 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpi-pow.c @@ -0,0 +1,340 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpi-pow.c - MPI functions for exponentiation + * Copyright (C) 1994, 1996, 1998, 2000, 2002 + * 2003 Free Software Foundation, Inc. + * 2013 g10 Code GmbH + * + * This file is part of Libgcrypt. 
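gcry_mpi_mul_2exp above splits the shift count into whole limbs (the value is written that many positions higher and the low limbs stay zero) and a residual bit count handled by _gcry_mpih_lshift, which returns the bits spilling out of the top limb. A minimal sketch of that split on 32-bit limbs; the helper name lshift_bits and the operands are made up, and like the real helper it assumes 0 < bits < 32.

#include <stdint.h>
#include <stdio.h>

#define LIMB_BITS 32

/* Shift an n-limb value left by `bits` (0 < bits < 32) and return the
 * bits shifted out of the top limb, mirroring _gcry_mpih_lshift. */
static uint32_t
lshift_bits (uint32_t *wp, const uint32_t *up, int n, unsigned bits)
{
  uint32_t spill = up[n - 1] >> (LIMB_BITS - bits);
  int i;

  for (i = n - 1; i > 0; i--)
    wp[i] = (up[i] << bits) | (up[i - 1] >> (LIMB_BITS - bits));
  wp[0] = up[0] << bits;
  return spill;
}

int
main (void)
{
  uint32_t u[2] = { 0x89abcdefu, 0x01234567u };  /* 0x0123456789abcdef        */
  uint32_t w[4] = { 0, 0, 0, 0 };
  unsigned cnt = 36;
  unsigned limbs = cnt / LIMB_BITS, bits = cnt % LIMB_BITS;
  uint32_t spill = lshift_bits (w + limbs, u, 2, bits);

  if (spill)
    w[limbs + 2] = spill;                        /* extra top limb if needed  */
  printf ("%08x %08x %08x %08x\n", w[3], w[2], w[1], w[0]);
  /* prints 00000000 12345678 9abcdef0 00000000, i.e. the value shifted by 36 */
  return 0;
}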
+ * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "mpi-internal.h" +#include "longlong.h" + + +/**************** + * RES = BASE ^ EXPO mod MOD + */ +void +gcry_mpi_powm (gcry_mpi_t res, + gcry_mpi_t base, gcry_mpi_t expo, gcry_mpi_t mod) +{ + /* Pointer to the limbs of the arguments, their size and signs. */ + mpi_ptr_t rp, ep, mp, bp; + mpi_size_t esize, msize, bsize, rsize; + int msign, bsign, rsign; + /* Flags telling the secure allocation status of the arguments. */ + int esec, msec, bsec; + /* Size of the result including space for temporary values. */ + mpi_size_t size; + /* Helper. */ + int mod_shift_cnt; + int negative_result; + mpi_ptr_t mp_marker = NULL; + mpi_ptr_t bp_marker = NULL; + mpi_ptr_t ep_marker = NULL; + mpi_ptr_t xp_marker = NULL; + unsigned int mp_nlimbs = 0; + unsigned int bp_nlimbs = 0; + unsigned int ep_nlimbs = 0; + unsigned int xp_nlimbs = 0; + mpi_ptr_t tspace = NULL; + mpi_size_t tsize = 0; + + + esize = expo->nlimbs; + msize = mod->nlimbs; + size = 2 * msize; + msign = mod->sign; + + esec = mpi_is_secure(expo); + msec = mpi_is_secure(mod); + bsec = mpi_is_secure(base); + + rp = res->d; + ep = expo->d; + + if (!msize) + grub_fatal ("mpi division by zero"); + + if (!esize) + { + /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 depending + on if MOD equals 1. */ + res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; + if (res->nlimbs) + { + RESIZE_IF_NEEDED (res, 1); + rp = res->d; + rp[0] = 1; + } + res->sign = 0; + goto leave; + } + + /* Normalize MOD (i.e. make its most significant bit set) as + required by mpn_divrem. This will make the intermediate values + in the calculation slightly larger, but the correct result is + obtained after a final reduction using the original MOD value. */ + mp_nlimbs = msec? msize:0; + mp = mp_marker = mpi_alloc_limb_space(msize, msec); + count_leading_zeros (mod_shift_cnt, mod->d[msize-1]); + if (mod_shift_cnt) + _gcry_mpih_lshift (mp, mod->d, msize, mod_shift_cnt); + else + MPN_COPY( mp, mod->d, msize ); + + bsize = base->nlimbs; + bsign = base->sign; + if (bsize > msize) + { + /* The base is larger than the module. Reduce it. + + Allocate (BSIZE + 1) with space for remainder and quotient. + (The quotient is (bsize - msize + 1) limbs.) */ + bp_nlimbs = bsec ? (bsize + 1):0; + bp = bp_marker = mpi_alloc_limb_space( bsize + 1, bsec ); + MPN_COPY ( bp, base->d, bsize ); + /* We don't care about the quotient, store it above the + * remainder, at BP + MSIZE. 
*/ + _gcry_mpih_divrem( bp + msize, 0, bp, bsize, mp, msize ); + bsize = msize; + /* Canonicalize the base, since we are going to multiply with it + quite a few times. */ + MPN_NORMALIZE( bp, bsize ); + } + else + bp = base->d; + + if (!bsize) + { + res->nlimbs = 0; + res->sign = 0; + goto leave; + } + + + /* Make BASE, EXPO and MOD not overlap with RES. */ + if ( rp == bp ) + { + /* RES and BASE are identical. Allocate temp. space for BASE. */ + gcry_assert (!bp_marker); + bp_nlimbs = bsec? bsize:0; + bp = bp_marker = mpi_alloc_limb_space( bsize, bsec ); + MPN_COPY(bp, rp, bsize); + } + if ( rp == ep ) + { + /* RES and EXPO are identical. Allocate temp. space for EXPO. */ + ep_nlimbs = esec? esize:0; + ep = ep_marker = mpi_alloc_limb_space( esize, esec ); + MPN_COPY(ep, rp, esize); + } + if ( rp == mp ) + { + /* RES and MOD are identical. Allocate temporary space for MOD.*/ + gcry_assert (!mp_marker); + mp_nlimbs = msec?msize:0; + mp = mp_marker = mpi_alloc_limb_space( msize, msec ); + MPN_COPY(mp, rp, msize); + } + + /* Copy base to the result. */ + if (res->alloced < size) + { + mpi_resize (res, size); + rp = res->d; + } + MPN_COPY ( rp, bp, bsize ); + rsize = bsize; + rsign = bsign; + + /* Main processing. */ + { + mpi_size_t i; + mpi_ptr_t xp; + int c; + mpi_limb_t e; + mpi_limb_t carry_limb; + struct karatsuba_ctx karactx; + + xp_nlimbs = msec? (2 * (msize + 1)):0; + xp = xp_marker = mpi_alloc_limb_space( 2 * (msize + 1), msec ); + + memset( &karactx, 0, sizeof karactx ); + negative_result = (ep[0] & 1) && base->sign; + + i = esize - 1; + e = ep[i]; + count_leading_zeros (c, e); + e = (e << c) << 1; /* Shift the expo bits to the left, lose msb. */ + c = BITS_PER_MPI_LIMB - 1 - c; + + /* Main loop. + + Make the result be pointed to alternately by XP and RP. This + helps us avoid block copying, which would otherwise be + necessary with the overlap restrictions of + _gcry_mpih_divmod. With 50% probability the result after this + loop will be in the area originally pointed by RP (==RES->d), + and with 50% probability in the area originally pointed to by XP. */ + for (;;) + { + while (c) + { + mpi_ptr_t tp; + mpi_size_t xsize; + + /*mpih_mul_n(xp, rp, rp, rsize);*/ + if ( rsize < KARATSUBA_THRESHOLD ) + _gcry_mpih_sqr_n_basecase( xp, rp, rsize ); + else + { + if ( !tspace ) + { + tsize = 2 * rsize; + tspace = mpi_alloc_limb_space( tsize, 0 ); + } + else if ( tsize < (2*rsize) ) + { + _gcry_mpi_free_limb_space (tspace, 0); + tsize = 2 * rsize; + tspace = mpi_alloc_limb_space (tsize, 0 ); + } + _gcry_mpih_sqr_n (xp, rp, rsize, tspace); + } + + xsize = 2 * rsize; + if ( xsize > msize ) + { + _gcry_mpih_divrem(xp + msize, 0, xp, xsize, mp, msize); + xsize = msize; + } + + tp = rp; rp = xp; xp = tp; + rsize = xsize; + + /* To mitigate the Yarom/Falkner flush+reload cache + * side-channel attack on the RSA secret exponent, we do + * the multiplication regardless of the value of the + * high-bit of E. But to avoid this performance penalty + * we do it only if the exponent has been stored in secure + * memory and we can thus assume it is a secret exponent. 
*/ + if (esec || (mpi_limb_signed_t)e < 0) + { + /*mpih_mul( xp, rp, rsize, bp, bsize );*/ + if( bsize < KARATSUBA_THRESHOLD ) + _gcry_mpih_mul ( xp, rp, rsize, bp, bsize ); + else + _gcry_mpih_mul_karatsuba_case (xp, rp, rsize, bp, bsize, + &karactx); + + xsize = rsize + bsize; + if ( xsize > msize ) + { + _gcry_mpih_divrem(xp + msize, 0, xp, xsize, mp, msize); + xsize = msize; + } + } + if ( (mpi_limb_signed_t)e < 0 ) + { + tp = rp; rp = xp; xp = tp; + rsize = xsize; + } + e <<= 1; + c--; + } + + i--; + if ( i < 0 ) + break; + e = ep[i]; + c = BITS_PER_MPI_LIMB; + } + + /* We shifted MOD, the modulo reduction argument, left + MOD_SHIFT_CNT steps. Adjust the result by reducing it with the + original MOD. + + Also make sure the result is put in RES->d (where it already + might be, see above). */ + if ( mod_shift_cnt ) + { + carry_limb = _gcry_mpih_lshift( res->d, rp, rsize, mod_shift_cnt); + rp = res->d; + if ( carry_limb ) + { + rp[rsize] = carry_limb; + rsize++; + } + } + else if (res->d != rp) + { + MPN_COPY (res->d, rp, rsize); + rp = res->d; + } + + if ( rsize >= msize ) + { + _gcry_mpih_divrem(rp + msize, 0, rp, rsize, mp, msize); + rsize = msize; + } + + /* Remove any leading zero words from the result. */ + if ( mod_shift_cnt ) + _gcry_mpih_rshift( rp, rp, rsize, mod_shift_cnt); + MPN_NORMALIZE (rp, rsize); + + _gcry_mpih_release_karatsuba_ctx (&karactx ); + } + + /* Fixup for negative results. */ + if ( negative_result && rsize ) + { + if ( mod_shift_cnt ) + _gcry_mpih_rshift( mp, mp, msize, mod_shift_cnt); + _gcry_mpih_sub( rp, mp, msize, rp, rsize); + rsize = msize; + rsign = msign; + MPN_NORMALIZE(rp, rsize); + } + gcry_assert (res->d == rp); + res->nlimbs = rsize; + res->sign = rsign; + + leave: + if (mp_marker) + _gcry_mpi_free_limb_space( mp_marker, mp_nlimbs ); + if (bp_marker) + _gcry_mpi_free_limb_space( bp_marker, bp_nlimbs ); + if (ep_marker) + _gcry_mpi_free_limb_space( ep_marker, ep_nlimbs ); + if (xp_marker) + _gcry_mpi_free_limb_space( xp_marker, xp_nlimbs ); + if (tspace) + _gcry_mpi_free_limb_space( tspace, 0 ); +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpicoder.c b/grub-core/lib/libgcrypt-grub/mpi/mpicoder.c new file mode 100644 index 0000000..e734dcf --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpicoder.c @@ -0,0 +1,755 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpicoder.c - Coder for the external representation of MPIs + * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003 + * 2008 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <config.h> +#include <stdio.h> +#include <string.h> +#include <stdlib.h> + +#include "mpi-internal.h" +#include "g10lib.h" + +#define MAX_EXTERN_MPI_BITS 16384 + +/* Helper used to scan PGP style MPIs. Returns NULL on failure. 
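The main loop above is left-to-right binary exponentiation: square every step, multiply when the current exponent bit is set, and, for secret exponents, perform the multiplication unconditionally so the flush+reload access pattern does not reveal the bit. A minimal sketch of the control flow on 64-bit words, without the limb arrays, modulus pre-shifting, or the unconditional multiply; __uint128_t is assumed.

#include <stdint.h>
#include <stdio.h>

/* Left-to-right square-and-multiply skeleton of gcry_mpi_powm. */
static uint64_t
mulmod (uint64_t a, uint64_t b, uint64_t m)
{
  return (uint64_t) ((__uint128_t) a * b % m);
}

static uint64_t
powmod (uint64_t base, uint64_t expo, uint64_t m)
{
  uint64_t res = 1 % m;
  int i;

  for (i = 63; i >= 0; i--)
    {
      res = mulmod (res, res, m);            /* always square            */
      if ((expo >> i) & 1)
        res = mulmod (res, base % m, m);     /* multiply when bit is set */
    }
  return res;
}

int
main (void)
{
  printf ("%llu\n", (unsigned long long) powmod (2, 10, 1000));   /* 24 */
  return 0;
}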
*/ +static gcry_mpi_t +mpi_read_from_buffer (const unsigned char *buffer, unsigned *ret_nread, + int secure) +{ + int i, j; + unsigned int nbits, nbytes, nlimbs, nread=0; + mpi_limb_t a; + gcry_mpi_t val = MPI_NULL; + + if ( *ret_nread < 2 ) + goto leave; + nbits = buffer[0] << 8 | buffer[1]; + if ( nbits > MAX_EXTERN_MPI_BITS ) + { +/* log_debug ("mpi too large (%u bits)\n", nbits); */ + goto leave; + } + buffer += 2; + nread = 2; + + nbytes = (nbits+7) / 8; + nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB; + val = secure? mpi_alloc_secure (nlimbs) : mpi_alloc (nlimbs); + i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; + i %= BYTES_PER_MPI_LIMB; + j= val->nlimbs = nlimbs; + val->sign = 0; + for ( ; j > 0; j-- ) + { + a = 0; + for (; i < BYTES_PER_MPI_LIMB; i++ ) + { + if ( ++nread > *ret_nread ) + { +/* log_debug ("mpi larger than buffer"); */ + mpi_free (val); + val = NULL; + goto leave; + } + a <<= 8; + a |= *buffer++; + } + i = 0; + val->d[j-1] = a; + } + + leave: + *ret_nread = nread; + return val; +} + + +/**************** + * Fill the mpi VAL from the hex string in STR. + */ +static int +mpi_fromstr (gcry_mpi_t val, const char *str) +{ + int sign = 0; + int prepend_zero = 0; + int i, j, c, c1, c2; + unsigned int nbits, nbytes, nlimbs; + mpi_limb_t a; + + if ( *str == '-' ) + { + sign = 1; + str++; + } + + /* Skip optional hex prefix. */ + if ( *str == '0' && str[1] == 'x' ) + str += 2; + + nbits = 4 * strlen (str); + if ((nbits % 8)) + prepend_zero = 1; + + nbytes = (nbits+7) / 8; + nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB; + + if ( val->alloced < nlimbs ) + mpi_resize (val, nlimbs); + + i = BYTES_PER_MPI_LIMB - (nbytes % BYTES_PER_MPI_LIMB); + i %= BYTES_PER_MPI_LIMB; + j = val->nlimbs = nlimbs; + val->sign = sign; + for (; j > 0; j--) + { + a = 0; + for (; i < BYTES_PER_MPI_LIMB; i++) + { + if (prepend_zero) + { + c1 = '0'; + prepend_zero = 0; + } + else + c1 = *str++; + + if (!c1) + { + mpi_clear (val); + return 1; /* Error. */ + } + c2 = *str++; + if (!c2) + { + mpi_clear (val); + return 1; /* Error. */ + } + if ( c1 >= '0' && c1 <= '9' ) + c = c1 - '0'; + else if ( c1 >= 'a' && c1 <= 'f' ) + c = c1 - 'a' + 10; + else if ( c1 >= 'A' && c1 <= 'F' ) + c = c1 - 'A' + 10; + else + { + mpi_clear (val); + return 1; /* Error. */ + } + c <<= 4; + if ( c2 >= '0' && c2 <= '9' ) + c |= c2 - '0'; + else if( c2 >= 'a' && c2 <= 'f' ) + c |= c2 - 'a' + 10; + else if( c2 >= 'A' && c2 <= 'F' ) + c |= c2 - 'A' + 10; + else + { + mpi_clear(val); + return 1; /* Error. */ + } + a <<= 8; + a |= c; + } + i = 0; + val->d[j-1] = a; + } + + return 0; /* Okay. */ +} + + +/* Dump the value of A in a format suitable for debugging to + Libgcrypt's logging stream. Note that one leading space but no + trailing space or linefeed will be printed. It is okay to pass + NULL for A. */ +void +gcry_mpi_dump (const gcry_mpi_t a) +{ + int i; + + log_printf (" "); + if (!a) + log_printf ("[MPI_NULL]"); + else + { + if (a->sign) + log_printf ( "-"); +#if BYTES_PER_MPI_LIMB == 2 +# define X "4" +#elif BYTES_PER_MPI_LIMB == 4 +# define X "8" +#elif BYTES_PER_MPI_LIMB == 8 +# define X "16" +#elif BYTES_PER_MPI_LIMB == 16 +# define X "32" +#else +# error please define the format here +#endif + for (i=a->nlimbs; i > 0 ; i-- ) + { + log_printf (i != a->nlimbs? "%0" X "lX":"%lX", (ulong)a->d[i-1]); + } +#undef X + if (!a->nlimbs) + log_printf ("0"); + } +} + +/* Convience function used internally. 
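mpi_read_from_buffer above parses the OpenPGP MPI encoding: a two-byte big-endian bit count followed by ceil(nbits/8) magnitude bytes, most significant first. A minimal sketch of that framing; the buffer contents are made up for the example.

#include <stdio.h>

/* The value 0x1FF is 9 bits, so its OpenPGP encoding is 00 09 01 FF. */
int
main (void)
{
  unsigned char buf[] = { 0x00, 0x09, 0x01, 0xFF };
  unsigned nbits  = buf[0] << 8 | buf[1];
  unsigned nbytes = (nbits + 7) / 8;
  unsigned long val = 0;
  unsigned i;

  for (i = 0; i < nbytes; i++)
    val = val << 8 | buf[2 + i];
  printf ("nbits=%u value=0x%lX\n", nbits, val);   /* nbits=9 value=0x1FF */
  return 0;
}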
*/ +void +_gcry_log_mpidump (const char *text, gcry_mpi_t a) +{ + log_printf ("%s:", text); + gcry_mpi_dump (a); + log_printf ("\n"); +} + + +/* Return an allocated buffer with the MPI (msb first). NBYTES + receives the length of this buffer. Caller must free the return + string. This function returns an allocated buffer with NBYTES set + to zero if the value of A is zero. If sign is not NULL, it will be + set to the sign of the A. On error NULL is returned and ERRNO set + appropriately. */ +static unsigned char * +do_get_buffer (gcry_mpi_t a, unsigned int *nbytes, int *sign, int force_secure) +{ + unsigned char *p, *buffer; + mpi_limb_t alimb; + int i; + size_t n; + + if (sign) + *sign = a->sign; + + *nbytes = a->nlimbs * BYTES_PER_MPI_LIMB; + n = *nbytes? *nbytes:1; /* Allocate at least one byte. */ + p = buffer = (force_secure || mpi_is_secure(a))? gcry_malloc_secure (n) + : gcry_malloc (n); + if (!buffer) + return NULL; + + for (i=a->nlimbs-1; i >= 0; i--) + { + alimb = a->d[i]; +#if BYTES_PER_MPI_LIMB == 4 + *p++ = alimb >> 24; + *p++ = alimb >> 16; + *p++ = alimb >> 8; + *p++ = alimb ; +#elif BYTES_PER_MPI_LIMB == 8 + *p++ = alimb >> 56; + *p++ = alimb >> 48; + *p++ = alimb >> 40; + *p++ = alimb >> 32; + *p++ = alimb >> 24; + *p++ = alimb >> 16; + *p++ = alimb >> 8; + *p++ = alimb ; +#else +# error please implement for this limb size. +#endif + } + + /* This is sub-optimal but we need to do the shift operation because + the caller has to free the returned buffer. */ + for (p=buffer; *nbytes && !*p; p++, --*nbytes) + ; + if (p != buffer) + memmove (buffer,p, *nbytes); + return buffer; +} + + +byte * +_gcry_mpi_get_buffer (gcry_mpi_t a, unsigned int *nbytes, int *sign) +{ + return do_get_buffer (a, nbytes, sign, 0); +} + +byte * +_gcry_mpi_get_secure_buffer (gcry_mpi_t a, unsigned *nbytes, int *sign) +{ + return do_get_buffer (a, nbytes, sign, 1); +} + + +/* + * Use the NBYTES at BUFFER_ARG to update A. Set the sign of a to + * SIGN. + */ +void +_gcry_mpi_set_buffer (gcry_mpi_t a, const void *buffer_arg, + unsigned int nbytes, int sign) +{ + const unsigned char *buffer = (const unsigned char*)buffer_arg; + const unsigned char *p; + mpi_limb_t alimb; + int nlimbs; + int i; + + nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB; + RESIZE_IF_NEEDED(a, nlimbs); + a->sign = sign; + + for (i=0, p = buffer+nbytes-1; p >= buffer+BYTES_PER_MPI_LIMB; ) + { +#if BYTES_PER_MPI_LIMB == 4 + alimb = *p-- ; + alimb |= *p-- << 8 ; + alimb |= *p-- << 16 ; + alimb |= *p-- << 24 ; +#elif BYTES_PER_MPI_LIMB == 8 + alimb = (mpi_limb_t)*p-- ; + alimb |= (mpi_limb_t)*p-- << 8 ; + alimb |= (mpi_limb_t)*p-- << 16 ; + alimb |= (mpi_limb_t)*p-- << 24 ; + alimb |= (mpi_limb_t)*p-- << 32 ; + alimb |= (mpi_limb_t)*p-- << 40 ; + alimb |= (mpi_limb_t)*p-- << 48 ; + alimb |= (mpi_limb_t)*p-- << 56 ; +#else +# error please implement for this limb size. 
+#endif + a->d[i++] = alimb; + } + if ( p >= buffer ) + { +#if BYTES_PER_MPI_LIMB == 4 + alimb = *p--; + if (p >= buffer) + alimb |= *p-- << 8; + if (p >= buffer) + alimb |= *p-- << 16; + if (p >= buffer) + alimb |= *p-- << 24; +#elif BYTES_PER_MPI_LIMB == 8 + alimb = (mpi_limb_t)*p--; + if (p >= buffer) + alimb |= (mpi_limb_t)*p-- << 8; + if (p >= buffer) + alimb |= (mpi_limb_t)*p-- << 16; + if (p >= buffer) + alimb |= (mpi_limb_t)*p-- << 24; + if (p >= buffer) + alimb |= (mpi_limb_t)*p-- << 32; + if (p >= buffer) + alimb |= (mpi_limb_t)*p-- << 40; + if (p >= buffer) + alimb |= (mpi_limb_t)*p-- << 48; + if (p >= buffer) + alimb |= (mpi_limb_t)*p-- << 56; +#else +# error please implement for this limb size. +#endif + a->d[i++] = alimb; + } + a->nlimbs = i; + gcry_assert (i == nlimbs); +} + + +/* Convert the external representation of an integer stored in BUFFER + with a length of BUFLEN into a newly create MPI returned in + RET_MPI. If NBYTES is not NULL, it will receive the number of + bytes actually scanned after a successful operation. */ +gcry_error_t +gcry_mpi_scan (struct gcry_mpi **ret_mpi, enum gcry_mpi_format format, + const void *buffer_arg, size_t buflen, size_t *nscanned) +{ + const unsigned char *buffer = (const unsigned char*)buffer_arg; + struct gcry_mpi *a = NULL; + unsigned int len; + int secure = (buffer && gcry_is_secure (buffer)); + + if (!buffer) + return gcry_error (GPG_ERR_INV_ARG); + + if (format == GCRYMPI_FMT_SSH) + len = 0; + else + len = buflen; + + if (format == GCRYMPI_FMT_STD) + { + const unsigned char *s = buffer; + + a = secure? mpi_alloc_secure ((len+BYTES_PER_MPI_LIMB-1) + /BYTES_PER_MPI_LIMB) + : mpi_alloc ((len+BYTES_PER_MPI_LIMB-1)/BYTES_PER_MPI_LIMB); + if (len) + { + a->sign = !!(*s & 0x80); + if (a->sign) + { + /* FIXME: we have to convert from 2compl to magnitude format */ + mpi_free (a); + return gcry_error (GPG_ERR_INTERNAL); + } + else + _gcry_mpi_set_buffer (a, s, len, 0); + } + if (ret_mpi) + { + mpi_normalize ( a ); + *ret_mpi = a; + } + else + mpi_free(a); + return 0; + } + else if (format == GCRYMPI_FMT_USG) + { + a = secure? mpi_alloc_secure ((len+BYTES_PER_MPI_LIMB-1) + /BYTES_PER_MPI_LIMB) + : mpi_alloc ((len+BYTES_PER_MPI_LIMB-1)/BYTES_PER_MPI_LIMB); + + if (len) + _gcry_mpi_set_buffer (a, buffer, len, 0); + if (ret_mpi) + { + mpi_normalize ( a ); + *ret_mpi = a; + } + else + mpi_free(a); + return 0; + } + else if (format == GCRYMPI_FMT_PGP) + { + a = mpi_read_from_buffer (buffer, &len, secure); + if (nscanned) + *nscanned = len; + if (ret_mpi && a) + { + mpi_normalize (a); + *ret_mpi = a; + } + else if (a) + { + mpi_free(a); + a = NULL; + } + return a? 0 : gcry_error (GPG_ERR_INV_OBJ); + } + else if (format == GCRYMPI_FMT_SSH) + { + const unsigned char *s = buffer; + size_t n; + + /* This test is not strictly necessary and an assert (!len) + would be sufficient. We keep this test in case we later + allow the BUFLEN argument to act as a sanitiy check. Same + below. */ + if (len && len < 4) + return gcry_error (GPG_ERR_TOO_SHORT); + + n = ((size_t)s[0] << 24 | (size_t)s[1] << 16 | (size_t)s[2] << 8 | (size_t)s[3]); + s += 4; + if (len) + len -= 4; + if (len && n > len) + return gcry_error (GPG_ERR_TOO_LARGE); + + a = secure? 
mpi_alloc_secure ((n+BYTES_PER_MPI_LIMB-1) + /BYTES_PER_MPI_LIMB) + : mpi_alloc ((n+BYTES_PER_MPI_LIMB-1)/BYTES_PER_MPI_LIMB); + if (n) + { + a->sign = !!(*s & 0x80); + if (a->sign) + { + /* FIXME: we have to convert from 2compl to magnitude format */ + mpi_free(a); + return gcry_error (GPG_ERR_INTERNAL); + } + else + _gcry_mpi_set_buffer( a, s, n, 0 ); + } + if (nscanned) + *nscanned = n+4; + if (ret_mpi) + { + mpi_normalize ( a ); + *ret_mpi = a; + } + else + mpi_free(a); + return 0; + } + else if (format == GCRYMPI_FMT_HEX) + { + /* We can only handle C strings for now. */ + if (buflen) + return gcry_error (GPG_ERR_INV_ARG); + + a = secure? mpi_alloc_secure (0) : mpi_alloc(0); + if (mpi_fromstr (a, (const char *)buffer)) + { + mpi_free (a); + return gcry_error (GPG_ERR_INV_OBJ); + } + if (ret_mpi) + { + mpi_normalize ( a ); + *ret_mpi = a; + } + else + mpi_free(a); + return 0; + } + else + return gcry_error (GPG_ERR_INV_ARG); +} + + +/* Convert the big integer A into the external representation + described by FORMAT and store it in the provided BUFFER which has + been allocated by the user with a size of BUFLEN bytes. NWRITTEN + receives the actual length of the external representation unless it + has been passed as NULL. BUFFER may be NULL to query the required + length. */ +gcry_error_t +gcry_mpi_print (enum gcry_mpi_format format, + unsigned char *buffer, size_t buflen, + size_t *nwritten, struct gcry_mpi *a) +{ + unsigned int nbits = mpi_get_nbits (a); + size_t len; + size_t dummy_nwritten; + + if (!nwritten) + nwritten = &dummy_nwritten; + + len = buflen; + *nwritten = 0; + if (format == GCRYMPI_FMT_STD) + { + unsigned char *tmp; + int extra = 0; + unsigned int n; + + if (a->sign) + return gcry_error (GPG_ERR_INTERNAL); /* Can't handle it yet. */ + + tmp = _gcry_mpi_get_buffer (a, &n, NULL); + if (!tmp) + return gpg_error_from_syserror (); + if (n && (*tmp & 0x80)) + { + n++; + extra=1; + } + + if (buffer && n > len) + { + /* The provided buffer is too short. */ + gcry_free (tmp); + return gcry_error (GPG_ERR_TOO_SHORT); + } + if (buffer) + { + unsigned char *s = buffer; + + if (extra) + *s++ = 0; + memcpy (s, tmp, n-extra); + } + gcry_free(tmp); + *nwritten = n; + return 0; + } + else if (format == GCRYMPI_FMT_USG) + { + unsigned int n = (nbits + 7)/8; + + /* Note: We ignore the sign for this format. */ + /* FIXME: for performance reasons we should put this into + mpi_aprint because we can then use the buffer directly. */ + if (buffer && n > len) + return gcry_error (GPG_ERR_TOO_SHORT); + if (buffer) + { + unsigned char *tmp; + + tmp = _gcry_mpi_get_buffer (a, &n, NULL); + if (!tmp) + return gpg_error_from_syserror (); + memcpy (buffer, tmp, n); + gcry_free (tmp); + } + *nwritten = n; + return 0; + } + else if (format == GCRYMPI_FMT_PGP) + { + unsigned int n = (nbits + 7)/8; + + /* The PGP format can only handle unsigned integers. */ + if( a->sign ) + return gcry_error (GPG_ERR_INV_ARG); + + if (buffer && n+2 > len) + return gcry_error (GPG_ERR_TOO_SHORT); + + if (buffer) + { + unsigned char *tmp; + unsigned char *s = buffer; + + s[0] = nbits >> 8; + s[1] = nbits; + + tmp = _gcry_mpi_get_buffer (a, &n, NULL); + if (!tmp) + return gpg_error_from_syserror (); + memcpy (s+2, tmp, n); + gcry_free (tmp); + } + *nwritten = n+2; + return 0; + } + else if (format == GCRYMPI_FMT_SSH) + { + unsigned char *tmp; + int extra = 0; + unsigned int n; + + if (a->sign) + return gcry_error (GPG_ERR_INTERNAL); /* Can't handle it yet. 
*/ + + tmp = _gcry_mpi_get_buffer (a, &n, NULL); + if (!tmp) + return gpg_error_from_syserror (); + if (n && (*tmp & 0x80)) + { + n++; + extra=1; + } + + if (buffer && n+4 > len) + { + gcry_free(tmp); + return gcry_error (GPG_ERR_TOO_SHORT); + } + + if (buffer) + { + unsigned char *s = buffer; + + *s++ = n >> 24; + *s++ = n >> 16; + *s++ = n >> 8; + *s++ = n; + if (extra) + *s++ = 0; + + memcpy (s, tmp, n-extra); + } + gcry_free (tmp); + *nwritten = 4+n; + return 0; + } + else if (format == GCRYMPI_FMT_HEX) + { + unsigned char *tmp; + int i; + int extra = 0; + unsigned int n = 0; + + tmp = _gcry_mpi_get_buffer (a, &n, NULL); + if (!tmp) + return gpg_error_from_syserror (); + if (!n || (*tmp & 0x80)) + extra = 2; + + if (buffer && 2*n + extra + !!a->sign + 1 > len) + { + gcry_free(tmp); + return gcry_error (GPG_ERR_TOO_SHORT); + } + if (buffer) + { + unsigned char *s = buffer; + + if (a->sign) + *s++ = '-'; + if (extra) + { + *s++ = '0'; + *s++ = '0'; + } + + for (i=0; i < n; i++) + { + unsigned int c = tmp[i]; + + *s++ = (c >> 4) < 10? '0'+(c>>4) : 'A'+(c>>4)-10 ; + c &= 15; + *s++ = c < 10? '0'+c : 'A'+c-10 ; + } + *s++ = 0; + *nwritten = s - buffer; + } + else + { + *nwritten = 2*n + extra + !!a->sign + 1; + } + gcry_free (tmp); + return 0; + } + else + return gcry_error (GPG_ERR_INV_ARG); +} + + +/* + * Like gcry_mpi_print but this function allocates the buffer itself. + * The caller has to supply the address of a pointer. NWRITTEN may be + * NULL. + */ +gcry_error_t +gcry_mpi_aprint (enum gcry_mpi_format format, + unsigned char **buffer, size_t *nwritten, + struct gcry_mpi *a) +{ + size_t n; + gcry_error_t rc; + + *buffer = NULL; + rc = gcry_mpi_print (format, NULL, 0, &n, a); + if (rc) + return rc; + + *buffer = mpi_is_secure(a) ? gcry_malloc_secure (n) : gcry_malloc (n); + if (!*buffer) + return gpg_error_from_syserror (); + rc = gcry_mpi_print( format, *buffer, n, &n, a ); + if (rc) + { + gcry_free(*buffer); + *buffer = NULL; + } + else if (nwritten) + *nwritten = n; + return rc; +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-add1.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-add1.c new file mode 100644 index 0000000..4a84df6 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-add1.c @@ -0,0 +1,65 @@ +/* mpihelp-add_1.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, + * 2000, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. 
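The GCRYMPI_FMT_SSH branch above writes a four-byte big-endian length followed by the magnitude, inserting one extra zero byte when the leading magnitude byte has its top bit set so the value cannot be read back as negative (the STD branch applies the same leading-zero rule without the length prefix). A minimal sketch of that framing; the example value is made up.

#include <stdio.h>

/* The integer 0xFF10 has its top bit set, so the SSH mpint form is a
 * length of 3 followed by 00 FF 10. */
int
main (void)
{
  unsigned char value[] = { 0xFF, 0x10 };
  int extra = (value[0] & 0x80) != 0;              /* needs a 0x00 prefix */
  unsigned n = sizeof value + extra;
  unsigned char out[4 + 3];
  unsigned i, len = 0;

  out[len++] = n >> 24; out[len++] = n >> 16;
  out[len++] = n >> 8;  out[len++] = n;
  if (extra)
    out[len++] = 0;
  out[len++] = value[0];
  out[len++] = value[1];

  for (i = 0; i < len; i++)
    printf ("%02X ", out[i]);                      /* 00 00 00 03 00 FF 10 */
  printf ("\n");
  return 0;
}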
+ */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + +mpi_limb_t +_gcry_mpih_add_n (mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_ptr_t s2_ptr, mpi_size_t size) +{ + mpi_limb_t x, y, cy; + mpi_size_t j; + + /* The loop counter and index J goes from -SIZE to -1. This way + the loop becomes faster. */ + j = -size; + + /* Offset the base pointers to compensate for the negative indices. */ + s1_ptr -= j; + s2_ptr -= j; + res_ptr -= j; + + cy = 0; + do + { + y = s2_ptr[j]; + x = s1_ptr[j]; + y += cy; /* add previous carry to one addend */ + cy = y < cy; /* get out carry from that addition */ + y += x; /* add other addend */ + cy += y < x; /* get out carry from that add, combine */ + res_ptr[j] = y; + } + while ( ++j ); + + return cy; +} + diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-div.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-div.c new file mode 100644 index 0000000..797c083 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-div.c @@ -0,0 +1,536 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpih-div.c - MPI helper functions + * Copyright (C) 1994, 1996, 1998, 2000, + * 2001, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + +#ifndef UMUL_TIME +#define UMUL_TIME 1 +#endif +#ifndef UDIV_TIME +#define UDIV_TIME UMUL_TIME +#endif + +/* FIXME: We should be using invert_limb (or invert_normalized_limb) + * here (not udiv_qrnnd). + */ + +mpi_limb_t +_gcry_mpih_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, + mpi_limb_t divisor_limb) +{ + mpi_size_t i; + mpi_limb_t n1, n0, r; + int dummy; + + /* Botch: Should this be handled at all? Rely on callers? */ + if( !dividend_size ) + return 0; + + /* If multiplication is much faster than division, and the + * dividend is large, pre-invert the divisor, and use + * only multiplications in the inner loop. + * + * This test should be read: + * Does it ever help to use udiv_qrnnd_preinv? + * && Does what we save compensate for the inversion overhead? 
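_gcry_mpih_add_n above propagates the carry without branches: after adding the old carry, the comparison y < cy detects wrap-around, and after adding the other limb, y < x detects it again. A minimal sketch of the same four steps on 32-bit limbs; the operands are made up.

#include <stdint.h>
#include <stdio.h>

/* 0x1ffffffff + 0x200000001 == 0x400000000, added limb by limb with the
 * branch-free carry trick used in _gcry_mpih_add_n. */
int
main (void)
{
  uint32_t a[2] = { 0xffffffffu, 0x00000001u };
  uint32_t b[2] = { 0x00000001u, 0x00000002u };
  uint32_t r[2];
  uint32_t cy = 0;
  int j;

  for (j = 0; j < 2; j++)
    {
      uint32_t x = a[j], y = b[j];
      y += cy;           /* add previous carry                  */
      cy = y < cy;       /* did that wrap around?               */
      y += x;            /* add the other addend                */
      cy += y < x;       /* combine with a possible new carry   */
      r[j] = y;
    }
  printf ("sum = %x %08x %08x\n", cy, r[1], r[0]);  /* 0 00000004 00000000 */
  return 0;
}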
+ */ + if( UDIV_TIME > (2 * UMUL_TIME + 6) + && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME ) { + int normalization_steps; + + count_leading_zeros( normalization_steps, divisor_limb ); + if( normalization_steps ) { + mpi_limb_t divisor_limb_inverted; + + divisor_limb <<= normalization_steps; + + /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The + * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the + * most significant bit (with weight 2**N) implicit. + * + * Special case for DIVISOR_LIMB == 100...000. + */ + if( !(divisor_limb << 1) ) + divisor_limb_inverted = ~(mpi_limb_t)0; + else + udiv_qrnnd(divisor_limb_inverted, dummy, + -divisor_limb, 0, divisor_limb); + + n1 = dividend_ptr[dividend_size - 1]; + r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps); + + /* Possible optimization: + * if (r == 0 + * && divisor_limb > ((n1 << normalization_steps) + * | (dividend_ptr[dividend_size - 2] >> ...))) + * ...one division less... + */ + for( i = dividend_size - 2; i >= 0; i--) { + n0 = dividend_ptr[i]; + UDIV_QRNND_PREINV(dummy, r, r, + ((n1 << normalization_steps) + | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))), + divisor_limb, divisor_limb_inverted); + n1 = n0; + } + UDIV_QRNND_PREINV(dummy, r, r, + n1 << normalization_steps, + divisor_limb, divisor_limb_inverted); + return r >> normalization_steps; + } + else { + mpi_limb_t divisor_limb_inverted; + + /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The + * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the + * most significant bit (with weight 2**N) implicit. + * + * Special case for DIVISOR_LIMB == 100...000. + */ + if( !(divisor_limb << 1) ) + divisor_limb_inverted = ~(mpi_limb_t)0; + else + udiv_qrnnd(divisor_limb_inverted, dummy, + -divisor_limb, 0, divisor_limb); + + i = dividend_size - 1; + r = dividend_ptr[i]; + + if( r >= divisor_limb ) + r = 0; + else + i--; + + for( ; i >= 0; i--) { + n0 = dividend_ptr[i]; + UDIV_QRNND_PREINV(dummy, r, r, + n0, divisor_limb, divisor_limb_inverted); + } + return r; + } + } + else { + if( UDIV_NEEDS_NORMALIZATION ) { + int normalization_steps; + + count_leading_zeros(normalization_steps, divisor_limb); + if( normalization_steps ) { + divisor_limb <<= normalization_steps; + + n1 = dividend_ptr[dividend_size - 1]; + r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps); + + /* Possible optimization: + * if (r == 0 + * && divisor_limb > ((n1 << normalization_steps) + * | (dividend_ptr[dividend_size - 2] >> ...))) + * ...one division less... + */ + for(i = dividend_size - 2; i >= 0; i--) { + n0 = dividend_ptr[i]; + udiv_qrnnd (dummy, r, r, + ((n1 << normalization_steps) + | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))), + divisor_limb); + n1 = n0; + } + udiv_qrnnd (dummy, r, r, + n1 << normalization_steps, + divisor_limb); + return r >> normalization_steps; + } + } + /* No normalization needed, either because udiv_qrnnd doesn't require + * it, or because DIVISOR_LIMB is already normalized. */ + i = dividend_size - 1; + r = dividend_ptr[i]; + + if(r >= divisor_limb) + r = 0; + else + i--; + + for(; i >= 0; i--) { + n0 = dividend_ptr[i]; + udiv_qrnnd (dummy, r, r, n0, divisor_limb); + } + return r; + } +} + +/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write + * the NSIZE-DSIZE least significant quotient limbs at QP + * and the DSIZE long remainder at NP. If QEXTRA_LIMBS is + * non-zero, generate that many fraction bits and append them after the + * other quotient limbs. 
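The pre-inversion used above (and by UDIV_QRNND_PREINV) replaces per-limb division with multiplication: for a normalized divisor d, di = floor(b^2/d) - b is computed once with a single udiv_qrnnd, and each limb then needs only multiplies plus a couple of corrections. A minimal sketch for 32-bit limbs; the operands are made up, and the special case d = 100...000 (where di would not fit a limb) is ignored here.

#include <stdint.h>
#include <stdio.h>

/* Divide the two-limb number (nh, nl) by a normalized 32-bit d using the
 * precomputed reciprocal di, then verify against direct 64-bit division.
 * nh < d is required, as in the real helpers. */
int
main (void)
{
  uint32_t d  = 0x87654321u;                           /* top bit set        */
  uint32_t di = (uint32_t) ((((uint64_t) (0u - d)) << 32) / d);
  uint32_t nh = 0x12345678u, nl = 0x9abcdef0u;
  uint64_t n  = ((uint64_t) nh << 32) | nl;

  uint64_t q = ((uint64_t) nh * di >> 32) + nh;        /* quotient estimate  */
  uint64_t r = n - q * (uint64_t) d;                   /* may still be >= d  */
  while (r >= d)                                       /* a few corrections  */
    {
      q++;
      r -= d;
    }
  printf ("q=%llx r=%llx   check q=%llx r=%llx\n",
          (unsigned long long) q, (unsigned long long) r,
          (unsigned long long) (n / d), (unsigned long long) (n % d));
  return 0;
}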
+ * Return the most significant limb of the quotient, this is always 0 or 1. + * + * Preconditions: + * 0. NSIZE >= DSIZE. + * 1. The most significant bit of the divisor must be set. + * 2. QP must either not overlap with the input operands at all, or + * QP + DSIZE >= NP must hold true. (This means that it's + * possible to put the quotient in the high part of NUM, right after the + * remainder in NUM. + * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero. + */ + +mpi_limb_t +_gcry_mpih_divrem( mpi_ptr_t qp, mpi_size_t qextra_limbs, + mpi_ptr_t np, mpi_size_t nsize, + mpi_ptr_t dp, mpi_size_t dsize) +{ + mpi_limb_t most_significant_q_limb = 0; + + switch(dsize) { + case 0: + /* We are asked to divide by zero, so go ahead and do it! (To make + the compiler not remove this statement, return the value.) */ + grub_fatal ("mpi division by zero"); + return 0; + + case 1: + { + mpi_size_t i; + mpi_limb_t n1; + mpi_limb_t d; + + d = dp[0]; + n1 = np[nsize - 1]; + + if( n1 >= d ) { + n1 -= d; + most_significant_q_limb = 1; + } + + qp += qextra_limbs; + for( i = nsize - 2; i >= 0; i--) + udiv_qrnnd( qp[i], n1, n1, np[i], d ); + qp -= qextra_limbs; + + for( i = qextra_limbs - 1; i >= 0; i-- ) + udiv_qrnnd (qp[i], n1, n1, 0, d); + + np[0] = n1; + } + break; + + case 2: + { + mpi_size_t i; + mpi_limb_t n1, n0, n2; + mpi_limb_t d1, d0; + + np += nsize - 2; + d1 = dp[1]; + d0 = dp[0]; + n1 = np[1]; + n0 = np[0]; + + if( n1 >= d1 && (n1 > d1 || n0 >= d0) ) { + sub_ddmmss (n1, n0, n1, n0, d1, d0); + most_significant_q_limb = 1; + } + + for( i = qextra_limbs + nsize - 2 - 1; i >= 0; i-- ) { + mpi_limb_t q; + mpi_limb_t r; + + if( i >= qextra_limbs ) + np--; + else + np[0] = 0; + + if( n1 == d1 ) { + /* Q should be either 111..111 or 111..110. Need special + * treatment of this rare case as normal division would + * give overflow. */ + q = ~(mpi_limb_t)0; + + r = n0 + d1; + if( r < d1 ) { /* Carry in the addition? */ + add_ssaaaa( n1, n0, r - d0, np[0], 0, d0 ); + qp[i] = q; + continue; + } + n1 = d0 - (d0 != 0?1:0); + n0 = -d0; + } + else { + udiv_qrnnd (q, r, n1, n0, d1); + umul_ppmm (n1, n0, d0, q); + } + + n2 = np[0]; + q_test: + if( n1 > r || (n1 == r && n0 > n2) ) { + /* The estimated Q was too large. */ + q--; + sub_ddmmss (n1, n0, n1, n0, 0, d0); + r += d1; + if( r >= d1 ) /* If not carry, test Q again. */ + goto q_test; + } + + qp[i] = q; + sub_ddmmss (n1, n0, r, n2, n1, n0); + } + np[1] = n1; + np[0] = n0; + } + break; + + default: + { + mpi_size_t i; + mpi_limb_t dX, d1, n0; + + np += nsize - dsize; + dX = dp[dsize - 1]; + d1 = dp[dsize - 2]; + n0 = np[dsize - 1]; + + if( n0 >= dX ) { + if(n0 > dX || _gcry_mpih_cmp(np, dp, dsize - 1) >= 0 ) { + _gcry_mpih_sub_n(np, np, dp, dsize); + n0 = np[dsize - 1]; + most_significant_q_limb = 1; + } + } + + for( i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) { + mpi_limb_t q; + mpi_limb_t n1, n2; + mpi_limb_t cy_limb; + + if( i >= qextra_limbs ) { + np--; + n2 = np[dsize]; + } + else { + n2 = np[dsize - 1]; + MPN_COPY_DECR (np + 1, np, dsize - 1); + np[0] = 0; + } + + if( n0 == dX ) { + /* This might over-estimate q, but it's probably not worth + * the extra code here to find out. */ + q = ~(mpi_limb_t)0; + } + else { + mpi_limb_t r; + + udiv_qrnnd(q, r, n0, np[dsize - 1], dX); + umul_ppmm(n1, n0, d1, q); + + while( n1 > r || (n1 == r && n0 > np[dsize - 2])) { + q--; + r += dX; + if( r < dX ) /* I.e. "carry in previous addition?" 
*/ + break; + n1 -= n0 < d1; + n0 -= d1; + } + } + + /* Possible optimization: We already have (q * n0) and (1 * n1) + * after the calculation of q. Taking advantage of that, we + * could make this loop make two iterations less. */ + cy_limb = _gcry_mpih_submul_1(np, dp, dsize, q); + + if( n2 != cy_limb ) { + _gcry_mpih_add_n(np, np, dp, dsize); + q--; + } + + qp[i] = q; + n0 = np[dsize - 1]; + } + } + } + + return most_significant_q_limb; +} + + +/**************** + * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB. + * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR. + * Return the single-limb remainder. + * There are no constraints on the value of the divisor. + * + * QUOT_PTR and DIVIDEND_PTR might point to the same limb. + */ + +mpi_limb_t +_gcry_mpih_divmod_1( mpi_ptr_t quot_ptr, + mpi_ptr_t dividend_ptr, mpi_size_t dividend_size, + mpi_limb_t divisor_limb) +{ + mpi_size_t i; + mpi_limb_t n1, n0, r; + int dummy; + + if( !dividend_size ) + return 0; + + /* If multiplication is much faster than division, and the + * dividend is large, pre-invert the divisor, and use + * only multiplications in the inner loop. + * + * This test should be read: + * Does it ever help to use udiv_qrnnd_preinv? + * && Does what we save compensate for the inversion overhead? + */ + if( UDIV_TIME > (2 * UMUL_TIME + 6) + && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME ) { + int normalization_steps; + + count_leading_zeros( normalization_steps, divisor_limb ); + if( normalization_steps ) { + mpi_limb_t divisor_limb_inverted; + + divisor_limb <<= normalization_steps; + + /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The + * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the + * most significant bit (with weight 2**N) implicit. + */ + /* Special case for DIVISOR_LIMB == 100...000. */ + if( !(divisor_limb << 1) ) + divisor_limb_inverted = ~(mpi_limb_t)0; + else + udiv_qrnnd(divisor_limb_inverted, dummy, + -divisor_limb, 0, divisor_limb); + + n1 = dividend_ptr[dividend_size - 1]; + r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps); + + /* Possible optimization: + * if (r == 0 + * && divisor_limb > ((n1 << normalization_steps) + * | (dividend_ptr[dividend_size - 2] >> ...))) + * ...one division less... + */ + for( i = dividend_size - 2; i >= 0; i--) { + n0 = dividend_ptr[i]; + UDIV_QRNND_PREINV( quot_ptr[i + 1], r, r, + ((n1 << normalization_steps) + | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))), + divisor_limb, divisor_limb_inverted); + n1 = n0; + } + UDIV_QRNND_PREINV( quot_ptr[0], r, r, + n1 << normalization_steps, + divisor_limb, divisor_limb_inverted); + return r >> normalization_steps; + } + else { + mpi_limb_t divisor_limb_inverted; + + /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The + * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the + * most significant bit (with weight 2**N) implicit. + */ + /* Special case for DIVISOR_LIMB == 100...000. 
*/ + if( !(divisor_limb << 1) ) + divisor_limb_inverted = ~(mpi_limb_t) 0; + else + udiv_qrnnd(divisor_limb_inverted, dummy, + -divisor_limb, 0, divisor_limb); + + i = dividend_size - 1; + r = dividend_ptr[i]; + + if( r >= divisor_limb ) + r = 0; + else + quot_ptr[i--] = 0; + + for( ; i >= 0; i-- ) { + n0 = dividend_ptr[i]; + UDIV_QRNND_PREINV( quot_ptr[i], r, r, + n0, divisor_limb, divisor_limb_inverted); + } + return r; + } + } + else { + if(UDIV_NEEDS_NORMALIZATION) { + int normalization_steps; + + count_leading_zeros (normalization_steps, divisor_limb); + if( normalization_steps ) { + divisor_limb <<= normalization_steps; + + n1 = dividend_ptr[dividend_size - 1]; + r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps); + + /* Possible optimization: + * if (r == 0 + * && divisor_limb > ((n1 << normalization_steps) + * | (dividend_ptr[dividend_size - 2] >> ...))) + * ...one division less... + */ + for( i = dividend_size - 2; i >= 0; i--) { + n0 = dividend_ptr[i]; + udiv_qrnnd (quot_ptr[i + 1], r, r, + ((n1 << normalization_steps) + | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))), + divisor_limb); + n1 = n0; + } + udiv_qrnnd (quot_ptr[0], r, r, + n1 << normalization_steps, + divisor_limb); + return r >> normalization_steps; + } + } + /* No normalization needed, either because udiv_qrnnd doesn't require + * it, or because DIVISOR_LIMB is already normalized. */ + i = dividend_size - 1; + r = dividend_ptr[i]; + + if(r >= divisor_limb) + r = 0; + else + quot_ptr[i--] = 0; + + for(; i >= 0; i--) { + n0 = dividend_ptr[i]; + udiv_qrnnd( quot_ptr[i], r, r, n0, divisor_limb ); + } + return r; + } +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-lshift.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-lshift.c new file mode 100644 index 0000000..f48c12c --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-lshift.c @@ -0,0 +1,68 @@ +/* mpi-lshift.c - MPI helper functions + * Copyright (C) 1994, 1996, 1998, 2001, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" + +/* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left + * and store the USIZE least significant digits of the result at WP. + * Return the bits shifted out from the most significant digit. + * + * Argument constraints: + * 1. 0 < CNT < BITS_PER_MP_LIMB + * 2. If the result is to be written over the input, WP must be >= UP. 
+ */ + +mpi_limb_t +_gcry_mpih_lshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, + unsigned int cnt) +{ + mpi_limb_t high_limb, low_limb; + unsigned sh_1, sh_2; + mpi_size_t i; + mpi_limb_t retval; + + sh_1 = cnt; + wp += 1; + sh_2 = BITS_PER_MPI_LIMB - sh_1; + i = usize - 1; + low_limb = up[i]; + retval = low_limb >> sh_2; + high_limb = low_limb; + while ( --i >= 0 ) + { + low_limb = up[i]; + wp[i] = (high_limb << sh_1) | (low_limb >> sh_2); + high_limb = low_limb; + } + wp[i] = high_limb << sh_1; + + return retval; +} + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-mul.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-mul.c new file mode 100644 index 0000000..44b3686 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-mul.c @@ -0,0 +1,530 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpih-mul.c - MPI helper functions + * Copyright (C) 1994, 1996, 1998, 1999, 2000, + * 2001, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include "mpi-internal.h" +#include "longlong.h" +#include "g10lib.h" + +#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \ + do { \ + if( (size) < KARATSUBA_THRESHOLD ) \ + mul_n_basecase (prodp, up, vp, size); \ + else \ + mul_n (prodp, up, vp, size, tspace); \ + } while (0); + +#define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \ + do { \ + if ((size) < KARATSUBA_THRESHOLD) \ + _gcry_mpih_sqr_n_basecase (prodp, up, size); \ + else \ + _gcry_mpih_sqr_n (prodp, up, size, tspace); \ + } while (0); + + + + +/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP), + * both with SIZE limbs, and store the result at PRODP. 2 * SIZE limbs are + * always stored. Return the most significant limb. + * + * Argument constraints: + * 1. PRODP != UP and PRODP != VP, i.e. the destination + * must be distinct from the multiplier and the multiplicand. + * + * + * Handle simple cases with traditional multiplication. + * + * This is the most critical code of multiplication. All multiplies rely + * on this, both small and huge. Small ones arrive here immediately. Huge + * ones arrive here as this is the base case for Karatsuba's recursive + * algorithm below. 
+ */ + +static mpi_limb_t +mul_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, + mpi_ptr_t vp, mpi_size_t size) +{ + mpi_size_t i; + mpi_limb_t cy; + mpi_limb_t v_limb; + + /* Multiply by the first limb in V separately, as the result can be + * stored (not added) to PROD. We also avoid a loop for zeroing. */ + v_limb = vp[0]; + if( v_limb <= 1 ) { + if( v_limb == 1 ) + MPN_COPY( prodp, up, size ); + else + MPN_ZERO( prodp, size ); + cy = 0; + } + else + cy = _gcry_mpih_mul_1( prodp, up, size, v_limb ); + + prodp[size] = cy; + prodp++; + + /* For each iteration in the outer loop, multiply one limb from + * U with one limb from V, and add it to PROD. */ + for( i = 1; i < size; i++ ) { + v_limb = vp[i]; + if( v_limb <= 1 ) { + cy = 0; + if( v_limb == 1 ) + cy = _gcry_mpih_add_n(prodp, prodp, up, size); + } + else + cy = _gcry_mpih_addmul_1(prodp, up, size, v_limb); + + prodp[size] = cy; + prodp++; + } + + return cy; +} + + +static void +mul_n( mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, + mpi_size_t size, mpi_ptr_t tspace ) +{ + if( size & 1 ) { + /* The size is odd, and the code below doesn't handle that. + * Multiply the least significant (size - 1) limbs with a recursive + * call, and handle the most significant limb of S1 and S2 + * separately. + * A slightly faster way to do this would be to make the Karatsuba + * code below behave as if the size were even, and let it check for + * odd size in the end. I.e., in essence move this code to the end. + * Doing so would save us a recursive call, and potentially make the + * stack grow a lot less. + */ + mpi_size_t esize = size - 1; /* even size */ + mpi_limb_t cy_limb; + + MPN_MUL_N_RECURSE( prodp, up, vp, esize, tspace ); + cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, vp[esize] ); + prodp[esize + esize] = cy_limb; + cy_limb = _gcry_mpih_addmul_1( prodp + esize, vp, size, up[esize] ); + prodp[esize + size] = cy_limb; + } + else { + /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm. + * + * Split U in two pieces, U1 and U0, such that + * U = U0 + U1*(B**n), + * and V in V1 and V0, such that + * V = V0 + V1*(B**n). + * + * UV is then computed recursively using the identity + * + * 2n n n n + * UV = (B + B )U V + B (U -U )(V -V ) + (B + 1)U V + * 1 1 1 0 0 1 0 0 + * + * Where B = 2**BITS_PER_MP_LIMB. + */ + mpi_size_t hsize = size >> 1; + mpi_limb_t cy; + int negflg; + + /* Product H. ________________ ________________ + * |_____U1 x V1____||____U0 x V0_____| + * Put result in upper part of PROD and pass low part of TSPACE + * as new TSPACE. + */ + MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize, tspace); + + /* Product M. ________________ + * |_(U1-U0)(V0-V1)_| + */ + if( _gcry_mpih_cmp(up + hsize, up, hsize) >= 0 ) { + _gcry_mpih_sub_n(prodp, up + hsize, up, hsize); + negflg = 0; + } + else { + _gcry_mpih_sub_n(prodp, up, up + hsize, hsize); + negflg = 1; + } + if( _gcry_mpih_cmp(vp + hsize, vp, hsize) >= 0 ) { + _gcry_mpih_sub_n(prodp + hsize, vp + hsize, vp, hsize); + negflg ^= 1; + } + else { + _gcry_mpih_sub_n(prodp + hsize, vp, vp + hsize, hsize); + /* No change of NEGFLG. */ + } + /* Read temporary operands from low part of PROD. + * Put result in low part of TSPACE using upper part of TSPACE + * as new TSPACE. + */ + MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize, tspace + size); + + /* Add/copy product H. 
*/ + MPN_COPY (prodp + hsize, prodp + size, hsize); + cy = _gcry_mpih_add_n( prodp + size, prodp + size, + prodp + size + hsize, hsize); + + /* Add product M (if NEGFLG M is a negative number) */ + if(negflg) + cy -= _gcry_mpih_sub_n(prodp + hsize, prodp + hsize, tspace, size); + else + cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size); + + /* Product L. ________________ ________________ + * |________________||____U0 x V0_____| + * Read temporary operands from low part of PROD. + * Put result in low part of TSPACE using upper part of TSPACE + * as new TSPACE. + */ + MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size); + + /* Add/copy Product L (twice) */ + + cy += _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace, size); + if( cy ) + _gcry_mpih_add_1(prodp + hsize + size, prodp + hsize + size, hsize, cy); + + MPN_COPY(prodp, tspace, hsize); + cy = _gcry_mpih_add_n(prodp + hsize, prodp + hsize, tspace + hsize, hsize); + if( cy ) + _gcry_mpih_add_1(prodp + size, prodp + size, size, 1); + } +} + + +void +_gcry_mpih_sqr_n_basecase( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size ) +{ + mpi_size_t i; + mpi_limb_t cy_limb; + mpi_limb_t v_limb; + + /* Multiply by the first limb in V separately, as the result can be + * stored (not added) to PROD. We also avoid a loop for zeroing. */ + v_limb = up[0]; + if( v_limb <= 1 ) { + if( v_limb == 1 ) + MPN_COPY( prodp, up, size ); + else + MPN_ZERO(prodp, size); + cy_limb = 0; + } + else + cy_limb = _gcry_mpih_mul_1( prodp, up, size, v_limb ); + + prodp[size] = cy_limb; + prodp++; + + /* For each iteration in the outer loop, multiply one limb from + * U with one limb from V, and add it to PROD. */ + for( i=1; i < size; i++) { + v_limb = up[i]; + if( v_limb <= 1 ) { + cy_limb = 0; + if( v_limb == 1 ) + cy_limb = _gcry_mpih_add_n(prodp, prodp, up, size); + } + else + cy_limb = _gcry_mpih_addmul_1(prodp, up, size, v_limb); + + prodp[size] = cy_limb; + prodp++; + } +} + + +void +_gcry_mpih_sqr_n( mpi_ptr_t prodp, + mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace) +{ + if( size & 1 ) { + /* The size is odd, and the code below doesn't handle that. + * Multiply the least significant (size - 1) limbs with a recursive + * call, and handle the most significant limb of S1 and S2 + * separately. + * A slightly faster way to do this would be to make the Karatsuba + * code below behave as if the size were even, and let it check for + * odd size in the end. I.e., in essence move this code to the end. + * Doing so would save us a recursive call, and potentially make the + * stack grow a lot less. + */ + mpi_size_t esize = size - 1; /* even size */ + mpi_limb_t cy_limb; + + MPN_SQR_N_RECURSE( prodp, up, esize, tspace ); + cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, esize, up[esize] ); + prodp[esize + esize] = cy_limb; + cy_limb = _gcry_mpih_addmul_1( prodp + esize, up, size, up[esize] ); + + prodp[esize + size] = cy_limb; + } + else { + mpi_size_t hsize = size >> 1; + mpi_limb_t cy; + + /* Product H. ________________ ________________ + * |_____U1 x U1____||____U0 x U0_____| + * Put result in upper part of PROD and pass low part of TSPACE + * as new TSPACE. + */ + MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace); + + /* Product M. ________________ + * |_(U1-U0)(U0-U1)_| + */ + if( _gcry_mpih_cmp( up + hsize, up, hsize) >= 0 ) + _gcry_mpih_sub_n( prodp, up + hsize, up, hsize); + else + _gcry_mpih_sub_n (prodp, up, up + hsize, hsize); + + /* Read temporary operands from low part of PROD. 
+ * Put result in low part of TSPACE using upper part of TSPACE + * as new TSPACE. */ + MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size); + + /* Add/copy product H */ + MPN_COPY(prodp + hsize, prodp + size, hsize); + cy = _gcry_mpih_add_n(prodp + size, prodp + size, + prodp + size + hsize, hsize); + + /* Add product M (if NEGFLG M is a negative number). */ + cy -= _gcry_mpih_sub_n (prodp + hsize, prodp + hsize, tspace, size); + + /* Product L. ________________ ________________ + * |________________||____U0 x U0_____| + * Read temporary operands from low part of PROD. + * Put result in low part of TSPACE using upper part of TSPACE + * as new TSPACE. */ + MPN_SQR_N_RECURSE (tspace, up, hsize, tspace + size); + + /* Add/copy Product L (twice). */ + cy += _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace, size); + if( cy ) + _gcry_mpih_add_1(prodp + hsize + size, prodp + hsize + size, + hsize, cy); + + MPN_COPY(prodp, tspace, hsize); + cy = _gcry_mpih_add_n (prodp + hsize, prodp + hsize, tspace + hsize, hsize); + if( cy ) + _gcry_mpih_add_1 (prodp + size, prodp + size, size, 1); + } +} + + +/* This should be made into an inline function in gmp.h. */ +void +_gcry_mpih_mul_n( mpi_ptr_t prodp, + mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size) +{ + int secure; + + if( up == vp ) { + if( size < KARATSUBA_THRESHOLD ) + _gcry_mpih_sqr_n_basecase( prodp, up, size ); + else { + mpi_ptr_t tspace; + secure = gcry_is_secure( up ); + tspace = mpi_alloc_limb_space( 2 * size, secure ); + _gcry_mpih_sqr_n( prodp, up, size, tspace ); + _gcry_mpi_free_limb_space (tspace, 2 * size ); + } + } + else { + if( size < KARATSUBA_THRESHOLD ) + mul_n_basecase( prodp, up, vp, size ); + else { + mpi_ptr_t tspace; + secure = gcry_is_secure( up ) || gcry_is_secure( vp ); + tspace = mpi_alloc_limb_space( 2 * size, secure ); + mul_n (prodp, up, vp, size, tspace); + _gcry_mpi_free_limb_space (tspace, 2 * size ); + } + } +} + + + +void +_gcry_mpih_mul_karatsuba_case( mpi_ptr_t prodp, + mpi_ptr_t up, mpi_size_t usize, + mpi_ptr_t vp, mpi_size_t vsize, + struct karatsuba_ctx *ctx ) +{ + mpi_limb_t cy; + + if( !ctx->tspace || ctx->tspace_size < vsize ) { + if( ctx->tspace ) + _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs ); + ctx->tspace_nlimbs = 2 * vsize; + ctx->tspace = mpi_alloc_limb_space( 2 * vsize, + (gcry_is_secure( up ) + || gcry_is_secure( vp )) ); + ctx->tspace_size = vsize; + } + + MPN_MUL_N_RECURSE( prodp, up, vp, vsize, ctx->tspace ); + + prodp += vsize; + up += vsize; + usize -= vsize; + if( usize >= vsize ) { + if( !ctx->tp || ctx->tp_size < vsize ) { + if( ctx->tp ) + _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs ); + ctx->tp_nlimbs = 2 * vsize; + ctx->tp = mpi_alloc_limb_space( 2 * vsize, gcry_is_secure( up ) + || gcry_is_secure( vp ) ); + ctx->tp_size = vsize; + } + + do { + MPN_MUL_N_RECURSE( ctx->tp, up, vp, vsize, ctx->tspace ); + cy = _gcry_mpih_add_n( prodp, prodp, ctx->tp, vsize ); + _gcry_mpih_add_1( prodp + vsize, ctx->tp + vsize, vsize, cy ); + prodp += vsize; + up += vsize; + usize -= vsize; + } while( usize >= vsize ); + } + + if( usize ) { + if( usize < KARATSUBA_THRESHOLD ) { + _gcry_mpih_mul( ctx->tspace, vp, vsize, up, usize ); + } + else { + if( !ctx->next ) { + ctx->next = gcry_xcalloc( 1, sizeof *ctx ); + } + _gcry_mpih_mul_karatsuba_case( ctx->tspace, + vp, vsize, + up, usize, + ctx->next ); + } + + cy = _gcry_mpih_add_n( prodp, prodp, ctx->tspace, vsize); + _gcry_mpih_add_1( prodp + vsize, ctx->tspace + vsize, usize, cy ); + } +} + + +void 
+_gcry_mpih_release_karatsuba_ctx( struct karatsuba_ctx *ctx ) +{ + struct karatsuba_ctx *ctx2; + + if( ctx->tp ) + _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs ); + if( ctx->tspace ) + _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs ); + for( ctx=ctx->next; ctx; ctx = ctx2 ) { + ctx2 = ctx->next; + if( ctx->tp ) + _gcry_mpi_free_limb_space( ctx->tp, ctx->tp_nlimbs ); + if( ctx->tspace ) + _gcry_mpi_free_limb_space( ctx->tspace, ctx->tspace_nlimbs ); + gcry_free( ctx ); + } +} + +/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs) + * and v (pointed to by VP, with VSIZE limbs), and store the result at + * PRODP. USIZE + VSIZE limbs are always stored, but if the input + * operands are normalized. Return the most significant limb of the + * result. + * + * NOTE: The space pointed to by PRODP is overwritten before finished + * with U and V, so overlap is an error. + * + * Argument constraints: + * 1. USIZE >= VSIZE. + * 2. PRODP != UP and PRODP != VP, i.e. the destination + * must be distinct from the multiplier and the multiplicand. + */ + +mpi_limb_t +_gcry_mpih_mul( mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, + mpi_ptr_t vp, mpi_size_t vsize) +{ + mpi_ptr_t prod_endp = prodp + usize + vsize - 1; + mpi_limb_t cy; + struct karatsuba_ctx ctx; + + if( vsize < KARATSUBA_THRESHOLD ) { + mpi_size_t i; + mpi_limb_t v_limb; + + if( !vsize ) + return 0; + + /* Multiply by the first limb in V separately, as the result can be + * stored (not added) to PROD. We also avoid a loop for zeroing. */ + v_limb = vp[0]; + if( v_limb <= 1 ) { + if( v_limb == 1 ) + MPN_COPY( prodp, up, usize ); + else + MPN_ZERO( prodp, usize ); + cy = 0; + } + else + cy = _gcry_mpih_mul_1( prodp, up, usize, v_limb ); + + prodp[usize] = cy; + prodp++; + + /* For each iteration in the outer loop, multiply one limb from + * U with one limb from V, and add it to PROD. */ + for( i = 1; i < vsize; i++ ) { + v_limb = vp[i]; + if( v_limb <= 1 ) { + cy = 0; + if( v_limb == 1 ) + cy = _gcry_mpih_add_n(prodp, prodp, up, usize); + } + else + cy = _gcry_mpih_addmul_1(prodp, up, usize, v_limb); + + prodp[usize] = cy; + prodp++; + } + + return cy; + } + + memset( &ctx, 0, sizeof ctx ); + _gcry_mpih_mul_karatsuba_case( prodp, up, usize, vp, vsize, &ctx ); + _gcry_mpih_release_karatsuba_ctx( &ctx ); + return *prod_endp; +} diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-mul1.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-mul1.c new file mode 100644 index 0000000..0e8197d --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-mul1.c @@ -0,0 +1,62 @@ +/* mpihelp-mul_1.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, 2001, + * 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + +mpi_limb_t +_gcry_mpih_mul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, + mpi_limb_t s2_limb) +{ + mpi_limb_t cy_limb; + mpi_size_t j; + mpi_limb_t prod_high, prod_low; + + /* The loop counter and index J goes from -S1_SIZE to -1. This way + * the loop becomes faster. */ + j = -s1_size; + + /* Offset the base pointers to compensate for the negative indices. */ + s1_ptr -= j; + res_ptr -= j; + + cy_limb = 0; + do + { + umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb ); + prod_low += cy_limb; + cy_limb = (prod_low < cy_limb?1:0) + prod_high; + res_ptr[j] = prod_low; + } + while( ++j ); + + return cy_limb; +} + diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-mul2.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-mul2.c new file mode 100644 index 0000000..3b75496 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-mul2.c @@ -0,0 +1,68 @@ +/* mpih-mul2.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, 2001, + * 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + + +mpi_limb_t +_gcry_mpih_addmul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb) +{ + mpi_limb_t cy_limb; + mpi_size_t j; + mpi_limb_t prod_high, prod_low; + mpi_limb_t x; + + /* The loop counter and index J goes from -SIZE to -1. This way + * the loop becomes faster. 
*/ + j = -s1_size; + res_ptr -= j; + s1_ptr -= j; + + cy_limb = 0; + do + { + umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb ); + + prod_low += cy_limb; + cy_limb = (prod_low < cy_limb?1:0) + prod_high; + + x = res_ptr[j]; + prod_low = x + prod_low; + cy_limb += prod_low < x?1:0; + res_ptr[j] = prod_low; + } + while ( ++j ); + + return cy_limb; +} + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-mul3.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-mul3.c new file mode 100644 index 0000000..5e84f94 --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-mul3.c @@ -0,0 +1,68 @@ +/* mpih-mul3.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, 2001, + * 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + + +mpi_limb_t +_gcry_mpih_submul_1( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_size_t s1_size, mpi_limb_t s2_limb) +{ + mpi_limb_t cy_limb; + mpi_size_t j; + mpi_limb_t prod_high, prod_low; + mpi_limb_t x; + + /* The loop counter and index J goes from -SIZE to -1. This way + * the loop becomes faster. */ + j = -s1_size; + res_ptr -= j; + s1_ptr -= j; + + cy_limb = 0; + do + { + umul_ppmm( prod_high, prod_low, s1_ptr[j], s2_limb); + + prod_low += cy_limb; + cy_limb = (prod_low < cy_limb?1:0) + prod_high; + + x = res_ptr[j]; + prod_low = x - prod_low; + cy_limb += prod_low > x?1:0; + res_ptr[j] = prod_low; + } + while( ++j ); + + return cy_limb; +} + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-rshift.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-rshift.c new file mode 100644 index 0000000..e40794f --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-rshift.c @@ -0,0 +1,67 @@ +/* mpih-rshift.c - MPI helper functions + * Copyright (C) 1994, 1996, 1998, 1999, + * 2000, 2001, 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" + + +/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right + * and store the USIZE least significant limbs of the result at WP. + * The bits shifted out to the right are returned. + * + * Argument constraints: + * 1. 0 < CNT < BITS_PER_MP_LIMB + * 2. If the result is to be written over the input, WP must be <= UP. + */ + +mpi_limb_t +_gcry_mpih_rshift( mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt) +{ + mpi_limb_t high_limb, low_limb; + unsigned sh_1, sh_2; + mpi_size_t i; + mpi_limb_t retval; + + sh_1 = cnt; + wp -= 1; + sh_2 = BITS_PER_MPI_LIMB - sh_1; + high_limb = up[0]; + retval = high_limb << sh_2; + low_limb = high_limb; + for (i=1; i < usize; i++) + { + high_limb = up[i]; + wp[i] = (low_limb >> sh_1) | (high_limb << sh_2); + low_limb = high_limb; + } + wp[i] = low_limb >> sh_1; + + return retval; +} + diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpih-sub1.c b/grub-core/lib/libgcrypt-grub/mpi/mpih-sub1.c new file mode 100644 index 0000000..e88821b --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpih-sub1.c @@ -0,0 +1,66 @@ +/* mpihelp-add_2.c - MPI helper functions + * Copyright (C) 1994, 1996, 1997, 1998, 2001, + * 2002 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + * + * Note: This code is heavily based on the GNU MP Library. + * Actually it's the same code with only minor changes in the + * way the data is stored; this is to support the abstraction + * of an optional secure memory allocation which may be used + * to avoid revealing of sensitive data due to paging etc. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include "mpi-internal.h" +#include "longlong.h" + +mpi_limb_t +_gcry_mpih_sub_n( mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, + mpi_ptr_t s2_ptr, mpi_size_t size) +{ + mpi_limb_t x, y, cy; + mpi_size_t j; + + /* The loop counter and index J goes from -SIZE to -1. This way + the loop becomes faster. */ + j = -size; + + /* Offset the base pointers to compensate for the negative indices. 
*/ + s1_ptr -= j; + s2_ptr -= j; + res_ptr -= j; + + cy = 0; + do + { + y = s2_ptr[j]; + x = s1_ptr[j]; + y += cy; /* add previous carry to subtrahend */ + cy = y < cy; /* get out carry from that addition */ + y = x - y; /* main subtract */ + cy += y > x; /* get out carry from the subtract, combine */ + res_ptr[j] = y; + } + while( ++j ); + + return cy; +} + + diff --git a/grub-core/lib/libgcrypt-grub/mpi/mpiutil.c b/grub-core/lib/libgcrypt-grub/mpi/mpiutil.c new file mode 100644 index 0000000..3fec8db --- /dev/null +++ b/grub-core/lib/libgcrypt-grub/mpi/mpiutil.c @@ -0,0 +1,435 @@ +/* This file was automatically imported with + import_gcry.py. Please don't modify it */ +/* mpiutil.ac - Utility functions for MPI + * Copyright (C) 1998, 2000, 2001, 2002, 2003, + * 2007 Free Software Foundation, Inc. + * + * This file is part of Libgcrypt. + * + * Libgcrypt is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * Libgcrypt is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#include <config.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "g10lib.h" +#include "mpi-internal.h" + + + + +/**************** + * Note: It was a bad idea to use the number of limbs to allocate + * because on a alpha the limbs are large but we normally need + * integers of n bits - So we should change this to bits (or bytes). + * + * But mpi_alloc is used in a lot of places :-(. New code + * should use mpi_new. + */ +gcry_mpi_t +_gcry_mpi_alloc( unsigned nlimbs ) +{ + gcry_mpi_t a; + + a = gcry_xmalloc( sizeof *a ); + a->d = nlimbs? mpi_alloc_limb_space( nlimbs, 0 ) : NULL; + a->alloced = nlimbs; + a->nlimbs = 0; + a->sign = 0; + a->flags = 0; + return a; +} + +void +_gcry_mpi_m_check( gcry_mpi_t a ) +{ + _gcry_check_heap(a); + _gcry_check_heap(a->d); +} + +gcry_mpi_t +_gcry_mpi_alloc_secure( unsigned nlimbs ) +{ + gcry_mpi_t a; + + a = gcry_xmalloc( sizeof *a ); + a->d = nlimbs? mpi_alloc_limb_space( nlimbs, 1 ) : NULL; + a->alloced = nlimbs; + a->flags = 1; + a->nlimbs = 0; + a->sign = 0; + return a; +} + + + +mpi_ptr_t +_gcry_mpi_alloc_limb_space( unsigned int nlimbs, int secure ) +{ + mpi_ptr_t p; + size_t len; + + len = (nlimbs ? nlimbs : 1) * sizeof (mpi_limb_t); + p = secure ? gcry_xmalloc_secure (len) : gcry_xmalloc (len); + if (! nlimbs) + *p = 0; + + return p; +} + +void +_gcry_mpi_free_limb_space( mpi_ptr_t a, unsigned int nlimbs) +{ + if (a) + { + size_t len = nlimbs * sizeof(mpi_limb_t); + + /* If we have information on the number of allocated limbs, we + better wipe that space out. This is a failsafe feature if + secure memory has been disabled or was not properly + implemented in user provided allocation functions. */ + if (len) + wipememory (a, len); + gcry_free(a); + } +} + + +void +_gcry_mpi_assign_limb_space( gcry_mpi_t a, mpi_ptr_t ap, unsigned int nlimbs ) +{ + _gcry_mpi_free_limb_space (a->d, a->alloced); + a->d = ap; + a->alloced = nlimbs; +} + + + +/**************** + * Resize the array of A to NLIMBS. 
The additional space is cleared + * (set to 0). + */ +void +_gcry_mpi_resize (gcry_mpi_t a, unsigned nlimbs) +{ + size_t i; + + if (nlimbs <= a->alloced) + { + /* We only need to clear the new space (this is a nop if the + limb space is already of the correct size. */ + for (i=a->nlimbs; i < a->alloced; i++) + a->d[i] = 0; + return; + } + + /* Actually resize the limb space. */ + if (a->d) + { + a->d = gcry_xrealloc (a->d, nlimbs * sizeof (mpi_limb_t)); + for (i=a->alloced; i < nlimbs; i++) + a->d[i] = 0; + } + else + { + if (a->flags & 1) + /* Secure memory is wanted. */ + a->d = gcry_xcalloc_secure (nlimbs , sizeof (mpi_limb_t)); + else + /* Standard memory. */ + a->d = gcry_xcalloc (nlimbs , sizeof (mpi_limb_t)); + } + a->alloced = nlimbs; +} + +void +_gcry_mpi_clear( gcry_mpi_t a ) +{ + a->nlimbs = 0; + a->flags = 0; +} + + +void +_gcry_mpi_free( gcry_mpi_t a ) +{ + if (!a ) + return; + if ((a->flags & 4)) + gcry_free( a->d ); + else + { + _gcry_mpi_free_limb_space(a->d, a->alloced); + } + if ((a->flags & ~7)) + log_bug("invalid flag value in mpi\n"); + gcry_free(a); +} + +static void +mpi_set_secure( gcry_mpi_t a ) +{ + mpi_ptr_t ap, bp; + + if ( (a->flags & 1) ) + return; + a->flags |= 1; + ap = a->d; + if (!a->nlimbs) + { + gcry_assert (!ap); + return; + } + bp = mpi_alloc_limb_space (a->nlimbs, 1); + MPN_COPY( bp, ap, a->nlimbs ); + a->d = bp; + _gcry_mpi_free_limb_space (ap, a->alloced); +} + + +gcry_mpi_t +gcry_mpi_set_opaque( gcry_mpi_t a, void *p, unsigned int nbits ) +{ + if (!a) + a = mpi_alloc(0); + + if( a->flags & 4 ) + gcry_free( a->d ); + else + _gcry_mpi_free_limb_space (a->d, a->alloced); + + a->d = p; + a->alloced = 0; + a->nlimbs = 0; + a->sign = nbits; + a->flags = 4; + return a; +} + + +void * +gcry_mpi_get_opaque( gcry_mpi_t a, unsigned int *nbits ) +{ + if( !(a->flags & 4) ) + log_bug("mpi_get_opaque on normal mpi\n"); + if( nbits ) + *nbits = a->sign; + return a->d; +} + + +/**************** + * Note: This copy function should not interpret the MPI + * but copy it transparently. + */ +gcry_mpi_t +gcry_mpi_copy( gcry_mpi_t a ) +{ + int i; + gcry_mpi_t b; + + if( a && (a->flags & 4) ) { + void *p = gcry_is_secure(a->d)? gcry_xmalloc_secure( (a->sign+7)/8 ) + : gcry_xmalloc( (a->sign+7)/8 ); + memcpy( p, a->d, (a->sign+7)/8 ); + b = gcry_mpi_set_opaque( NULL, p, a->sign ); + } + else if( a ) { + b = mpi_is_secure(a)? mpi_alloc_secure( a->nlimbs ) + : mpi_alloc( a->nlimbs ); + b->nlimbs = a->nlimbs; + b->sign = a->sign; + b->flags = a->flags; + for(i=0; i < b->nlimbs; i++ ) + b->d[i] = a->d[i]; + } + else + b = NULL; + return b; +} + + +/**************** + * This function allocates an MPI which is optimized to hold + * a value as large as the one given in the argument and allocates it + * with the same flags as A. + */ +gcry_mpi_t +_gcry_mpi_alloc_like( gcry_mpi_t a ) +{ + gcry_mpi_t b; + + if( a && (a->flags & 4) ) { + int n = (a->sign+7)/8; + void *p = gcry_is_secure(a->d)? gcry_malloc_secure( n ) + : gcry_malloc( n ); + memcpy( p, a->d, n ); + b = gcry_mpi_set_opaque( NULL, p, a->sign ); + } + else if( a ) { + b = mpi_is_secure(a)? 
mpi_alloc_secure( a->nlimbs ) + : mpi_alloc( a->nlimbs ); + b->nlimbs = 0; + b->sign = 0; + b->flags = a->flags; + } + else + b = NULL; + return b; +} + + +gcry_mpi_t +gcry_mpi_set( gcry_mpi_t w, gcry_mpi_t u) +{ + mpi_ptr_t wp, up; + mpi_size_t usize = u->nlimbs; + int usign = u->sign; + + if (!w) + w = _gcry_mpi_alloc( mpi_get_nlimbs(u) ); + RESIZE_IF_NEEDED(w, usize); + wp = w->d; + up = u->d; + MPN_COPY( wp, up, usize ); + w->nlimbs = usize; + w->flags = u->flags; + w->sign = usign; + return w; +} + + +gcry_mpi_t +gcry_mpi_set_ui( gcry_mpi_t w, unsigned long u) +{ + if (!w) + w = _gcry_mpi_alloc (1); + /* FIXME: If U is 0 we have no need to resize and thus possible + allocating the the limbs. */ + RESIZE_IF_NEEDED(w, 1); + w->d[0] = u; + w->nlimbs = u? 1:0; + w->sign = 0; + w->flags = 0; + return w; +} + +gcry_err_code_t +_gcry_mpi_get_ui (gcry_mpi_t w, unsigned long *u) +{ + gcry_err_code_t err = GPG_ERR_NO_ERROR; + unsigned long x = 0; + + if (w->nlimbs > 1) + err = GPG_ERR_TOO_LARGE; + else if (w->nlimbs == 1) + x = w->d[0]; + else + x = 0; + + if (! err) + *u = x; + + return err; +} + +gcry_error_t +gcry_mpi_get_ui (gcry_mpi_t w, unsigned long *u) +{ + gcry_err_code_t err = GPG_ERR_NO_ERROR; + + err = _gcry_mpi_get_ui (w, u); + + return gcry_error (err); +} + +gcry_mpi_t +_gcry_mpi_alloc_set_ui( unsigned long u) +{ + gcry_mpi_t w = mpi_alloc(1); + w->d[0] = u; + w->nlimbs = u? 1:0; + w->sign = 0; + return w; +} + +void +gcry_mpi_swap( gcry_mpi_t a, gcry_mpi_t b) +{ + struct gcry_mpi tmp; + + tmp = *a; *a = *b; *b = tmp; +} + + +gcry_mpi_t +gcry_mpi_new( unsigned int nbits ) +{ + return _gcry_mpi_alloc ( (nbits+BITS_PER_MPI_LIMB-1) + / BITS_PER_MPI_LIMB ); +} + + +gcry_mpi_t +gcry_mpi_snew( unsigned int nbits ) +{ + return _gcry_mpi_alloc_secure ( (nbits+BITS_PER_MPI_LIMB-1) + / BITS_PER_MPI_LIMB ); +} + +void +gcry_mpi_release( gcry_mpi_t a ) +{ + _gcry_mpi_free( a ); +} + + + +void +gcry_mpi_set_flag( gcry_mpi_t a, enum gcry_mpi_flag flag ) +{ + switch( flag ) { + case GCRYMPI_FLAG_SECURE: mpi_set_secure(a); break; + case GCRYMPI_FLAG_OPAQUE: + default: log_bug("invalid flag value\n"); + } +} + +void +gcry_mpi_clear_flag( gcry_mpi_t a, enum gcry_mpi_flag flag ) +{ + (void)a; /* Not yet used. */ + + switch (flag) + { + case GCRYMPI_FLAG_SECURE: + case GCRYMPI_FLAG_OPAQUE: + default: log_bug("invalid flag value\n"); + } +} + +int +gcry_mpi_get_flag( gcry_mpi_t a, enum gcry_mpi_flag flag ) +{ + switch (flag) + { + case GCRYMPI_FLAG_SECURE: return (a->flags & 1); + case GCRYMPI_FLAG_OPAQUE: return (a->flags & 4); + default: log_bug("invalid flag value\n"); + } + /*NOTREACHED*/ + return 0; +} |
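The Karatsuba decomposition that mul_n() and _gcry_mpih_sqr_n() rely on is easiest to see with single-word operands. The following standalone sketch is not part of the imported libgcrypt files; karatsuba32() and the sample table are invented purely for illustration. It checks the identity quoted in the mul_n() comment, UV = (B^2 + B)*U1*V1 + B*(U1-U0)*(V0-V1) + (B + 1)*U0*V0, with B = 2^16, so a 32x32-bit product is formed from only three 16x16-bit multiplications:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Multiply two 32-bit values using three 16x16-bit products, following
 * the identity used by mul_n() above with B = 2**16, U = U1*B + U0 and
 * V = V1*B + V0.  Illustrative only; the names are hypothetical. */
static uint64_t
karatsuba32 (uint32_t u, uint32_t v)
{
  int64_t u1 = u >> 16, u0 = u & 0xffff;
  int64_t v1 = v >> 16, v0 = v & 0xffff;

  int64_t high = u1 * v1;               /* U1 * V1          */
  int64_t low  = u0 * v0;               /* U0 * V0          */
  int64_t mid  = (u1 - u0) * (v0 - v1); /* may be negative  */

  /* (B**2 + B)*high + B*mid + (B + 1)*low
   *  = B**2 * high + B*(high + mid + low) + low,
   * and high + mid + low equals U1*V0 + U0*V1, which is never negative. */
  return ((uint64_t) high << 32)
         + (uint64_t) (high + mid + low) * 65536u
         + (uint64_t) low;
}

int
main (void)
{
  static const uint32_t samples[] =
    { 0, 1, 0xffff, 0x10000, 0x12345678, 0xdeadbeef, 0xffffffff };
  size_t i, j;

  for (i = 0; i < sizeof samples / sizeof samples[0]; i++)
    for (j = 0; j < sizeof samples / sizeof samples[0]; j++)
      assert (karatsuba32 (samples[i], samples[j])
              == (uint64_t) samples[i] * samples[j]);

  printf ("Karatsuba identity verified for all sample pairs\n");
  return 0;
}

The imported code applies the same identity with n-limb halves instead of 16-bit halves, recursing through MPN_MUL_N_RECURSE until the operand size drops below KARATSUBA_THRESHOLD, at which point mul_n_basecase() takes over; the sign bookkeeping done with negflg corresponds to the signed "mid" term in this sketch.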