/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.

Copyright 1991, 1992, 1993, 1994, 1996, 1997, 1999, 2000, 2001, 2002, 2003,
2004, 2005 Free Software Foundation, Inc.

This file is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version.

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with this file; see the file COPYING.LIB.  If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */

#if defined (__GNUC__)

#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
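
/* add_ssaaaa and sub_ddmmss below form a two-word sum or difference: addcc
   and subcc produce the low word and set the carry, and addx/subx fold that
   carry into the high word.  Both clobber the condition codes, hence the
   __CLOBBER_CC defined above.  */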
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0" \
           : "=r" (sh), "=&r" (sl) \
           : "rJ" (ah), "rI" (bh), "%rJ" (al), "rI" (bl) \
           __CLOBBER_CC)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0" \
           : "=r" (sh), "=&r" (sl) \
           : "rJ" (ah), "rI" (bh), "rJ" (al), "rI" (bl) \
           __CLOBBER_CC)
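
/* A minimal usage sketch (hypothetical variables; USItype is the 32-bit type
   already used by the macros in this file): given USItype hi, lo, ah, al,
   bh, bl, the call add_ssaaaa (hi, lo, ah, al, bh, bl) leaves the two-word
   sum (ah,al) + (bh,bl) in (hi,lo), and sub_ddmmss computes the difference
   (ah,al) - (bh,bl) the same way.  */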

/* FIXME: When gcc -mcpu=v9 is used on solaris, gcc/config/sol2-sld-64.h
   doesn't define anything to indicate that to us, it only sets __sparcv8.  */
#if defined (__sparc_v9__) || defined (__sparcv9)
/* Perhaps we should use floating-point operations here?  */
#if 0
/* Triggers a bug making mpz/tests/t-gcd.c fail.
   Perhaps we simply need to explicitly zero-extend the inputs?  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mulx %2,%3,%%g1; srl %%g1,0,%1; srlx %%g1,32,%0" : \
           "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "g1")
#else
/* Use v8 umul until above bug is fixed.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
#endif
/* Use a plain v8 divide for v9.  */
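/* The y register supplies the high word of the dividend: mov %1,%%y loads n1,
   the nops cover the delayed y-register write, and udiv then divides the
   64-bit value n1:n0 by d.  The remainder is recovered as n0 - q*d, which is
   exact modulo 2^32.  */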
#define udiv_qrnnd(q, r, n1, n0, d) \
  do { \
    USItype __q; \
    __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
             : "=r" (__q) : "r" (n1), "r" (n0), "r" (d)); \
    (r) = (n0) - __q * (d); \
    (q) = __q; \
  } while (0)
#else
#if defined (__sparc_v8__)   /* gcc normal */ \
  || defined (__sparcv8)     /* gcc solaris */
/* Don't match the immediate range because (1) it is not often useful and
   (2) the 'I' constraint treats the range as a 13-bit signed interval,
   while we want to match a 13-bit interval, sign extended to 32 bits,
   but INTERPRETED AS UNSIGNED.  */
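/* In other words, the immediates the hardware can encode correspond, viewed
   as unsigned 32-bit values, to 0..4095 and 0xfffff000..0xffffffff, while
   'I' only describes the signed view, so these macros keep all operands in
   registers.  */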
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))

#define udiv_qrnnd(q, r, n1, n0, d) \
  do { \
    USItype __q; \
    __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
             : "=r" (__q) : "r" (n1), "r" (n0), "r" (d)); \
    (r) = (n0) - __q * (d); \
    (q) = __q; \
  } while (0)

#else /* ! __sparc_v8__ */
#if defined (__sparclite__)
/* This has hardware multiply but not divide.  It also has two additional
   instructions scan (ffs from high bit) and divscc.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("! Inlined udiv_qrnnd\n" \
           " wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
           " tst %%g0\n" \
           " divscc %3,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%%g1\n" \
           " divscc %%g1,%4,%0\n" \
           " rd %%y,%1\n" \
           " bl,a 1f\n" \
           " add %1,%4,%1\n" \
           "1: ! End of inline udiv_qrnnd" \
           : "=r" (q), "=r" (r) : "r" (n1), "r" (n0), "rI" (d) \
           : "%g1" __AND_CLOBBER_CC)
#define count_leading_zeros(count, x) \
  __asm__ ("scan %1,1,%0" : "=r" (count) : "r" (x))
/* Early sparclites return 63 for an argument of 0, but they warn that future
   implementations might change this.  Therefore, leave COUNT_LEADING_ZEROS_0
   undefined.  */
#endif /* __sparclite__ */
#endif /* __sparc_v8__ */
#endif /* __sparc_v9__ */

/* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd.  */
#ifndef umul_ppmm
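/* Each mulscc below performs one shift-and-add step of a 32x32->64 multiply
   driven by the y register; the chain effectively treats the operand handed
   to mulscc (v) as signed.  The sra/and prologue therefore prepares u
   whenever v's top bit is set, and the final add folds it into the high word
   to give the unsigned product.  Those prologue instructions also fill the
   0-3 delay slots after the wr, which is why they must not be moved.  */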
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm\n" \
           " wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \
           " sra %3,31,%%g2 ! Don't move this insn\n" \
           " and %2,%%g2,%%g2 ! Don't move this insn\n" \
           " andcc %%g0,0,%%g1 ! Don't move this insn\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,%3,%%g1\n" \
           " mulscc %%g1,0,%%g1\n" \
           " add %%g1,%%g2,%0\n" \
           " rd %%y,%1" \
           : "=r" (w1), "=r" (w0) : "%rI" (u), "r" (v) \
           : "%g1", "%g2" __AND_CLOBBER_CC)
#endif

#ifndef udiv_qrnnd
#ifndef LONGLONG_STANDALONE
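/* No inline asm version was defined above, so fall back to the library
   routine __MPN(udiv_qrnnd) declared below: it returns the quotient and
   stores the remainder through its first argument.  */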
#define udiv_qrnnd(q, r, n1, n0, d) \
  do { UWtype __r; \
    (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d)); \
    (r) = __r; \
  } while (0)
extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
#endif /* LONGLONG_STANDALONE */
#endif /* udiv_qrnnd */

#endif /* __GNUC__ */