home *** CD-ROM | disk | FTP | other *** search
- /* Superoptimizer definitions.
-
- Copyright (C) 1991, 1992 Free Software Foundation, Inc.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 2, or (at your option) any
- later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; see the file COPYING. If not, write to the Free
- Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
-
/* Target CPU selection.  If the builder did not choose a target with
   -D<CPU> on the command line, fall back to the host's CPU type.
   NOTE(review): expanding `defined' from a macro replacement list is
   undefined behavior in ISO C; this relies on the traditional
   preprocessor semantics of the early-1990s compilers this supports --
   confirm against the compilers actually in use.  */
#if !(defined(SPARC) || defined(RS6000) || defined(M88000) \
      || defined(AM29K) || defined(MC68000) || defined(MC68020) \
      || defined(I386) || defined(PYR) || defined(ALPHA))
/* If no target instruction set is defined, use host instruction set.  */
#define SPARC (defined(sparc) || defined(__sparc__))
#define RS6000 (defined(rs6000) || defined(_IBMR2))
#define M88000 (defined(m88000) || defined(__m88000__))
#define AM29K (defined(_AM29K) || defined(_AM29000))
#define MC68020 (defined(m68020) || defined(mc68020))
#define MC68000 (defined(m68000) || defined(mc68000))
#define I386 (defined(i386) || defined(i80386))
#define PYR (defined(pyr) || defined(__pyr__))
#define ALPHA defined(__alpha)
#endif

/* Either 68000-family flavor counts as "68000".  */
#define M68000 (MC68000 || MC68020)

#if !(SPARC || RS6000 || M88000 || AM29K || M68000 || I386 || PYR || ALPHA)
You have to choose target CPU type (e.g. -DSPARC).
#endif

/* Cost of a shift by CNT bits.  The 68000 has no barrel shifter, so its
   cost grows with the count; other targets shift in unit time.  */
#if MC68000
#define SHIFT_COST(CNT) ((8+2*(CNT)) / 5) /* internal cost */
#else
#define SHIFT_COST(CNT) 1
#endif

/* Unless the builder asked for the inline-asm PERFORM_* variants, use
   the portable C fallbacks below.  */
#ifndef USE_ASM
#define NO_ASM 1
#endif

#include "longlong.h"

/* Word size of the simulated TARGET (not necessarily of the host).  */
#if ALPHA
#define BITS_PER_WORD 64
#else /* ! ALPHA */
#define BITS_PER_WORD 32
#endif

/* `word' (and its signed/unsigned aliases) is the target machine word.  */
#if BITS_PER_WORD == 64
#ifdef __GNUC__
typedef unsigned long long int unsigned_word;
typedef signed long long int signed_word;
typedef unsigned_word word;
#elif __alpha /* Native compiler on alpha has 64 bit longs.  */
typedef unsigned long int unsigned_word;
typedef signed long int signed_word;
typedef unsigned_word word;
#else /* Not on alpha, not GCC.  Don't have 64 bit type.  */
#error Do not know how to perform 64 bit arithmetic with this compiler.
#endif
#else
typedef unsigned int unsigned_word;
typedef signed int signed_word;
typedef unsigned_word word;
#endif


/* Reduce a shift count to the target's legal range [0, BITS_PER_WORD).  */
#define TRUNC_CNT(cnt) ((cnt) & (BITS_PER_WORD - 1))

#if defined(sparc) || defined(__GNUC__)
#define alloca __builtin_alloca
#endif

/* `inline' only helps under GCC with optimization on; elide it elsewhere.  */
#if !defined(__GNUC__) || !defined(__OPTIMIZE__)
#define inline /* Empty */
#endif
-
- /* The IMMEDIATE_* macros are for printing assembler. NOT for sequence
- generating or analyze. */
- #define IMMEDIATE_P(op) (op >= 0x20 - 3)
- static const word __immediate_val[] =
- {
- (word) 1 << (BITS_PER_WORD - 1),
- ((word) 1 << (BITS_PER_WORD - 1)) - 1,
- };
- #define IMMEDIATE_VAL(op) \
- ((op) >= 0x20 - 1 ? op - 0x20 : __immediate_val[0x20 - 2 - (op)])
-
- /* Handle immediates by putting all handled values in the VALUE array at
- appropriate indices, and then insert these indices in the code???
- Interesting constants are probably 0, 1, -1, 0x80000000, and
- 0x7FFFFFFF. */
-
- #define CNST_0x80000000 (0x20 - 2)
- #define CNST_0x7FFFFFFF (0x20 - 3)
-
- #define VALUE_MIN_SIGNED 0x80000000
- #define VALUE_MAX_SIGNED 0x7fffffff
-
- #define CNST(n) (0x20 + n)
- #define VALUE(n) n
-
/* Opcode enumeration, one enumerator per instruction in insn.def.  */
typedef enum
{
#undef DEF_INSN
#define DEF_INSN(SYM,CLASS,NAME) SYM,
#include "insn.def"
} opcode_t;

/* Class/name lookup for an opcode.  NOTE(review): insn_class and
   insn_name are presumably tables defined in a .c file -- not visible
   here.  */
#define GET_INSN_CLASS(OP) (insn_class[OP])
#define GET_INSN_NAME(OP) (insn_name[OP])

/* Class '1' marks single-operand instructions.  */
#define UNARY_OPERATION(insn) (GET_INSN_CLASS (insn.opcode) == '1')

/* One instruction, packed into four 8-bit fields: opcode, two source
   operands, destination.  NOTE(review): an enum bit-field's signedness
   and layout are implementation-defined -- relies on the compilers this
   targets.  */
typedef struct
{
  opcode_t opcode:8;
  unsigned int s1:8;
  unsigned int s2:8;
  unsigned int d:8;
} insn_t;

/* GCC 2 requires condition codes to be listed as asm clobbers; GCC 1
   did not understand "cc".  */
#if __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */
-
/* PERFORM_* for all instructions the search uses.  These macros are
   used both in the search phase and in the test phase.  */

#if defined(__GNUC__) && defined(USE_ASM)
/*** Define machine-dependent PERFORM_* here to improve synthesis speed ***/

/* Naming: _CIO = carry in and carry out, _CO = carry out only (carry in
   ignored).  NOTE(review): these asm templates embed literal newlines
   inside string constants -- a traditional-C extension accepted by the
   old GCC versions this was written for; modern compilers reject it.  */

#if sparc
#define PERFORM_ADD_CIO(d, co, r1, r2, ci) \
  asm ("subcc %%g0,%4,%%g0	! set cy if CI != 0
	addxcc %2,%3,%0	! add R1 and R2
	addx %%g0,%%g0,%1	! set CO to cy" \
       : "=r" (d), "=r" (co) \
       : "%r" (r1), "rI" (r2), "rI" (ci) \
       __CLOBBER_CC)
#define PERFORM_ADD_CO(d, co, r1, r2, ci) \
  asm ("addcc %2,%3,%0	! add R1 and R2
	addx %%g0,%%g0,%1	! set CO to cy" \
       : "=r" (d), "=r" (co) \
       : "%r" (r1), "rI" (r2) \
       __CLOBBER_CC)
#define PERFORM_SUB_CIO(d, co, r1, r2, ci) \
  asm ("subcc %%g0,%4,%%g0	! set cy if CI != 0
	subxcc %2,%3,%0	! subtract R2 from R1
	addx %%g0,%%g0,%1	! set CO to cy" \
       : "=r" (d), "=r" (co) \
       : "r" (r1), "rI" (r2), "rI" (ci) \
       __CLOBBER_CC)
#define PERFORM_SUB_CO(d, co, r1, r2, ci) \
  asm ("subcc %2,%3,%0	! subtract R2 from R1
	addx %%g0,%%g0,%1	! set CO to cy" \
       : "=r" (d), "=r" (co) \
       : "r" (r1), "rI" (r2) \
       __CLOBBER_CC)
#define PERFORM_ADC_CIO(d, co, r1, r2, ci) \
  asm ("subcc %4,1,%%g0	! cy = (CI == 0)
	subxcc %2,%3,%0	! subtract R2 from R1
	subx %%g0,-1,%1	! set CO to !cy" \
       : "=&r" (d), "=r" (co) \
       : "r" (r1), "rI" (r2), "rI" (ci) \
       __CLOBBER_CC)
#define PERFORM_ADC_CO(d, co, r1, r2, ci) \
  asm ("subcc %2,%3,%0	! subtract R2 from R1
	subx %%g0,-1,%1	! set CO to !cy" \
       : "=&r" (d), "=r" (co) \
       : "r" (r1), "rI" (r2) \
       __CLOBBER_CC)
#endif /* sparc */

#if m88k
#define PERFORM_ADD_CIO(d, co, r1, r2, ci) \
  asm ("or %0,r0,1
	subu.co r0,%4,%0	; set cy if CI != 0
	addu.cio %0,%2,%r3	; add R1 and R2
	addu.ci %1,r0,r0	; set CO to cy" \
       : "=&r" (d), "=r" (co) \
       : "%r" (r1), "Or" (r2), "r" (ci))
#define PERFORM_ADD_CO(d, co, r1, r2, ci) \
  asm ("addu.co %0,%2,%r3	; add R1 and R2
	addu.ci %1,r0,r0	; set CO to cy" \
       : "=r" (d), "=r" (co) \
       : "%r" (r1), "Or" (r2))
#define PERFORM_SUB_CIO(d, co, r1, r2, ci) \
  asm ("subu.co r0,r0,%r4	; reset cy if CI != 0
	subu.cio %0,%2,%r3	; subtract R2 from R1
	subu.ci %1,r0,r0	; set CO to -1+cy
	subu %1,r0,%1	; set CO to !cy" \
       : "=r" (d), "=r" (co) \
       : "r" (r1), "Or" (r2), "Or" (ci))
#define PERFORM_SUB_CO(d, co, r1, r2, ci) \
  asm ("subu.co %0,%2,%r3	; subtract R2 from R1
	subu.ci %1,r0,r0	; set CO to -1+cy
	subu %1,r0,%1	; set CO to !cy" \
       : "=r" (d), "=r" (co) \
       : "r" (r1), "Or" (r2))
#define PERFORM_ADC_CIO(d, co, r1, r2, ci) \
  asm ("or %0,r0,1
	subu.co r0,%r4,%0	; set cy if CI != 0
	subu.cio %0,%2,%r3	; subtract R2 from R1
	addu.ci %1,r0,r0	; set CO to cy" \
       : "=&r" (d), "=r" (co) \
       : "r" (r1), "Or" (r2), "Or" (ci))
#define PERFORM_ADC_CO(d, co, r1, r2, ci) \
  asm ("subu.co %0,%2,%r3	; subtract R2 from R1
	addu.ci %1,r0,r0	; set CO to cy" \
       : "=r" (d), "=r" (co) \
       : "r" (r1), "Or" (r2))
#endif /* m88k */

#endif /* __GNUC__ && USE_ASM*/
-
/************************* Default PERFORM_* in C *************************/

/* Copy R1 to D; carry passes through unchanged.  */
#define PERFORM_COPY(d, co, r1, ci) \
  ((d) = (r1), (co) = (ci))
/* Swap R1 and R2 in place; carry passes through unchanged.
   Bug fix: the original used commas after `word __temp = (r1)', making
   the whole thing one declaration -- `(r1) = (r2)' etc. were parsed as
   additional (shadowing) declarators, so the operands were never
   actually exchanged.  Use separate statements.  */
#define PERFORM_EXCHANGE(co, r1, r2, ci) \
  do {word __temp = (r1); (r1) = (r2); (r2) = __temp; (co) = (ci);} while (0)
-
/* Add; carry flag passes through unchanged.  */
#define PERFORM_ADD(d, co, r1, r2, ci) \
  ((d) = (r1) + (r2), (co) = (ci))
/* Add with carry in and carry out: D = R1 + R2 + CI, CO = carry.  */
#ifndef PERFORM_ADD_CIO
#define PERFORM_ADD_CIO(d, co, r1, r2, ci) \
  do { word __d = (r1) + (ci); \
       word __cy = __d < (ci); \
       (d) = __d + (r2); \
       (co) = ((d) < __d) + __cy; } while (0)
#endif
/* Add with carry in; carry flag itself is left unchanged.  */
#ifndef PERFORM_ADD_CI
#define PERFORM_ADD_CI(d, co, r1, r2, ci) \
  do { word __d = (r1) + (r2) + (ci); \
       (co) = (ci); \
       (d) = __d; } while (0)
#endif
/* Add, producing carry out; carry in is ignored.  */
#ifndef PERFORM_ADD_CO
#define PERFORM_ADD_CO(d, co, r1, r2, ci) \
  do { word __d = (r1) + (r2); \
       (co) = __d < (r1); \
       (d) = __d; } while (0)
#endif

/* Subtract; carry flag passes through unchanged.  */
#define PERFORM_SUB(d, co, r1, r2, ci) \
  ((d) = (r1) - (r2), (co) = (ci))
/* Subtract with borrow in and borrow out: D = R1 - R2 - CI, CO = borrow.  */
#ifndef PERFORM_SUB_CIO
#define PERFORM_SUB_CIO(d, co, r1, r2, ci) \
  do { word __d = (r1) - (r2) - (ci); \
       (co) = (ci) ? __d >= (r1) : __d > (r1); \
       (d) = __d; } while (0)
#endif
/* Subtract with borrow in; carry flag itself is left unchanged.  */
#ifndef PERFORM_SUB_CI
#define PERFORM_SUB_CI(d, co, r1, r2, ci) \
  do { word __d = (r1) - (r2) - (ci); \
       (co) = (ci); \
       (d) = __d; } while (0)
#endif
/* Subtract, producing borrow out; carry in is ignored.  */
#ifndef PERFORM_SUB_CO
#define PERFORM_SUB_CO(d, co, r1, r2, ci) \
  do { word __d = (r1) - (r2); \
       (co) = __d > (r1); \
       (d) = __d; } while (0)
#endif

/* ADC family: D = R1 + ~R2 + CI, i.e. subtract with the inverted-borrow
   carry convention (CO = 1 means no borrow), as on sparc/x86.  Note the
   _CO variant computes R1 - R2, which equals R1 + ~R2 + 1 (carry in
   implicitly 1).  */
#ifndef PERFORM_ADC_CIO
#define PERFORM_ADC_CIO(d, co, r1, r2, ci) \
  do { word __d = (r1) + ~(r2) + (ci); \
       (co) = (ci) ? __d <= (r1) : __d < (r1); \
       (d) = __d; } while (0)
#endif
#ifndef PERFORM_ADC_CI
#define PERFORM_ADC_CI(d, co, r1, r2, ci) \
  do { word __d = (r1) + ~(r2) + (ci); \
       (co) = (ci); \
       (d) = __d; } while (0)
#endif
#ifndef PERFORM_ADC_CO
#define PERFORM_ADC_CO(d, co, r1, r2, ci) \
  do { word __d = (r1) - (r2); \
       (co) = __d <= (r1); \
       (d) = __d; } while (0)
#endif
-
/* CO = unsigned R1 < R2; D is not written.  */
#ifndef PERFORM_CMP
#define PERFORM_CMP(d, co, r1, r2, ci) \
  ((co) = (r1) < (r2))
#endif
/* Parallel compare: packs halfword/byte inequality tests plus the
   ordinary signed/unsigned comparisons into the bit pattern of D, with
   0x5554 as the base pattern.  NOTE(review): the union assumes a 32-bit
   `long' and a 4-byte word -- wrong on a 64-bit target; confirm whether
   this insn is only enabled for 32-bit CPUs.  */
#ifndef PERFORM_CMPPAR
#define PERFORM_CMPPAR(d, co, r1, r2, ci) \
  do { \
    word __x; \
    union { long w; short h[2]; char b[4]; } __r1, __r2; \
    __r1.w = (r1); __r2.w = (r2); \
    __x = ((__r1.h[0] != __r2.h[0]) && (__r1.h[1] != __r2.h[1])) << 14; \
    __x |= ((__r1.b[0] != __r2.b[0]) && (__r1.b[1] != __r2.b[1]) \
	    && (__r1.b[2] != __r2.b[2]) && (__r1.b[3] != __r2.b[3])) << 12; \
    __x |= ((unsigned_word) (r1) >= (unsigned_word) (r2)) << 10; \
    __x |= ((unsigned_word) (r1) <= (unsigned_word) (r2)) << 8; \
    __x |= ((signed_word) (r1) >= (signed_word) (r2)) << 6; \
    __x |= ((signed_word) (r1) <= (signed_word) (r2)) << 4; \
    __x |= ((r1) != (r2)) << 2; \
    (d) = __x + 0x5554; /* binary 0101010101010100 */ \
    (co) = (ci); \
  } while (0)
#endif
-
/* Logic operations that don't affect carry.  */
#ifndef PERFORM_AND
#define PERFORM_AND(d, co, r1, r2, ci) \
  ((d) = (r1) & (r2), (co) = (ci))
#endif
#ifndef PERFORM_IOR
#define PERFORM_IOR(d, co, r1, r2, ci) \
  ((d) = (r1) | (r2), (co) = (ci))
#endif
#ifndef PERFORM_XOR
#define PERFORM_XOR(d, co, r1, r2, ci) \
  ((d) = (r1) ^ (r2), (co) = (ci))
#endif
/* ANDC/IORC/EQV complement the SECOND operand before combining.  */
#ifndef PERFORM_ANDC
#define PERFORM_ANDC(d, co, r1, r2, ci) \
  ((d) = (r1) & ~(r2), (co) = (ci))
#endif
#ifndef PERFORM_IORC
#define PERFORM_IORC(d, co, r1, r2, ci) \
  ((d) = (r1) | ~(r2), (co) = (ci))
#endif
#ifndef PERFORM_EQV
#define PERFORM_EQV(d, co, r1, r2, ci) \
  ((d) = (r1) ^ ~(r2), (co) = (ci))
#endif
#ifndef PERFORM_NAND
#define PERFORM_NAND(d, co, r1, r2, ci) \
  ((d) = ~((r1) & (r2)), (co) = (ci))
#endif
#ifndef PERFORM_NOR
#define PERFORM_NOR(d, co, r1, r2, ci) \
  ((d) = ~((r1) | (r2)), (co) = (ci))
#endif

/* Logic operations that reset carry.  Same operations as above, but the
   simulated CPU clears the carry flag (CO = 0).  */
#ifndef PERFORM_AND_RC
#define PERFORM_AND_RC(d, co, r1, r2, ci) \
  ((d) = (r1) & (r2), (co) = 0)
#endif
#ifndef PERFORM_IOR_RC
#define PERFORM_IOR_RC(d, co, r1, r2, ci) \
  ((d) = (r1) | (r2), (co) = 0)
#endif
#ifndef PERFORM_XOR_RC
#define PERFORM_XOR_RC(d, co, r1, r2, ci) \
  ((d) = (r1) ^ (r2), (co) = 0)
#endif
#ifndef PERFORM_ANDC_RC
#define PERFORM_ANDC_RC(d, co, r1, r2, ci) \
  ((d) = (r1) & ~(r2), (co) = 0)
#endif
#ifndef PERFORM_IORC_RC
#define PERFORM_IORC_RC(d, co, r1, r2, ci) \
  ((d) = (r1) | ~(r2), (co) = 0)
#endif
#ifndef PERFORM_EQV_RC
#define PERFORM_EQV_RC(d, co, r1, r2, ci) \
  ((d) = (r1) ^ ~(r2), (co) = 0)
#endif
#ifndef PERFORM_NAND_RC
#define PERFORM_NAND_RC(d, co, r1, r2, ci) \
  ((d) = ~((r1) & (r2)), (co) = 0)
#endif
#ifndef PERFORM_NOR_RC
#define PERFORM_NOR_RC(d, co, r1, r2, ci) \
  ((d) = ~((r1) | (r2)), (co) = 0)
#endif

/* Logic operations that clobber carry.  CO = -1 marks the carry flag as
   holding an unknown value afterwards.  */
#ifndef PERFORM_AND_CC
#define PERFORM_AND_CC(d, co, r1, r2, ci) \
  ((d) = (r1) & (r2), (co) = -1)
#endif
#ifndef PERFORM_IOR_CC
#define PERFORM_IOR_CC(d, co, r1, r2, ci) \
  ((d) = (r1) | (r2), (co) = -1)
#endif
#ifndef PERFORM_XOR_CC
#define PERFORM_XOR_CC(d, co, r1, r2, ci) \
  ((d) = (r1) ^ (r2), (co) = -1)
#endif
#ifndef PERFORM_ANDC_CC
#define PERFORM_ANDC_CC(d, co, r1, r2, ci) \
  ((d) = (r1) & ~(r2), (co) = -1)
#endif
#ifndef PERFORM_IORC_CC
#define PERFORM_IORC_CC(d, co, r1, r2, ci) \
  ((d) = (r1) | ~(r2), (co) = -1)
#endif
#ifndef PERFORM_EQV_CC
#define PERFORM_EQV_CC(d, co, r1, r2, ci) \
  ((d) = (r1) ^ ~(r2), (co) = -1)
#endif
#ifndef PERFORM_NAND_CC
#define PERFORM_NAND_CC(d, co, r1, r2, ci) \
  ((d) = ~((r1) & (r2)), (co) = -1)
#endif
#ifndef PERFORM_NOR_CC
#define PERFORM_NOR_CC(d, co, r1, r2, ci) \
  ((d) = ~((r1) | (r2)), (co) = -1)
#endif
-
/* Shifts and rotates; the count is truncated to the word size first.  */
#ifndef PERFORM_LSHIFTR
#define PERFORM_LSHIFTR(d, co, r1, r2, ci) \
  ((d) = ((unsigned_word) (r1) >> TRUNC_CNT(r2)), \
   (co) = (ci))
#endif
#ifndef PERFORM_ASHIFTR
#define PERFORM_ASHIFTR(d, co, r1, r2, ci) \
  ((d) = ((signed_word) (r1) >> TRUNC_CNT(r2)), \
   (co) = (ci))
#endif
#ifndef PERFORM_SHIFTL
#define PERFORM_SHIFTL(d, co, r1, r2, ci) \
  ((d) = ((signed_word) (r1) << TRUNC_CNT(r2)), (co) = (ci))
#endif
#ifndef PERFORM_ROTATEL
#define PERFORM_ROTATEL(d, co, r1, r2, ci) \
  ((d) = (r2) == 0 ? (r1) \
	 : ((r1) << TRUNC_CNT(r2)) | ((r1) >> TRUNC_CNT(BITS_PER_WORD - (r2))),\
   (co) = (ci))
#endif
/* _CO variants also deliver the last bit shifted out in CO.
   NOTE(review): for a zero count these compute a shift by
   TRUNC_CNT(r2) - 1 == -1, which is undefined -- presumably the searcher
   never generates zero shift counts; confirm in synth.c.  */
#ifndef PERFORM_LSHIFTR_CO
#define PERFORM_LSHIFTR_CO(d, co, r1, r2, ci) \
  do { word __d = ((unsigned_word) (r1) >> TRUNC_CNT(r2)); \
       (co) = ((unsigned_word) (r1) >> (TRUNC_CNT(r2) - 1)) & 1; \
       (d) = __d; } while (0)
#endif
#ifndef PERFORM_ASHIFTR_CO
#define PERFORM_ASHIFTR_CO(d, co, r1, r2, ci) \
  do { word __d = ((signed_word) (r1) >> TRUNC_CNT(r2)); \
       (co) = ((signed_word) (r1) >> (TRUNC_CNT(r2) - 1)) & 1; \
       (d) = __d; } while (0)
#endif
/* Arithmetic shift right setting CO when the (negative) result is
   inexact, i.e. when nonzero bits were shifted out.  */
#ifndef PERFORM_ASHIFTR_CON
#define PERFORM_ASHIFTR_CON(d, co, r1, r2, ci) \
  do { word __d = ((signed_word) (r1) >> TRUNC_CNT(r2)); \
       (co) = (signed_word) (r1) < 0 \
	 && ((r1) << TRUNC_CNT(BITS_PER_WORD - (r2))) != 0; \
       (d) = __d; } while (0)
#endif
#ifndef PERFORM_SHIFTL_CO
#define PERFORM_SHIFTL_CO(d, co, r1, r2, ci) \
  do { word __d = ((signed_word) (r1) << TRUNC_CNT(r2)); \
       (co) = ((r1) >> TRUNC_CNT(BITS_PER_WORD - (r2))) & 1; \
       (d) = __d; } while (0)
#endif
#ifndef PERFORM_ROTATEL_CO
#define PERFORM_ROTATEL_CO(d, co, r1, r2, ci) \
  ((d) = ((r1) << TRUNC_CNT(r2)) | ((r1) >> TRUNC_CNT(BITS_PER_WORD - (r2))),\
   (co) = (d) & 1)
#endif
/* Rotate left through carry: a (BITS_PER_WORD + 1)-bit rotate of the
   word plus the carry bit.  Counts are presumably >= 1 here too.  */
#ifndef PERFORM_ROTATEXL_CIO
#define PERFORM_ROTATEXL_CIO(d, co, r1, r2, ci) \
  do { word __d; unsigned cnt = TRUNC_CNT(r2); \
       if (cnt == 1) \
	 { \
	   __d = ((r1) << 1) | (ci); \
	   (co) = (r1) >> (BITS_PER_WORD - 1); \
	 } \
       else \
	 { \
	   __d = ((r1) << cnt) \
	     | (ci) << (cnt - 1) \
	     | ((r1) >> (BITS_PER_WORD + 1 - cnt)); \
	   (co) = ((r1) >> (BITS_PER_WORD - cnt)) & 1; \
	 } \
       (d) = __d; \
  } while (0)
#endif
/* Extract the 1- or 2-bit field at bit position R2 of R1; EXTS* sign
   extends the field, EXTU* zero extends it.  Carry passes through.
   Bug fix: the sign-extension shifts were hard-coded for a 32-bit word
   (<< 31 >> 31 and << 30 >> 30); use BITS_PER_WORD so the 64-bit ALPHA
   target extracts correctly.  */
#ifndef PERFORM_EXTS1
#define PERFORM_EXTS1(d, co, r1, r2, ci) \
  ((d) = ((signed_word) (r1) >> TRUNC_CNT(r2)) \
	 << (BITS_PER_WORD - 1) >> (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_EXTS2
#define PERFORM_EXTS2(d, co, r1, r2, ci) \
  ((d) = ((signed_word) (r1) >> TRUNC_CNT(r2)) \
	 << (BITS_PER_WORD - 2) >> (BITS_PER_WORD - 2), (co) = (ci))
#endif
#ifndef PERFORM_EXTU1
#define PERFORM_EXTU1(d, co, r1, r2, ci) \
  ((d) = ((unsigned_word) (r1) >> TRUNC_CNT(r2)) & 1, (co) = (ci))
#endif
#ifndef PERFORM_EXTU2
#define PERFORM_EXTU2(d, co, r1, r2, ci) \
  ((d) = ((unsigned_word) (r1) >> TRUNC_CNT(r2)) & 3, (co) = (ci))
#endif
-
/* Difference-or-zero (POWER `doz'): D = R1 - R2 if R1 > R2 (signed),
   else 0.  Carry passes through.  */
#ifndef PERFORM_DOZ
#define PERFORM_DOZ(d, co, r1, r2, ci) \
  (((d) = (signed_word) (r1) > (signed_word) (r2) ? (r1) - (r2) : 0), \
   (co) = (ci))
#endif
-
/* AM29K-style compare instructions: the boolean result is delivered in
   the most significant bit of D (TRUE = MSB set, FALSE = 0); carry
   passes through.  Bug fix: the shift count was hard-coded to 31; use
   BITS_PER_WORD - 1 (with a cast to word, since shifting a plain int
   into bit 63 overflows) so the 64-bit target works too.  */
#ifndef PERFORM_CPEQ
#define PERFORM_CPEQ(d, co, r1, r2, ci) \
  ((d) = (word) ((r1) == (r2)) << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPGE
#define PERFORM_CPGE(d, co, r1, r2, ci) \
  ((d) = (word) ((signed_word) (r1) >= (signed_word) (r2)) \
	 << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPGEU
#define PERFORM_CPGEU(d, co, r1, r2, ci) \
  ((d) = (word) ((unsigned_word) (r1) >= (unsigned_word) (r2)) \
	 << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPGT
#define PERFORM_CPGT(d, co, r1, r2, ci) \
  ((d) = (word) ((signed_word) (r1) > (signed_word) (r2)) \
	 << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPGTU
#define PERFORM_CPGTU(d, co, r1, r2, ci) \
  ((d) = (word) ((unsigned_word) (r1) > (unsigned_word) (r2)) \
	 << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPLE
#define PERFORM_CPLE(d, co, r1, r2, ci) \
  ((d) = (word) ((signed_word) (r1) <= (signed_word) (r2)) \
	 << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPLEU
#define PERFORM_CPLEU(d, co, r1, r2, ci) \
  ((d) = (word) ((unsigned_word) (r1) <= (unsigned_word) (r2)) \
	 << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPLT
#define PERFORM_CPLT(d, co, r1, r2, ci) \
  ((d) = (word) ((signed_word) (r1) < (signed_word) (r2)) \
	 << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPLTU
#define PERFORM_CPLTU(d, co, r1, r2, ci) \
  ((d) = (word) ((unsigned_word) (r1) < (unsigned_word) (r2)) \
	 << (BITS_PER_WORD - 1), (co) = (ci))
#endif
#ifndef PERFORM_CPNEQ
#define PERFORM_CPNEQ(d, co, r1, r2, ci) \
  ((d) = (word) ((r1) != (r2)) << (BITS_PER_WORD - 1), (co) = (ci))
#endif
-
/* CMP* instructions: boolean result delivered as 0/1 in D; carry passes
   through unchanged.  */
#ifndef PERFORM_CMPEQ
#define PERFORM_CMPEQ(d, co, r1, r2, ci) \
  ((d) = (r1) == (r2), (co) = (ci))
#endif
#ifndef PERFORM_CMPLE
#define PERFORM_CMPLE(d, co, r1, r2, ci) \
  ((d) = (signed_word) (r1) <= (signed_word) (r2), (co) = (ci))
#endif
#ifndef PERFORM_CMPLEU
#define PERFORM_CMPLEU(d, co, r1, r2, ci) \
  ((d) = (unsigned_word) (r1) <= (unsigned_word) (r2), (co) = (ci))
#endif
#ifndef PERFORM_CMPLT
#define PERFORM_CMPLT(d, co, r1, r2, ci) \
  ((d) = (signed_word) (r1) < (signed_word) (r2), (co) = (ci))
#endif
#ifndef PERFORM_CMPLTU
#define PERFORM_CMPLTU(d, co, r1, r2, ci) \
  ((d) = (unsigned_word) (r1) < (unsigned_word) (r2), (co) = (ci))
#endif
-
/* Unary operations.  */
#ifndef PERFORM_CLZ
/* Count leading zeros via the 256-entry clz_tab: select the highest
   nonzero byte of R1, look it up, and adjust by the byte position.
   NOTE(review): this is 32-bit specific (top selector is r1 <= 0xffffff
   / shift 24) and clz_tab is defined elsewhere -- presumably
   clz_tab[i] == 32-bit clz of i for the byte range; confirm against the
   table.  */
#define PERFORM_CLZ(d, co, r1, ci) \
  do { \
    int __a; \
    __a = (r1) <= 0xffff \
      ? ((r1) <= 0xff ? 0 : 8) \
      : ((r1) <= 0xffffff ? 16 : 24); \
    (d) = clz_tab[(r1) >> __a] - __a; \
    (co) = (ci); \
  } while (0)
#endif
#ifndef PERFORM_CTZ
/* Count trailing zeros of R1 via the 256-entry ctz_tab: pick the lowest
   nonzero byte, look it up, and adjust by the byte position.
   This can be done faster using the (x & -x) trick.
   Bug fix: `(r1) & 0xffff == 0' parses as `(r1) & (0xffff == 0)' because
   == binds tighter than &, which is always 0 -- so the byte-selection
   tests never fired and __a was always 0.  Parenthesize the masks.  */
#define PERFORM_CTZ(d, co, r1, ci) \
  do { \
    int __a; \
    __a = ((r1) & 0xffff) == 0 \
      ? (((r1) & 0xff0000) == 0 ? 24 : 16) \
      : (((r1) & 0xff) == 0) ? 8 : 0; \
    (d) = ctz_tab[((r1) >> __a) & 0xff] + __a; \
    (co) = (ci); \
  } while (0)
#endif
#ifndef PERFORM_FF1
/* Find-first-one (m88k `ff1' numbering): locate the highest nonzero
   byte of R1 and combine ff1_tab's byte result with the byte position.
   ff1_tab is defined elsewhere.  Like CLZ above, 32-bit specific.  */
#define PERFORM_FF1(d, co, r1, ci) \
  do { \
    int __a; \
    __a = (r1) <= 0xffff \
      ? ((r1) <= 0xff ? 0 : 8) \
      : ((r1) <= 0xffffff ? 16 : 24); \
    (d) = ff1_tab[(r1) >> __a] + __a; \
    (co) = (ci); \
    FF1_CHECK(d,r1) \
  } while (0)
#endif
/* On a m88k host, cross-check the table result against the real ff1
   instruction; elsewhere the check compiles away.  */
#if m88k
#define FF1_CHECK(d,r1) \
  { int t; \
    asm ("ff1 %0,%1" : "=r" (t) : "r" (r1)); \
    if (t != (d)) abort (); }
#else
#define FF1_CHECK(d,r1)
#endif
#ifndef PERFORM_FF0
/* Find-first-zero = find-first-one of the complement.  */
#define PERFORM_FF0(d, co, r1, ci) \
  PERFORM_FF1(d, co, ~(r1), ci)
#endif
#ifndef PERFORM_FFS
/* ffs(3)-style find-first-set: 1-based index of the least significant
   set bit of R1 ((r1) & (-r1) isolates that bit, then CLZ locates it).
   For R1 == 0 the result depends on clz_tab[0] (0 when clz_tab[0] is
   32) -- confirm against the table.  Like CLZ, 32-bit specific (note
   the hard-coded 32).  Cleanup: removed the unused local `__a'.  */
#define PERFORM_FFS(d, co, r1, ci) \
  do { \
    word __x = (r1) & (-r1); \
    PERFORM_CLZ(d, co, __x, ci); \
    (d) = 32 - (d); \
  } while (0)
#endif
- #ifndef PERFORM_ABSVAL
- #define PERFORM_ABSVAL(d, co, r1, ci) \
- ((d) = (signed_word) (r1) < 0 ? -(r1) : (r1), (co) = (ci))
- #endif
- #ifndef PERFORM_NABSVAL
- #define PERFORM_NABSVAL(d, co, r1, ci) \
- ((d) = (signed_word) (r1) > 0 ? -(r1) : (r1), (co) = (ci))
- #endif
-
- #ifndef PERFORM_CMOVEQ
- #define PERFORM_CMOVEQ(d, co, r1, r2, ci) \
- ((d) = (r1) == 0 ? (r2) : (d), (co) = (ci))
- #endif
- #ifndef PERFORM_CMOVNE
- #define PERFORM_CMOVNE(d, co, r1, r2, ci) \
- ((d) = (r1) != 0 ? (r2) : (d), (co) = (ci))
- #endif
- #ifndef PERFORM_CMOVLT
- #define PERFORM_CMOVLT(d, co, r1, r2, ci) \
- ((d) = (signed_word) (r1) < 0 ? (r2) : (d), (co) = (ci))
- #endif
- #ifndef PERFORM_CMOVGE
- #define PERFORM_CMOVGE(d, co, r1, r2, ci) \
- ((d) = (signed_word) (r1) >= 0 ? (r2) : (d), (co) = (ci))
- #endif
- #ifndef PERFORM_CMOVLE
- #define PERFORM_CMOVLE(d, co, r1, r2, ci) \
- ((d) = (signed_word) (r1) <= 0 ? (r2) : (d), (co) = (ci))
- #endif
- #ifndef PERFORM_CMOVGT
- #define PERFORM_CMOVGT(d, co, r1, r2, ci) \
- ((d) = (signed_word) (r1) > 0 ? (r2) : (d), (co) = (ci))
- #endif
-
/* "Inverse divide": quotient of the double-word value (-R1, 0) -- i.e.
   (2^BITS_PER_WORD - R1) * 2^BITS_PER_WORD -- by R1, using longlong.h's
   udiv_qrnnd.  Presumably R1 != 0; confirm in the search driver.  */
#ifndef PERFORM_INVDIV
#define PERFORM_INVDIV(v, co, r1, ci) \
  do { \
    word __q, __r; \
    udiv_qrnnd (__q, __r, -(r1), 0, (r1)); \
    (v) = __q; \
    (co) = (ci); \
  } while (0)
#endif
/* Same division as INVDIV, but deliver the remainder.  */
#ifndef PERFORM_INVMOD
#define PERFORM_INVMOD(v, co, r1, ci) \
  do { \
    word __q, __r; \
    udiv_qrnnd (__q, __r, -(r1), 0, (r1)); \
    (v) = __r; \
    (co) = (ci); \
  } while (0)
#endif
/* Low word of the product; carry passes through.  */
#ifndef PERFORM_MUL
#define PERFORM_MUL(v, co, r1, r2, ci) \
  do { \
    (v) = (r1) * (r2); \
    (co) = (ci); \
  } while (0)
#endif
/* High word of the unsigned widening product (longlong.h umul_ppmm).  */
#ifndef PERFORM_UMULWIDEN_HI
#define PERFORM_UMULWIDEN_HI(v, co, r1, r2, ci) \
  do { \
    word __ph, __pl; \
    umul_ppmm (__ph, __pl, (r1), (r2)); \
    (v) = __ph; \
    (co) = (ci); \
  } while (0)
#endif

/* Signed divide, only when the host's udiv is built on sdiv.  Division
   by zero yields 0 (guarded); the INT_MIN / -1 overflow case is not
   guarded -- NOTE(review): confirm the searcher never feeds it.  */
#ifdef UDIV_WITH_SDIV
#define PERFORM_SDIV(v, co, r1, r2, ci) \
  do { \
    if ((r2) != 0) \
      (v) = (signed_word) (r1) / (signed_word) (r2); \
    else \
      (v) = 0; \
    (co) = (ci); \
  } while (0)
#endif /* UDIV_WITH_SDIV */
-
/* Goal functions, generated from goal.def; DEF_SYNONYM entries expand
   to nothing and so add no enumerators.  */
enum goal_func
{
#undef DEF_GOAL
#define DEF_GOAL(SYM,ARITY,NAME,CODE) SYM,
#undef DEF_SYNONYM
#define DEF_SYNONYM(SYM,NAME)
#include "goal.def"
  LAST_AND_UNUSED_GOAL_CODE
};

/* Bit flags describing what is known about the carry flag, used to
   prune the search (presumably: carry known 0, known 1, or set by the
   immediately preceding insn -- confirm in synth.c).  */
enum prune_flags
{
  NO_PRUNE = 0,
  CY_0 = 1,
  CY_1 = 2,
  CY_JUST_SET = 4,
};

/* Search for an instruction SEQUENCE of at most N_INSNS computing
   DESIRED_VALUE from VALUES; implementation elsewhere.  */
void
synth(insn_t *sequence,
      int n_insns,
      word *values,
      int n_values,
      word desired_value,
      int allowed_cost,
      int cy_in,
      int flags);
/* Test a candidate sequence; implementation elsewhere.  */
void
test_sequence(insn_t *sequence, int n_insns);
/* Interpret SEQUENCE on VALUES; implementation elsewhere.  */
int
run_program(insn_t *sequence, int n_insns, word *values);

/* Lookup tables used by PERFORM_CLZ/CTZ/FF1 above, defined in a .c file.  */
extern const char clz_tab[];
extern const char ctz_tab[];
extern const char ff1_tab[];
-