+\f
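+/* Return 1 if OP is a MULT rtx. MODE is ignored. */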
+int
+is_mul (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == MULT);
+}
+
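+/* Return 1 if OP is a DIV rtx. MODE is ignored. */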
+int
+is_div (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == DIV);
+}
+\f
+#ifdef NOTYET
+/* Create a new copy of an rtx.
+ Recursively copies the operands of the rtx,
+ except for those few rtx codes that are sharable.
+ Does not share CONST. */
+
+rtx
+copy_all_rtx (orig)
+ register rtx orig;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case SCRATCH:
+ /* SCRATCH must be shared because each one represents a distinct value. */
+ return orig;
+
+#if 0
+ case CONST:
+ /* CONST can be shared if it contains a SYMBOL_REF. If it contains
+ a LABEL_REF, it isn't sharable. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT)
+ return orig;
+ break;
+#endif
+ /* A MEM with a constant address is not sharable. The problem is that
+ the constant address may need to be reloaded. If the mem is shared,
+ then reloading one copy of this mem will cause all copies to appear
+ to have been reloaded. */
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+ /* intel1 */
+ copy->is_spill_rtx = orig->is_spill_rtx;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL)
+ XEXP (copy, i) = copy_rtx (XEXP (orig, i));
+ break;
+
+ case '0':
+ case 'u':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
+ }
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+\f
+/* Try to rewrite a memory address to make it valid. */
+
+void
+rewrite_address (mem_rtx)
+ rtx mem_rtx;
+{
+ rtx index_rtx, base_rtx, offset_rtx, scale_rtx, ret_rtx;
+ int scale = 1;
+ int offset_adjust = 0;
+ int was_only_offset = 0;
+ rtx mem_addr = XEXP (mem_rtx, 0);
+ char *storage = oballoc (0);
+ int in_struct = 0;
+ int is_spill_rtx = 0;
+
+ in_struct = MEM_IN_STRUCT_P (mem_rtx);
+ is_spill_rtx = RTX_IS_SPILL_P (mem_rtx);
+
+ if (GET_CODE (mem_addr) == PLUS
+ && GET_CODE (XEXP (mem_addr, 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (mem_addr, 1), 0)) == REG)
+ {
+ /* This part is utilized by the combiner. */
+ ret_rtx
+ = gen_rtx (PLUS, GET_MODE (mem_addr),
+ gen_rtx (PLUS, GET_MODE (XEXP (mem_addr, 1)),
+ XEXP (mem_addr, 0), XEXP (XEXP (mem_addr, 1), 0)),
+ XEXP (XEXP (mem_addr, 1), 1));
+
+ if (memory_address_p (GET_MODE (mem_rtx), ret_rtx))
+ {
+ XEXP (mem_rtx, 0) = ret_rtx;
+ RTX_IS_SPILL_P (ret_rtx) = is_spill_rtx;
+ return;
+ }
+
+ obfree (storage);
+ }
+
+ /* This part is utilized by loop.c.
+ If the address contains PLUS (reg, const) and this pattern is
+ invalid, try to rewrite the address to make it valid. */
+ storage = oballoc (0);
+ index_rtx = base_rtx = offset_rtx = NULL;
+
+ /* Find the base, index and offset elements of the memory address. */
+ if (GET_CODE (mem_addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (mem_addr, 0)) == REG)
+ {
+ if (GET_CODE (XEXP (mem_addr, 1)) == REG)
+ base_rtx = XEXP (mem_addr, 1), index_rtx = XEXP (mem_addr, 0);
+ else
+ base_rtx = XEXP (mem_addr, 0), offset_rtx = XEXP (mem_addr, 1);
+ }
+
+ else if (GET_CODE (XEXP (mem_addr, 0)) == MULT)
+ {
+ index_rtx = XEXP (mem_addr, 0);
+ if (GET_CODE (XEXP (mem_addr, 1)) == REG)
+ base_rtx = XEXP (mem_addr, 1);
+ else
+ offset_rtx = XEXP (mem_addr, 1);
+ }
+
+ else if (GET_CODE (XEXP (mem_addr, 0)) == PLUS)
+ {
+ if (GET_CODE (XEXP (XEXP (mem_addr, 0), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (XEXP (mem_addr, 0), 0), 0)) == MULT
+ && (GET_CODE (XEXP (XEXP (XEXP (XEXP (mem_addr, 0), 0), 0), 0))
+ == REG)
+ && (GET_CODE (XEXP (XEXP (XEXP (XEXP (mem_addr, 0), 0), 0), 1))
+ == CONST_INT)
+ && (GET_CODE (XEXP (XEXP (XEXP (mem_addr, 0), 0), 1))
+ == CONST_INT)
+ && GET_CODE (XEXP (XEXP (mem_addr, 0), 1)) == REG
+ && GET_CODE (XEXP (mem_addr, 1)) == SYMBOL_REF)
+ {
+ index_rtx = XEXP (XEXP (XEXP (mem_addr, 0), 0), 0);
+ offset_rtx = XEXP (mem_addr, 1);
+ base_rtx = XEXP (XEXP (mem_addr, 0), 1);
+ offset_adjust = INTVAL (XEXP (XEXP (XEXP (mem_addr, 0), 0), 1));
+ }
+ else
+ {
+ offset_rtx = XEXP (mem_addr, 1);
+ index_rtx = XEXP (XEXP (mem_addr, 0), 0);
+ base_rtx = XEXP (XEXP (mem_addr, 0), 1);
+ }
+ }
+
+ else if (GET_CODE (XEXP (mem_addr, 0)) == CONST_INT)
+ {
+ was_only_offset = 1;
+ index_rtx = NULL;
+ base_rtx = NULL;
+ offset_rtx = XEXP (mem_addr, 1);
+ offset_adjust = INTVAL (XEXP (mem_addr, 0));
+ if (offset_adjust == 0)
+ {
+ XEXP (mem_rtx, 0) = offset_rtx;
+ RTX_IS_SPILL_P (XEXP (mem_rtx, 0)) = is_spill_rtx;
+ return;
+ }
+ }
+ else
+ {
+ obfree (storage);
+ return;
+ }
+ }
+ else if (GET_CODE (mem_addr) == MULT)
+ index_rtx = mem_addr;
+ else
+ {
+ obfree (storage);
+ return;
+ }
+
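+ /* If the index is a MULT, its scale operand must be a CONST_INT;
+ extract the scale and strip the MULT. */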
+ if (index_rtx != 0 && GET_CODE (index_rtx) == MULT)
+ {
+ if (GET_CODE (XEXP (index_rtx, 1)) != CONST_INT)
+ {
+ obfree (storage);
+ return;
+ }
+
+ scale_rtx = XEXP (index_rtx, 1);
+ scale = INTVAL (scale_rtx);
+ index_rtx = copy_all_rtx (XEXP (index_rtx, 0));
+ }
+
+ /* Now find which of the elements are invalid and try to fix them. */
+ if (index_rtx && GET_CODE (index_rtx) == CONST_INT && base_rtx == NULL)
+ {
+ offset_adjust = INTVAL (index_rtx) * scale;
+
+ if (offset_rtx != 0 && CONSTANT_P (offset_rtx))
+ offset_rtx = plus_constant (offset_rtx, offset_adjust);
+ else if (offset_rtx == 0)
+ offset_rtx = const0_rtx;
+
+ RTX_IS_SPILL_P (XEXP (mem_rtx, 0)) = is_spill_rtx;
+ XEXP (mem_rtx, 0) = offset_rtx;
+ return;
+ }
+
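+ /* Fold a constant term in the base into the offset adjustment. */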
+ if (base_rtx && GET_CODE (base_rtx) == PLUS
+ && GET_CODE (XEXP (base_rtx, 0)) == REG
+ && GET_CODE (XEXP (base_rtx, 1)) == CONST_INT)
+ {
+ offset_adjust += INTVAL (XEXP (base_rtx, 1));
+ base_rtx = copy_all_rtx (XEXP (base_rtx, 0));
+ }
+
+ else if (base_rtx && GET_CODE (base_rtx) == CONST_INT)
+ {
+ offset_adjust += INTVAL (base_rtx);
+ base_rtx = NULL;
+ }
+
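+ /* Likewise fold a constant term in the index, scaled, into the
+ offset adjustment. */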
+ if (index_rtx && GET_CODE (index_rtx) == PLUS
+ && GET_CODE (XEXP (index_rtx, 0)) == REG
+ && GET_CODE (XEXP (index_rtx, 1)) == CONST_INT)
+ {
+ offset_adjust += INTVAL (XEXP (index_rtx, 1)) * scale;
+ index_rtx = copy_all_rtx (XEXP (index_rtx, 0));
+ }
+
+ if (index_rtx)
+ {
+ if (! LEGITIMATE_INDEX_P (index_rtx)
+ && ! (index_rtx == stack_pointer_rtx && scale == 1
+ && base_rtx == NULL))
+ {
+ obfree (storage);
+ return;
+ }
+ }
+
+ if (base_rtx)
+ {
+ if (! LEGITIMATE_INDEX_P (base_rtx) && GET_CODE (base_rtx) != REG)
+ {
+ obfree (storage);
+ return;
+ }
+ }
+
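+ /* If anything was folded into the offset, rebuild the address as
+ scale*index + base + offset, omitting parts that are absent or zero. */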
+ if (offset_adjust != 0)
+ {
+ if (offset_rtx != 0 && CONSTANT_P (offset_rtx))
+ offset_rtx = plus_constant (offset_rtx, offset_adjust);
+ else
+ offset_rtx = const0_rtx;
+
+ if (index_rtx)
+ {
+ if (base_rtx)
+ {
+ if (scale != 1)
+ {
+ ret_rtx = gen_rtx (PLUS, GET_MODE (base_rtx),
+ gen_rtx (MULT, GET_MODE (index_rtx),
+ index_rtx, scale_rtx),
+ base_rtx);
+
+ if (GET_CODE (offset_rtx) != CONST_INT
+ || INTVAL (offset_rtx) != 0)
+ ret_rtx = gen_rtx (PLUS, GET_MODE (ret_rtx),
+ ret_rtx, offset_rtx);
+ }
+ else
+ {
+ ret_rtx = gen_rtx (PLUS, GET_MODE (index_rtx),
+ index_rtx, base_rtx);
+
+ if (GET_CODE (offset_rtx) != CONST_INT
+ || INTVAL (offset_rtx) != 0)
+ ret_rtx = gen_rtx (PLUS, GET_MODE (ret_rtx),
+ ret_rtx, offset_rtx);
+ }
+ }
+ else
+ {
+ if (scale != 1)
+ {
+ ret_rtx = gen_rtx (MULT, GET_MODE (index_rtx),
+ index_rtx, scale_rtx);
+
+ if (GET_CODE (offset_rtx) != CONST_INT
+ || INTVAL (offset_rtx) != 0)
+ ret_rtx = gen_rtx (PLUS, GET_MODE (ret_rtx),
+ ret_rtx, offset_rtx);
+ }
+ else
+ {
+ if (GET_CODE (offset_rtx) == CONST_INT
+ && INTVAL (offset_rtx) == 0)
+ ret_rtx = index_rtx;
+ else
+ ret_rtx = gen_rtx (PLUS, GET_MODE (index_rtx),
+ index_rtx, offset_rtx);
+ }
+ }
+ }
+ else
+ {
+ if (base_rtx)
+ {
+ if (GET_CODE (offset_rtx) == CONST_INT
+ && INTVAL (offset_rtx) == 0)
+ ret_rtx = base_rtx;
+ else
+ ret_rtx = gen_rtx (PLUS, GET_MODE (base_rtx), base_rtx,
+ offset_rtx);
+ }
+ else if (was_only_offset)
+ ret_rtx = offset_rtx;
+ else
+ {
+ obfree (storage);
+ return;
+ }
+ }
+
+ XEXP (mem_rtx, 0) = ret_rtx;
+ RTX_IS_SPILL_P (XEXP (mem_rtx, 0)) = is_spill_rtx;
+ return;
+ }
+ else
+ {
+ obfree (storage);
+ return;
+ }
+}
+#endif /* NOTYET */
+\f
+/* Return 1 if the first insn to set cc before INSN also sets the register
+ REG_RTX; otherwise return 0. */
+int
+last_to_set_cc (reg_rtx, insn)
+ rtx reg_rtx, insn;
+{
+ rtx prev_insn = PREV_INSN (insn);
+
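+ /* Scan backwards, skipping notes; any insn that is not a simple
+ SET stops the search. */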
+ while (prev_insn)
+ {
+ if (GET_CODE (prev_insn) == NOTE)
+ ;
+
+ else if (GET_CODE (prev_insn) == INSN)
+ {
+ if (GET_CODE (PATTERN (prev_insn)) != SET)
+ return (0);
+
+ if (rtx_equal_p (SET_DEST (PATTERN (prev_insn)), reg_rtx))
+ {
+ if (sets_condition_code (SET_SRC (PATTERN (prev_insn))))
+ return (1);
+
+ return (0);
+ }
+
+ else if (! doesnt_set_condition_code (SET_SRC (PATTERN (prev_insn))))
+ return (0);
+ }
+
+ else
+ return (0);
+
+ prev_insn = PREV_INSN (prev_insn);
+ }
+
+ return (0);
+}
+\f
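+/* Return 1 if evaluating PAT is known not to affect the condition
+ code; only plain MEM and REG references qualify. */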
+int
+doesnt_set_condition_code (pat)
+ rtx pat;
+{
+ switch (GET_CODE (pat))
+ {
+ case MEM:
+ case REG:
+ return 1;
+
+ default:
+ return 0;
+
+ }
+}
+\f
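+/* Return 1 if PAT is an arithmetic operation that sets the
+ condition code as a side effect. */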
+int
+sets_condition_code (pat)
+ rtx pat;
+{
+ switch (GET_CODE (pat))
+ {
+ case PLUS:
+ case MINUS:
+ case AND:
+ case IOR:
+ case XOR:
+ case NOT:
+ case NEG:
+ case MULT:
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ return 1;
+
+ default:
+ return (0);
+ }
+}
+\f
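+/* Return 1 if OP is a CONST_INT between 0 and 32 inclusive. */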
+int
+str_immediate_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT && INTVAL (op) <= 32 && INTVAL (op) >= 0)
+ return 1;
+
+ return 0;
+}
+\f
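+/* Return 1 if INSN is a SET whose destination has floating-point
+ mode (SFmode, DFmode or XFmode). */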
+int
+is_fp_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET
+ && (GET_MODE (SET_DEST (PATTERN (insn))) == DFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == SFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == XFmode))
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if the mode of the SET_DEST of INSN is floating point
+ and it is not an fld or a move from memory to memory.
+ Otherwise return 0. */
+
+int
+is_fp_dest (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET
+ && (GET_MODE (SET_DEST (PATTERN (insn))) == DFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == SFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == XFmode)
+ && GET_CODE (SET_DEST (PATTERN (insn))) == REG
+ && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_FLOAT_REG
+ && GET_CODE (SET_SRC (PATTERN (insn))) != MEM)
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if the mode of the SET_DEST of INSN is floating point and is
+ memory and the source is a register. */
+
+int
+is_fp_store (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET
+ && (GET_MODE (SET_DEST (PATTERN (insn))) == DFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == SFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == XFmode)
+ && GET_CODE (SET_DEST (PATTERN (insn))) == MEM
+ && GET_CODE (SET_SRC (PATTERN (insn))) == REG)
+ return 1;
+
+ return 0;
+}
+\f
+/* Return 1 if DEP_INSN sets a register which INSN uses as a base
+ or index to reference memory; otherwise return 0. */
+
+int
+agi_dependent (insn, dep_insn)
+ rtx insn, dep_insn;
+{
+ if (GET_CODE (dep_insn) == INSN
+ && GET_CODE (PATTERN (dep_insn)) == SET
+ && GET_CODE (SET_DEST (PATTERN (dep_insn))) == REG)
+ return reg_mentioned_in_mem (SET_DEST (PATTERN (dep_insn)), insn);
+
+ if (GET_CODE (dep_insn) == INSN && GET_CODE (PATTERN (dep_insn)) == SET
+ && GET_CODE (SET_DEST (PATTERN (dep_insn))) == MEM
+ && push_operand (SET_DEST (PATTERN (dep_insn)),
+ GET_MODE (SET_DEST (PATTERN (dep_insn)))))
+ return reg_mentioned_in_mem (stack_pointer_rtx, insn);
+
+ return 0;
+}
+\f
+/* Return 1 if REG is used in RTL as a base or index for a memory
+ ref; otherwise return 0. */
+
+int
+reg_mentioned_in_mem (reg, rtl)
+ rtx reg, rtl;
+{
+ register char *fmt;
+ register int i, j;
+ register enum rtx_code code;
+
+ if (rtl == NULL)
+ return 0;
+
+ code = GET_CODE (rtl);
+
+ switch (code)
+ {
+ case HIGH:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ case CC0:
+ case SUBREG:
+ return 0;
+ default:
+ break;
+ }
+
+ if (code == MEM && reg_mentioned_p (reg, rtl))
+ return 1;
+
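+ /* Recurse over the operand sub-expressions. */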
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ for (j = XVECLEN (rtl, i) - 1; j >= 0; j--)
+ if (reg_mentioned_in_mem (reg, XVECEXP (rtl, i, j)))
+ return 1;
+ }
+
+ else if (fmt[i] == 'e' && reg_mentioned_in_mem (reg, XEXP (rtl, i)))
+ return 1;
+ }
+
+ return 0;
+}
+\f
+/* Output the appropriate insns for doing strlen if not just doing repnz; scasb.
+
+ operands[0] = result, initialized with the startaddress
+ operands[1] = alignment of the address.
+ operands[2] = scratch register, initialized with the startaddress when
+ not aligned, otherwise undefined
+
+ This is just the body. It needs the initialisations mentioned above and
+ some address computing at the end. These things are done in i386.md. */
+
+char *
+output_strlen_unroll (operands)
+ rtx operands[];
+{
+ rtx xops[18];
+
+ xops[0] = operands[0]; /* Result */
+ /* operands[1]; * Alignment */
+ xops[1] = operands[2]; /* Scratch */
+ xops[2] = GEN_INT (0);
+ xops[3] = GEN_INT (2);
+ xops[4] = GEN_INT (3);
+ xops[5] = GEN_INT (4);
+ /* xops[6] = gen_label_rtx (); * label when aligned to 3-byte */
+ /* xops[7] = gen_label_rtx (); * label when aligned to 2-byte */
+ xops[8] = gen_label_rtx (); /* label of main loop */
+
+ if (TARGET_USE_Q_REG && QI_REG_P (xops[1]))
+ xops[9] = gen_label_rtx (); /* pentium optimisation */
+
+ xops[10] = gen_label_rtx (); /* end label 2 */
+ xops[11] = gen_label_rtx (); /* end label 1 */
+ xops[12] = gen_label_rtx (); /* end label */
+ /* xops[13] * Temporary used */
+ xops[14] = GEN_INT (0xff);
+ xops[15] = GEN_INT (0xff00);
+ xops[16] = GEN_INT (0xff0000);
+ xops[17] = GEN_INT (0xff000000);
+
+ /* Loop to check 1..3 bytes for null to get an aligned pointer. */
+
+ /* Is there a known alignment and is it less than 4? */
+ if (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) < 4)
+ {
+ /* Is there a known alignment and is it not 2? */
+ if (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) != 2)
+ {
+ xops[6] = gen_label_rtx (); /* Label when aligned to 3-byte */
+ xops[7] = gen_label_rtx (); /* Label when aligned to 2-byte */
+
+ /* Leave just the two lower bits.
+ If this is a q-register, then the high part is used later;
+ therefore use andl rather than andb. */
+ output_asm_insn (AS2 (and%L1,%4,%1), xops);
+
+ /* Is aligned to a 4-byte address when zero. */
+ output_asm_insn (AS1 (je,%l8), xops);
+
+ /* The and leaves 3, which has even parity, exactly when the
+ address is 3 mod 4, so jp branches to the 3-byte-aligned case. */
+ output_asm_insn (AS1 (jp,%6), xops);
+
+ /* Is it aligned to 2 bytes? */
+ output_asm_insn (AS2 (cmp%L1,%3,%1), xops);
+
+ output_asm_insn (AS1 (je,%7), xops);
+ }
+ else
+ {
+ /* Since the alignment is 2, we have to check 2 or 0 bytes;
+ check whether the address is aligned to 4 bytes. */
+ output_asm_insn (AS2 (and%L1,%3,%1), xops);
+
+ /* Is aligned to a 4-byte address when zero. */
+ output_asm_insn (AS1 (je,%l8), xops);
+ }
+
+ xops[13] = gen_rtx_MEM (QImode, xops[0]);
+
+ /* Now compare the bytes; comparing with the high part of a q-reg
+ gives shorter code. */
+ if (QI_REG_P (xops[1]))
+ {
+ /* Compare the first n unaligned bytes one byte at a time. */
+ output_asm_insn (AS2 (cmp%B1,%h1,%13), xops);
+
+ /* When zero we have reached the end. */
+ output_asm_insn (AS1 (je,%l12), xops);
+
+ /* Increment the address. */
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ /* Not needed with an alignment of 2. */
+ if (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) != 2)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[7]));
+ output_asm_insn (AS2 (cmp%B1,%h1,%13), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[6]));
+ }
+
+ output_asm_insn (AS2 (cmp%B1,%h1,%13), xops);
+ }
+ else
+ {
+ output_asm_insn (AS2 (cmp%B13,%2,%13), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[7]));
+ output_asm_insn (AS2 (cmp%B13,%2,%13), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[6]));
+ output_asm_insn (AS2 (cmp%B13,%2,%13), xops);
+ }
+
+ output_asm_insn (AS1 (je,%l12), xops);
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+ }
+
+ /* Generate the loop to check 4 bytes at a time. Aligning this
+ loop is not worthwhile; it only enlarges the code without
+ speeding it up. */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (xops[8]));
+
+ xops[13] = gen_rtx_MEM (SImode, xops[0]);
+ output_asm_insn (AS2 (mov%L1,%13,%1), xops);
+
+ if (QI_REG_P (xops[1]))
+ {
+ /* On the i586 it is faster to combine the high and low parts as
+ a kind of lookahead: if anding both yields zero, then one of
+ the two *could* be zero; otherwise neither is zero. This saves
+ one instruction. On the i486 it is slower (tested with P-90,
+ i486DX2-66, AMD486DX2-66). */
+ if (TARGET_PENTIUM)
+ {
+ output_asm_insn (AS2 (test%B1,%h1,%b1), xops);
+ output_asm_insn (AS1 (jne,%l9), xops);
+ }
+
+ /* Check first byte. */
+ output_asm_insn (AS2 (test%B1,%b1,%b1), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+
+ /* Check second byte. */
+ output_asm_insn (AS2 (test%B1,%h1,%h1), xops);
+ output_asm_insn (AS1 (je,%l11), xops);
+
+ if (TARGET_PENTIUM)
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[9]));
+ }
+
+ else
+ {
+ /* Check first byte. */
+ output_asm_insn (AS2 (test%L1,%14,%1), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+
+ /* Check second byte. */
+ output_asm_insn (AS2 (test%L1,%15,%1), xops);
+ output_asm_insn (AS1 (je,%l11), xops);
+ }
+
+ /* Check third byte. */
+ output_asm_insn (AS2 (test%L1,%16,%1), xops);
+ output_asm_insn (AS1 (je,%l10), xops);
+
+ /* Check fourth byte and increment address. */
+ output_asm_insn (AS2 (add%L0,%5,%0), xops);
+ output_asm_insn (AS2 (test%L1,%17,%1), xops);
+ output_asm_insn (AS1 (jne,%l8), xops);
+
+ /* Now generate fixups when the compare stops within a 4-byte word. */
+ output_asm_insn (AS2 (sub%L0,%4,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (xops[10]));
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (xops[11]));
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (xops[12]));
+
+ return "";
+}
+
+char *
+output_fp_conditional_move (which_alternative, operands)
+ int which_alternative;
+ rtx operands[];
+{
+ int code = GET_CODE (operands[1]);
+
+ /* This is very tricky. We have to do it right. For a code segment
+ like:
+
+ int foo;
+ double bar;
+ ....
+ foo = foo - x;
+ if (foo >= 0)
+ bar = y;
+
+ final_scan_insn () may delete the insn which sets CC. We have to
+ tell final_scan_insn () if it should be reinserted. When CODE is
+ GT or LE, we have to check the CC_NO_OVERFLOW bit and return
+ NULL_PTR to tell final to reinsert the test insn because the
+ conditional move cannot be handled properly without it. */
+ if ((code == GT || code == LE)
+ && (cc_prev_status.flags & CC_NO_OVERFLOW))
+ return NULL_PTR;
+
+ switch (which_alternative)
+ {
+ case 0:
+ /* r <- cond ? arg : r */
+ output_asm_insn (AS2 (fcmov%F1,%2,%0), operands);
+ break;
+
+ case 1:
+ /* r <- cond ? r : arg */
+ output_asm_insn (AS2 (fcmov%f1,%3,%0), operands);
+ break;
+
+ case 2:
+ /* r <- cond ? r : arg */
+ output_asm_insn (AS2 (fcmov%F1,%2,%0), operands);
+ output_asm_insn (AS2 (fcmov%f1,%3,%0), operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
+
+char *
+output_int_conditional_move (which_alternative, operands)
+ int which_alternative;
+ rtx operands[];
+{
+ int code = GET_CODE (operands[1]);
+ enum machine_mode mode;
+ rtx xops[4];
+
+ /* This is very tricky. We have to do it right. For a code segment
+ like:
+
+ int foo, bar;
+ ....
+ foo = foo - x;
+ if (foo >= 0)
+ bar = y;
+
+ final_scan_insn () may delete the insn which sets CC. We have to
+ tell final_scan_insn () if it should be reinserted. When CODE is
+ GT or LE, we have to check the CC_NO_OVERFLOW bit and return
+ NULL_PTR to tell final to reinsert the test insn because the
+ conditional move cannot be handled properly without it. */
+ if ((code == GT || code == LE)
+ && (cc_prev_status.flags & CC_NO_OVERFLOW))
+ return NULL_PTR;
+
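+ /* A DImode conditional move needs two SImode cmovs, one per word;
+ make SImode SUBREGs for the second words of the operands. */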
+ mode = GET_MODE (operands [0]);
+ if (mode == DImode)
+ {
+ xops [0] = gen_rtx_SUBREG (SImode, operands [0], 1);
+ xops [1] = operands [1];
+ xops [2] = gen_rtx_SUBREG (SImode, operands [2], 1);
+ xops [3] = gen_rtx_SUBREG (SImode, operands [3], 1);
+ }
+
+ switch (which_alternative)
+ {
+ case 0:
+ /* r <- cond ? arg : r */
+ output_asm_insn (AS2 (cmov%C1,%2,%0), operands);
+ if (mode == DImode)
+ output_asm_insn (AS2 (cmov%C1,%2,%0), xops);
+ break;
+
+ case 1:
+ /* r <- cond ? r : arg */
+ output_asm_insn (AS2 (cmov%c1,%3,%0), operands);
+ if (mode == DImode)
+ output_asm_insn (AS2 (cmov%c1,%3,%0), xops);
+ break;
+
+ case 2:
+ /* rm <- cond ? arg1 : arg2 */
+ output_asm_insn (AS2 (cmov%C1,%2,%0), operands);
+ output_asm_insn (AS2 (cmov%c1,%3,%0), operands);
+ if (mode == DImode)
+ {
+ output_asm_insn (AS2 (cmov%C1,%2,%0), xops);
+ output_asm_insn (AS2 (cmov%c1,%3,%0), xops);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}