+Fri Jun 28 17:22:37 2002 Denis Chertykov <denisc@overta.ru>
+ Frank Ch. Eigler <fche@redhat.com>
+ Matthew Green <mrg@redhat.com>
+	Richard Henderson  <rth@redhat.com>
+ Dave Hudson <dave.hudson@ubicom.com>
+ Jeff Johnston <jjohnstn@redhat.com>
+ Alan Lehotsky <apl@alum.mit.edu>
+ Bernd Schmidt <bernds@redhat.com>
+ Graham Stott <grahams@redhat.com>
+
+ * doc/extend.texi: Add ip2k port to description of attribute
+ naked.
+ * doc/install.texi (Specific): Add ip2k description.
+ * doc/install-old.texi (Configurations): Add ip2k to possible
+ cpu types.
+ * doc/md.texi: Document ip2k constraints.
+ * config/ip2k/crt0.S: New file.
+ * config/ip2k/ip2k-protos.h: New file.
+ * config/ip2k/ip2k.c: New file.
+ * config/ip2k/ip2k.h: New file.
+ * config/ip2k/ip2k.md: New file.
+ * config/ip2k/libgcc.S: New file.
+ * config/ip2k/t-ip2k: New file.
+
2002-06-30 Hans-Peter Nilsson <hp@bitrange.com>
* config/mmix/mmix.md ("return"): New pattern.
--- /dev/null
+;
+; Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+; Contributed by Red Hat, Inc.
+;
+; This file is part of GNU CC.
+;
+; GNU CC is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 2, or (at your option)
+; any later version.
+;
+; In addition to the permissions in the GNU General Public License, the
+; Free Software Foundation gives you unlimited permission to link the
+; compiled version of this file with other programs, and to distribute
+; those programs without any restriction coming from the use of this
+; file. (The General Public License restrictions do apply in other
+; respects; for example, they cover modification of the file, and
+; distribution when not linked into another program.)
+;
+; GNU CC is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GNU CC; see the file COPYING. If not, write to
+; the Free Software Foundation, 59 Temple Place - Suite 330,
+; Boston, MA 02111-1307, USA.
+;
+
+	.file	"crt0.S"
+	.text
+	.global	__start
+	.func	__start
+; Program entry point.  Establishes a zero constant, points SP at
+; __stack, pushes a dummy argc/argv frame, and calls main().  If main
+; ever returns, its return value ($80:$81) is re-pushed as the
+; argument to exit(); "break" traps should exit() itself return.
+__start:
+	clr	$ff			; Ensure we have a zero available
+	mov	w,#%hi8data(__stack)	; set up stack
+	mov	sph,w			;
+	mov	w,#%lo8data(__stack)
+	mov	spl,w
+
+	push	#0			; Set argc/argv.
+	push	#0			; Only required for testing
+	push	#0			; purposes and "ansi" main.
+	push	#0
+	page	_main
+	call	_main
+	push	$81			; use return value to call exit()
+	push	$80
+	page	_exit
+	call	_exit
+	break				; Should never return
+	.endfunc
--- /dev/null
+/* Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc and Ubicom, Inc.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING. If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+extern void asm_file_start PARAMS ((FILE *));
+extern void asm_file_end PARAMS ((FILE *));
+
+extern void function_prologue PARAMS ((FILE *, int));
+extern void function_epilogue PARAMS ((FILE *, int));
+extern int find_one_set_bit_p PARAMS ((HOST_WIDE_INT));
+extern int find_one_clear_bit_p PARAMS ((HOST_WIDE_INT));
+
+#ifdef TREE_CODE
+extern void unique_section PARAMS ((tree, int));
+extern void encode_section_info PARAMS ((tree));
+extern void asm_output_section_name PARAMS ((FILE *, tree, const char *,
+ int));
+extern int valid_machine_type_attribute PARAMS ((tree, tree, tree, tree));
+extern int valid_machine_decl_attribute PARAMS ((tree, tree, tree, tree));
+extern int ip2k_return_pops_args PARAMS ((tree, tree, int));
+#endif /* TREE_CODE */
+
+#ifdef RTX_CODE
+extern int legitimate_address_p PARAMS ((enum machine_mode, rtx, int));
+extern void machine_dependent_reorg PARAMS ((rtx));
+extern int ip2k_address_cost PARAMS ((rtx));
+extern int ip2k_extra_constraint PARAMS ((rtx, int));
+extern rtx legitimize_address PARAMS ((rtx, rtx, enum machine_mode, rtx));
+extern int adjust_insn_length PARAMS ((rtx insn, int len));
+extern int default_rtx_costs PARAMS ((rtx, enum rtx_code, enum rtx_code));
+extern void asm_output_char PARAMS ((FILE *, rtx));
+extern void asm_output_short PARAMS ((FILE *, rtx));
+extern void asm_output_byte PARAMS ((FILE *, int));
+extern void print_operand PARAMS ((FILE *, rtx, int));
+extern void print_operand_address PARAMS ((FILE *, rtx));
+extern int ip2k_jump_mode PARAMS ((rtx, rtx));
+extern void ip2k_split_words PARAMS ((enum machine_mode, enum machine_mode,
+ rtx *));
+extern rtx ip2k_get_low_half PARAMS ((rtx, enum machine_mode));
+extern rtx ip2k_get_high_half PARAMS ((rtx, enum machine_mode));
+extern int ip2k_nonptr_operand PARAMS ((rtx, enum machine_mode));
+extern int ip2k_ptr_operand PARAMS ((rtx, enum machine_mode));
+extern int ip2k_ip_operand PARAMS ((rtx, enum machine_mode));
+extern int ip2k_short_operand PARAMS ((rtx, enum machine_mode));
+extern int ip2k_gen_operand PARAMS ((rtx, enum machine_mode));
+extern int ip2k_nonsp_reg_operand PARAMS ((rtx, enum machine_mode));
+extern int ip2k_symbol_ref_operand PARAMS ((rtx, enum machine_mode));
+extern const char *ip2k_set_compare PARAMS ((rtx, rtx));
+extern const char *ip2k_gen_sCOND PARAMS ((rtx, enum rtx_code, rtx));
+extern const char *ip2k_gen_signed_comp_branch PARAMS ((rtx,
+ enum rtx_code,
+ rtx));
+extern const char *ip2k_gen_unsigned_comp_branch PARAMS ((rtx,
+ enum rtx_code,
+ rtx));
+extern int is_regfile_address PARAMS ((rtx));
+extern int ip2k_mode_dependent_address PARAMS ((rtx));
+extern int ip2k_address_uses_reg_p PARAMS ((rtx, unsigned int));
+extern int ip2k_xexp_not_uses_reg_p PARAMS ((rtx, unsigned int, int));
+extern int ip2k_composite_xexp_not_uses_reg_p PARAMS ((rtx, unsigned int, int));
+extern int ip2k_composite_xexp_not_uses_cc0_p PARAMS ((rtx));
+extern int ip2k_signed_comparison_operator PARAMS ((rtx,
+ enum machine_mode));
+extern int ip2k_unsigned_comparison_operator PARAMS ((rtx,
+ enum machine_mode));
+extern int ip2k_unary_operator PARAMS ((rtx, enum machine_mode));
+extern int ip2k_binary_operator PARAMS ((rtx, enum machine_mode));
+
+extern rtx ip2k_compare_operands[3];
+#endif /* RTX_CODE */
+
+#ifdef HAVE_MACHINE_MODES
+extern int class_max_nregs PARAMS ((enum reg_class, enum machine_mode));
+extern enum reg_class class_likely_spilled_p PARAMS ((int c));
+#endif /* HAVE_MACHINE_MODES */
+
+#ifdef REAL_VALUE_TYPE
+extern void asm_output_float PARAMS ((FILE *, REAL_VALUE_TYPE));
+#endif
+
+extern int ip2k_init_elim_offset PARAMS ((int, int));
+extern void ip2k_init_local_alloc PARAMS ((int *));
+
--- /dev/null
+/* Subroutines used for code generation on Ubicom IP2022
+ Communications Controller.
+ Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc and Ubicom, Inc.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING. If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "insn-addr.h"
+#include "flags.h"
+#include "reload.h"
+#include "tree.h"
+#include "expr.h"
+#include "toplev.h"
+#include "obstack.h"
+#include "function.h"
+#include "recog.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "basic-block.h"
+
+/* There are problems with 'frame_pointer_needed'.  If we force it
+   on, we either end up not eliminating uses of FP, which results in
+   SPILL register failures or we may end up with calculation errors in
+   the stack offsets.  Isolate the decision process into a simple macro.  */
+#define CHAIN_FRAMES (frame_pointer_needed || FRAME_POINTER_REQUIRED)
+
+/* Forward declarations of this file's static helpers.  The "mdr_"
+   prefix marks machine-dependent-reorg subpasses that rewrite the
+   insn stream after reload.  */
+static int ip2k_naked_function_p PARAMS ((tree));
+static void mdr_resequence_xy_yx PARAMS ((rtx));
+static void mdr_pres_replace_and_recurse PARAMS ((rtx, rtx, rtx));
+static void mdr_propagate_reg_equivs_sequence PARAMS ((rtx, rtx, rtx));
+static void mdr_propagate_reg_equivs PARAMS ((rtx));
+static int track_dp_reload PARAMS ((rtx, rtx *, int, int));
+static void mdr_try_dp_reload_elim PARAMS ((rtx));
+static void mdr_try_move_dp_reload PARAMS ((rtx));
+static int ip2k_check_can_adjust_stack_ref PARAMS ((rtx, int));
+static void ip2k_adjust_stack_ref PARAMS ((rtx *, int));
+static void mdr_try_move_pushes PARAMS ((rtx));
+static int ip2k_xexp_not_uses_reg_for_mem PARAMS ((rtx, unsigned int));
+static void mdr_try_propagate_clr_sequence PARAMS ((rtx, unsigned int));
+static void mdr_try_propagate_clr PARAMS ((rtx));
+static void mdr_try_propagate_move_sequence PARAMS ((rtx, rtx, rtx));
+static void mdr_try_propagate_move PARAMS ((rtx));
+static void mdr_try_remove_redundant_insns PARAMS ((rtx));
+static int track_w_reload PARAMS ((rtx, rtx *, int, int));
+static void mdr_try_wreg_elim PARAMS ((rtx));
+
+/* Initialize the GCC target structure.  */
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE function_prologue
+
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE function_epilogue
+
+#undef TARGET_ASM_UNIQUE_SECTION
+#define TARGET_ASM_UNIQUE_SECTION unique_section
+
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO encode_section_info
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Commands count in the compiled file.  */
+static int commands_in_file;
+
+/* Commands in the functions prologues in the compiled file.  */
+static int commands_in_prologues;
+
+/* Commands in the functions epilogues in the compiled file.  */
+static int commands_in_epilogues;
+
+/* Prologue/Epilogue size in words, reset per function by
+   function_prologue and reported by function_epilogue.  */
+static int prologue_size;
+static int epilogue_size;
+
+/* Compare and test instructions for the IP2K are materialized by
+   the conditional branch that uses them.  This is because conditional
+   branches are skips over unconditional branches.  */
+rtx ip2k_compare_operands[3];	/* Additional operands for condition code.  */
+int ip2k_test_flag;		/* Indicates Z, WREG contain condition code
+				   information.  */
+
+/* Some ip2k patterns push a byte onto the stack and then access
+   SP-relative addresses.  Since reload doesn't know about these
+   pushes, we must track them internally with a %< (push) or %> (pop)
+   indicator.  */
+static int ip2k_stack_delta;
+
+/* Track if or how far our ip2k reorganization pass has run.  */
+int ip2k_reorg_in_progress = 0;
+int ip2k_reorg_completed = 0;
+int ip2k_reorg_split_dimode = 0;
+int ip2k_reorg_split_simode = 0;
+int ip2k_reorg_split_himode = 0;
+int ip2k_reorg_split_qimode = 0;
+int ip2k_reorg_merge_qimode = 0;
+
+/* Set up local allocation order.  Copies this port's preferred
+   register allocation order (REG_ALLOC_ORDER) into the caller's
+   array RAO.  */
+
+void
+ip2k_init_local_alloc (rao)
+     int *rao;
+{
+  static const int allocation_order[] = REG_ALLOC_ORDER;
+  unsigned int i;
+
+  for (i = 0;
+       i < sizeof (allocation_order) / sizeof (allocation_order[0]);
+       i++)
+    rao[i] = allocation_order[i];
+}
+
+/* Returns the number of bytes of arguments automatically
+   popped when returning from a subroutine call.
+   FUNDECL is the declaration node of the function (as a tree),
+   FUNTYPE is the data type of the function (as a tree),
+   or for a library call it is an identifier node for the subroutine name.
+   SIZE is the number of bytes of arguments passed on the stack.  */
+
+int
+ip2k_return_pops_args (fundecl, funtype, size)
+     tree fundecl ATTRIBUTE_UNUSED;
+     tree funtype;
+     int size;
+{
+  /* Pop all SIZE bytes for library calls (identifier nodes) and for
+     functions whose argument list is absent or terminated by void;
+     otherwise pop nothing.  The IDENTIFIER_NODE test must come first
+     since TYPE_ARG_TYPES is meaningless for identifiers.  */
+  if (TREE_CODE (funtype) == IDENTIFIER_NODE
+      || TYPE_ARG_TYPES (funtype) == NULL_TREE
+      || TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype))) == void_type_node)
+    return size;
+
+  return 0;
+}
+
+/* Return non-zero if FUNC is a naked function, i.e. one carrying the
+   "naked" attribute, for which no prologue/epilogue may be emitted.
+   FUNC must be a FUNCTION_DECL.  */
+
+static int
+ip2k_naked_function_p (func)
+     tree func;
+{
+  if (TREE_CODE (func) != FUNCTION_DECL)
+    abort ();
+
+  /* Naked iff the attribute is present on the declaration.  */
+  return lookup_attribute ("naked", DECL_ATTRIBUTES (func)) != NULL_TREE;
+}
+
+/* Output function prologue.
+
+   FILE is the assembly output stream; SIZE is the local frame size in
+   bytes.  As a side effect the emitted word count is accumulated into
+   the file-static prologue_size, which function_epilogue folds into
+   the per-function statistics.  */
+void
+function_prologue (file, size)
+     FILE *file;
+     int size;
+{
+  int leaf_func_p;
+  int main_p;
+  int reg;
+  rtx operands[2];		/* Operand array for the OUT_ASn macros.  */
+
+  prologue_size = epilogue_size = 0;
+
+  /* A "naked" function gets no prologue at all.  */
+  if (ip2k_naked_function_p (current_function_decl))
+    {
+      fprintf (file, "/* prologue: naked */\n");
+      return;
+    }
+
+  leaf_func_p = leaf_function_p ();
+  main_p = ! strcmp ("main", current_function_name);
+
+  /* For now, we compute all these facts about the function, but don't
+     take any action based on the information.  */
+
+  prologue_size = 0;
+  fprintf (file, "/* prologue: frame size=%d */\n", size);
+
+  /* Unless we're a leaf we need to save the return PC.  */
+
+  if (! leaf_func_p)
+    {
+      OUT_AS1 (push, calll);
+      OUT_AS1 (push, callh);
+      prologue_size += 4;
+    }
+
+  /* We need to save the old FP and set the new FP pointing at the
+     stack location where the old one is saved.  Note that because of
+     post-decrement addressing, the SP is off-by-one after the
+     push, so we harvest the SP address BEFORE we push the MSBs of
+     the FP.  */
+  if (CHAIN_FRAMES)
+    {
+      OUT_AS1 (push, REG_FP+1);		/* Save old LSBs.  */
+      OUT_AS2 (mov, w, spl);
+      OUT_AS2 (mov, REG_FP+1, w);	/* SPL -> FPL */
+
+      OUT_AS2 (mov, w, sph);		/* Freeze SP MSBs */
+      OUT_AS1 (push, REG_FP);		/* Save old MSBs */
+      OUT_AS2 (mov, REG_FP, w);		/* SPH -> FPH */
+      prologue_size += 12;
+    }
+
+  /* Push each callee-saved register this function actually uses.
+     With a chained frame FP itself was saved above, so start just
+     below it; otherwise start just above it.  */
+  for (reg = (CHAIN_FRAMES) ? (REG_FP - 1) : (REG_FP + 1);
+       reg > 0; --reg)
+    {
+      if (regs_ever_live[reg] && ! call_used_regs[reg])
+	{
+	  fprintf (file, "\t" AS1 (push,%s) "\n", reg_names[reg]);
+	  prologue_size += 2;
+	}
+    }
+
+  /* Allocate the local frame by lowering SP: a single "dec" for a
+     byte value of one, otherwise subtract the low and high bytes
+     separately through W.  */
+  if (size)
+    {
+      operands[0] = GEN_INT (size);
+
+      switch (size & 0xff)
+	{
+	case 0:
+	  break;
+	case 1:
+	  OUT_AS1 (dec, spl);
+	  prologue_size += 2;
+	  break;
+	default:
+	  OUT_AS2 (mov, w, %L0);
+	  OUT_AS2 (sub, spl, w);
+	  prologue_size += 4;
+	}
+
+      switch (size & 0xff00)
+	{
+	case 0:
+	  break;
+	case 0x100:
+	  OUT_AS1 (dec, sph);
+	  prologue_size += 2;
+	  break;
+	default:
+	  /* W still holds the low byte whenever the switch above took
+	     its default arm; reload it only if the high byte differs.  */
+	  if ((size & 0xff) != ((size >> 8) & 0xff))
+	    OUT_AS2 (mov, w, %H0);	/* Otherwise W has value we want.  */
+	  OUT_AS2 (sub, sph, w);
+	  prologue_size += 4;
+	}
+    }
+
+/* XXX - change this to use the carry-propagating subtract trick.  */
+  /* Compare SP against _end (top of static data); on overflow push
+     $ff and trap to the system.  Labels 0:/1: are local numeric
+     labels; page/jmp pairs handle the banked jump.  */
+  if (flag_stack_check)
+    {
+      OUT_AS2 (mov, w, sph);
+      OUT_AS2 (cmp, w, #%%hi8data(_end));
+      OUT_AS1 (sc, );		/* C == 0 -> hi8(edata) < sph */
+      OUT_AS1 (page, 1f);
+      OUT_AS1 (jmp, 1f);
+      OUT_AS1 (sz, );		/* Z == 1 -> look at low byte */
+      OUT_AS1 (page,0f);
+      OUT_AS1 (jmp,0f);		/* sp < edata, so raise stack fault */
+      OUT_AS2 (mov, w, spl);
+      OUT_AS2 (cmp, w, #%%lo8data(_end));
+      OUT_AS1 (sc,);		/* C==1 -> lo8(edata) >= spl */
+      OUT_AS1 (page,1f);
+      OUT_AS1 (jmp,1f);
+      OUT_AS1 (0:,);
+      output_asm_insn ("push\t$ff", operands);
+      OUT_AS1 (system,);
+      OUT_AS1 (1:, );
+      prologue_size += 30;
+    }
+}
+
+/* Output function epilogue.
+
+   FILE is the assembly output stream; SIZE is the local frame size in
+   bytes and must mirror function_prologue.  Also emits the
+   per-function size statistics and resets the reorg tracking flags
+   ready for the next function.  */
+void
+function_epilogue (file, size)
+     FILE *file;
+     int size;
+{
+  int leaf_func_p;
+  int reg, savelimit;
+  int function_size;
+  rtx operands[2];		/* Dummy used by OUT_ASn */
+  int need_ret = 1;		/* Cleared when a shared __*_ret helper
+				   performs the return for us.  */
+
+  /* Use this opportunity to reset the reorg flags!  */
+  ip2k_reorg_in_progress = 0;
+  ip2k_reorg_completed = 0;
+  ip2k_reorg_split_dimode = 0;
+  ip2k_reorg_split_simode = 0;
+  ip2k_reorg_split_himode = 0;
+  ip2k_reorg_split_qimode = 0;
+  ip2k_reorg_merge_qimode = 0;
+
+  /* A "naked" function gets no epilogue at all.  */
+  if (ip2k_naked_function_p (current_function_decl))
+    {
+      fprintf (file, "/* epilogue: naked */\n");
+      return;
+    }
+
+  leaf_func_p = leaf_function_p ();
+  function_size = (INSN_ADDRESSES (INSN_UID (get_last_insn ()))
+		   - INSN_ADDRESSES (INSN_UID (get_insns ())));
+
+  epilogue_size = 0;
+  fprintf (file, "/* epilogue: frame size=%d */\n", size);
+
+  /* Release the local frame: a single "inc" for a byte value of one,
+     otherwise add the low and high bytes separately through W.  */
+  if (size)
+    {
+      operands[0] = GEN_INT (size);
+
+      switch (size & 0xff)
+	{
+	default:
+	  OUT_AS2 (mov, w, %L0);
+	  OUT_AS2 (add, spl, w);
+	  epilogue_size += 4;
+	  /* fall-thru */
+	case 0:
+	  break;
+	case 1:
+	  OUT_AS1 (inc, spl);
+	  epilogue_size += 2;
+	}
+
+      switch (size & 0xff00)
+	{
+	default:
+	  /* W holds the low byte whenever the switch above took its
+	     default arm; reload it only if the high byte differs.  */
+	  if ((size & 0xff) != ((size >> 8) & 0xff))
+	    OUT_AS2 (mov, w, %H0);
+	  OUT_AS2 (add, sph, w);
+	  epilogue_size += 4;
+	  /* fall-thru */
+	case 0:
+	  break;
+	case 0x100:
+	  /* Fixed: this read "case 1", which can never match a value
+	     masked with 0xff00, so a size like 0x0101 fell into the
+	     default arm and added a stale W to SPH.  */
+	  OUT_AS1 (inc, sph);
+	  epilogue_size += 2;
+	}
+    }
+
+  /* Pop the callee-saved registers, in the reverse of prologue order.  */
+  savelimit = (CHAIN_FRAMES) ? REG_FP : (REG_FP + 2);
+
+  for (reg = 0; reg < savelimit; reg++)
+    {
+      if (regs_ever_live[reg] && ! call_used_regs[reg])
+	{
+	  fprintf (file, "\t" AS1 (pop,%s) "\n", reg_names[reg]);
+	  /* Fixed: this incremented prologue_size, skewing the
+	     prologue/epilogue statistics below.  */
+	  epilogue_size += 2;
+	}
+    }
+
+  /* Restore the caller's frame pointer, unless one of the shared
+     __fp_pop*_args_ret helpers used below will do it for us.  */
+  if (CHAIN_FRAMES
+      && ! (current_function_pops_args
+	    && current_function_args_size >= 2
+	    && current_function_args_size < 0x100))
+    {
+      OUT_AS1 (pop, REG_FP);
+      OUT_AS1 (pop, REG_FP+1);
+      epilogue_size += 4;
+    }
+
+  if (! leaf_func_p)
+    {
+      if (current_function_pops_args
+	  && current_function_args_size >= 2
+	  && current_function_args_size < 0x100)
+	{
+	  /* Tail off into a shared helper that pops the arguments and
+	     returns, saving code space in every function.  */
+	  if (current_function_args_size == 2)
+	    {
+	      if (CHAIN_FRAMES)
+		{
+		  OUT_AS1 (page, __fp_pop2_args_ret);
+		  OUT_AS1 (jmp, __fp_pop2_args_ret);
+		}
+	      else
+		{
+		  OUT_AS1 (page, __pop2_args_ret);
+		  OUT_AS1 (jmp, __pop2_args_ret);
+		}
+	      epilogue_size += 4;
+	    }
+	  else
+	    {
+	      operands[0] = GEN_INT (current_function_args_size);
+	      OUT_AS2 (mov, w, %L0);
+	      if (CHAIN_FRAMES)
+		{
+		  OUT_AS1 (page, __fp_pop_args_ret);
+		  OUT_AS1 (jmp, __fp_pop_args_ret);
+		}
+	      else
+		{
+		  OUT_AS1 (page, __pop_args_ret);
+		  OUT_AS1 (jmp, __pop_args_ret);
+		}
+	      epilogue_size += 6;
+	    }
+	  need_ret = 0;
+	}
+      else
+	{
+	  /* Recover the return PC into CALLH:CALLL so that the "ret"
+	     emitted below can use it.  */
+	  OUT_AS1 (pop, callh);
+	  OUT_AS1 (pop, calll);
+	  epilogue_size += 4;
+	}
+    }
+  else
+    {
+      /* Leaf function: no return PC was pushed, so only the
+	 frame-chained leaf helpers (which also restore FP) apply.  */
+      if (current_function_pops_args
+	  && current_function_args_size >= 2
+	  && current_function_args_size < 0x100)
+	{
+	  if (current_function_args_size == 2)
+	    {
+	      if (CHAIN_FRAMES)
+		{
+		  OUT_AS1 (page, __leaf_fp_pop2_args_ret);
+		  OUT_AS1 (jmp, __leaf_fp_pop2_args_ret);
+		  epilogue_size += 4;
+		  need_ret = 0;
+		}
+	    }
+	  else
+	    {
+	      operands[0] = GEN_INT (current_function_args_size);
+	      if (CHAIN_FRAMES)
+		{
+		  OUT_AS2 (mov, w, %L0);
+		  OUT_AS1 (page, __leaf_fp_pop_args_ret);
+		  OUT_AS1 (jmp, __leaf_fp_pop_args_ret);
+		  epilogue_size += 6;
+		  need_ret = 0;
+		}
+	    }
+	}
+    }
+
+  /* Pop the arguments inline when no shared helper was usable.  */
+  if (current_function_pops_args && current_function_args_size
+      && need_ret)
+    {
+      operands[0] = GEN_INT (current_function_args_size);
+
+      switch (current_function_args_size & 0xff)
+	{
+	default:
+	  OUT_AS2 (mov, w, %L0);
+	  OUT_AS2 (add, spl, w);
+	  epilogue_size += 4;
+	  /* fall-thru */
+
+	case 0:
+	  break;
+
+	case 1:
+	  OUT_AS1 (inc, spl);
+	  epilogue_size += 2;
+	}
+
+      switch (current_function_args_size & 0xff00)
+	{
+	default:
+	  if ((current_function_args_size & 0xff)
+	      != ((current_function_args_size >> 8) & 0xff))
+	    OUT_AS2 (mov, w, %H0);
+	  OUT_AS2 (add, sph, w);
+	  epilogue_size += 4;
+	  /* fall-thru */
+
+	case 0:
+	  break;
+
+	case 0x100:
+	  /* Fixed: was "case 1" — unreachable for a 0xff00-masked
+	     value (see the frame-size switch above).  */
+	  OUT_AS1 (inc, sph);
+	  epilogue_size += 2;
+	}
+    }
+
+  if (need_ret)
+    {
+      OUT_AS1 (ret,);
+      epilogue_size += 2;
+    }
+
+  fprintf (file, "/* epilogue end (size=%d) */\n", epilogue_size);
+  fprintf (file, "/* function %s size %d (%d) */\n", current_function_name,
+	   prologue_size + function_size + epilogue_size, function_size);
+  commands_in_file += prologue_size + function_size + epilogue_size;
+  commands_in_prologues += prologue_size;
+  commands_in_epilogues += epilogue_size;
+}
+\f
+/* Return the difference between the registers after the function
+   prologue.  FROM and TO are the register numbers of an eliminable
+   register pair; the result is the byte offset to add when replacing
+   FROM with TO.
+
+   Stack Frame grows down:
+
+	ARGUMENTS
+		<------ AP ($102:$103)
+	RETURN PC (unless leaf function)
+	SAVEDFP (if needed)
+		<------ FP [HARD_FRAME_POINTER] ($FD:$FE)
+	SAVED REGS
+		<------ VFP [$100:$101]
+	STACK ALLOCATION
+		<------ SP ($6:$7) */
+int
+ip2k_init_elim_offset (from, to)
+     int from;
+     int to;
+{
+  int leaf_func_p = leaf_function_p ();
+  /* Naked and leaf functions never pushed a return PC.  */
+  int no_saved_pc = leaf_func_p
+		    || ip2k_naked_function_p (current_function_decl);
+  int offset;
+  int reg;
+  int reglimit;
+
+  /* The +1 matches the off-by-one SP noted in function_prologue
+     (post-decrement push addressing).  */
+  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+    return get_frame_size () + 1;
+
+  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+    return (CHAIN_FRAMES ? 2 : 0) + (no_saved_pc ? 0 : 2);
+
+  /* Count all the registers we had to preserve.  */
+
+  reglimit = CHAIN_FRAMES ? REG_FP : (REG_FP + 2);
+  for (offset = 0, reg = 0; reg < reglimit; ++reg)
+    {
+      if ((regs_ever_live[reg] && ! call_used_regs[reg]))
+	{
+	  ++offset;
+	}
+    }
+
+  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+    return -offset;
+
+  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+    /* Add in the stack-local variables.  */
+    return offset + get_frame_size () + 1;
+
+  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+    /* Add stack-locals plus saved FP and PC.  */
+    return offset + get_frame_size () + 1
+	   + (CHAIN_FRAMES ? 2 : 0) + (no_saved_pc ? 0 : 2);
+
+  abort ();			/* Unanticipated elimination.  */
+}
+
+/* Return nonzero if X (an RTX) is a legitimate memory address on the target
+   machine for a memory operand of mode MODE.
+
+   STRICT selects the post-reload (hard registers only) rules.  The
+   nonzero return values are constraint-letter-like codes ('R', 'S',
+   'C', 'L') rather than a plain 1; callers need only test for
+   nonzero.  */
+
+int
+legitimate_address_p (mode, x, strict)
+     enum machine_mode mode;
+     rtx x;
+     int strict;
+{
+  int off;
+
+  if (GET_CODE (x) == SUBREG)
+    x = SUBREG_REG (x);
+
+  switch (GET_CODE (x))
+    {
+    case REG:
+      /* IP allows indirection without offset - only okay if
+	 we don't require access to multiple bytes.  */
+      if (REGNO (x) == REG_IP)
+	return (GET_MODE_SIZE (mode) == 1) ? 'R' : 0;
+
+      /* We can indirect thru DP or SP register.  */
+      if (strict ? REG_OK_FOR_BASE_STRICT_P (x)
+		 : REG_OK_FOR_BASE_NOSTRICT_P (x))
+	return 'S';
+      break;
+
+    case PLUS:
+      /* Offsets from DP or SP are legal in the range 0..127 */
+      {
+	rtx op1, op2;
+
+	op1 = XEXP (x, 0);
+	op2 = XEXP (x, 1);
+
+	/* Canonicalize so any register ends up in op1.  */
+	if (REG_P (op2) && ! REG_P (op1))
+	  {
+	    rtx tmp = op1;
+	    op1 = op2;
+	    op2 = tmp;
+	  }
+
+	/* Don't let anything but R+I thru..  */
+	if (! REG_P (op1)
+	    || REG_P (op2)
+	    || GET_CODE (op2) != CONST_INT)
+	  return 0;
+
+	switch (REGNO (op1))
+	  {
+	  case REG_DP:		/* only 0..127 displacement */
+	  case REG_SP:
+	    /* NOTE(review): the factor of 2 presumably reserves room
+	       for every byte of the split multi-byte access — confirm
+	       against the word-splitting code.  */
+	    off = 2 * GET_MODE_SIZE (mode);
+	    if (! off)
+	      off = 1;
+
+	    if (INTVAL (op2) < 0 || INTVAL (op2) > (128 - off))
+	      return 0;		/* Positive must be small enough that after
+				   splitting all pieces are addressed.  */
+	    return 'S';		/* Safe displacement.  */
+
+	  case REG_IP:
+	    /* IP takes no displacement, so only a zero offset on a
+	       single-byte access is valid.  */
+	    if (GET_MODE_SIZE (mode) <= 1 && INTVAL (op2) == 0)
+	      return (GET_MODE_SIZE (mode) == 1) ? 'R' : 0;
+	    return 0;
+
+	  case REG_AP:
+	  case REG_FP:
+	  case REG_VFP:
+	  default:
+	    /* Soft registers: valid only before reload eliminates
+	       them.  */
+	    if (strict || ! REG_OK_FOR_BASE_NOSTRICT_P (op1))
+	      return 0;		/* Allow until reload.  */
+
+	    return 'S';
+	  }
+      }
+      break;
+
+    case CONST:
+    case SYMBOL_REF:
+      /* We always allow references to things in code space.  */
+      return is_regfile_address (x) ? 0 : 'C';
+
+    case LABEL_REF:
+      return 'L';
+
+    default:
+      return 0;
+    }
+
+  return 0;
+}
+
+/* Is ADDR mode dependent?  Returns 1 for auto-modify addresses and
+   for plain IP indirection (IP can't carry a displacement, so a
+   larger mode can't be addressed through it), otherwise 0.  */
+int
+ip2k_mode_dependent_address (addr)
+     rtx addr;
+{
+  enum rtx_code code = GET_CODE (addr);
+
+  /* Any auto-increment/decrement form is mode dependent.  */
+  if (code == POST_INC || code == POST_DEC
+      || code == PRE_INC || code == PRE_DEC)
+    return 1;
+
+  /* Can't do IP displaced addresses.  */
+  if (code == REG)
+    return REGNO (addr) == REG_IP;
+
+  return 0;			/* Assume no dependency.  */
+}
+
+/* Attempts to replace X with a valid
+   memory address for an operand of mode MODE.  OLDX is unused here;
+   SCRATCH, when non-null, is a register to reuse for the rebased
+   pointer instead of allocating a fresh pseudo.
+
+   NOTE(review): the K&R declarations below list SCRATCH before MODE,
+   but the identifier list (x, oldx, mode, scratch) is what fixes the
+   actual parameter order, so this still matches the prototype in
+   ip2k-protos.h — merely confusing to read.  */
+
+rtx
+legitimize_address (x, oldx, mode, scratch)
+     rtx x;
+     rtx oldx ATTRIBUTE_UNUSED;
+     rtx scratch;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  rtx reg;
+
+  /* You might think that we could split up a symbolic address by
+     adding the HIGH 8 bits and doing a displacement off the dp.  But
+     because we only have 7 bits of offset, that doesn't actually
+     help.  So only constant displacements are likely to obtain an
+     advantage.  */
+
+  if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
+      && GET_CODE (XEXP (x, 1)) == CONST_INT
+      && ! CONST_OK_FOR_LETTER_P (INTVAL (XEXP (x, 1)), 'K'))
+    {
+      int offset = INTVAL (XEXP (x, 1));
+
+      reg = scratch ? scratch : gen_reg_rtx (Pmode);
+
+      /* Fold everything above the low 6 bits of the offset into the
+	 base register, leaving a small in-range displacement.  */
+      emit_insn (gen_rtx_SET (VOIDmode, reg,
+			      gen_rtx_PLUS (Pmode, XEXP (x, 0),
+					    GEN_INT (offset & 0xffc0))));
+      x = gen_rtx_PLUS (Pmode, reg, GEN_INT (offset & 0x3f));
+    }
+
+  return x;			/* We don't have any other tricks.  */
+}
+\f
+/* Determine if X is a 'data' address or a code address.  All static
+   data and stack variables reside in data memory.  Only code is believed
+   to be in PRAM or FLASH.  Returns nonzero for a register-file (data)
+   address, zero for code.  */
+int
+is_regfile_address (x)
+     rtx x;
+{
+  /* Strip CONST/PLUS wrappers until we reach the underlying term.  */
+  for (;;)
+    {
+      enum rtx_code code = GET_CODE (x);
+
+      if (code == SYMBOL_REF)
+	return ! SYMBOL_REF_FLAG (x);	/* Declared as function.  */
+      else if (code == CONST || code == PLUS)
+	x = XEXP (x, 0);
+      else if (code == CONST_INT || code == REG || code == SUBREG)
+	return 1;
+      else
+	return 0;			/* LABEL_REF and all others.  */
+    }
+}
+
+/* Output ADDR to FILE as address.  */
+
+void
+print_operand_address (file, addr)
+     FILE *file;
+     rtx addr;
+{
+  switch (GET_CODE (addr))
+    {
+    case SUBREG:
+      addr = alter_subreg (&addr);
+      /* fall-thru */
+
+    case REG:
+      /* Registers with dedicated addressing roles print by role name;
+	 anything else by its generic register name.  */
+      fprintf (file, "(%s)",
+	       REGNO (addr) == REG_DP ? "DP"
+	       : REGNO (addr) == REG_SP ? "SP"
+	       : REGNO (addr) == REG_IP ? "IP"
+	       : REGNO (addr) == REG_VFP ? "VFP"	/* Should never see this */
+	       : REGNO (addr) == REG_AP ? "AP"		/* or this, either.  */
+	       : reg_names[REGNO (addr)]);
+      break;
+
+    case PRE_DEC:
+    case POST_INC:
+      /* Auto-modify addresses are not printable here.  */
+      abort ();
+      break;
+
+    case CONST:
+      /* (const (plus A B)) prints as "A+B".  */
+      addr = XEXP (addr, 0);
+      print_operand_address (file, XEXP (addr, 0));
+      fprintf (file, "+");
+      print_operand_address (file, XEXP (addr, 1));
+      return;
+
+    case LO_SUM:
+      /* Low 8 bits of a data or insn address, then the base.  */
+      if (is_regfile_address (XEXP (addr, 1)))
+	fprintf (file, "%%lo8data(");
+      else
+	fprintf (file, "%%lo8insn(");
+      print_operand_address (file, XEXP (addr, 1));
+      fprintf (file, ")");
+      print_operand_address (file, XEXP (addr, 0));
+      break;
+
+    case PLUS:			/* Ought to be stack or dp references.  */
+      /* Collapse (plus (plus R N) 0) to the inner PLUS.  */
+      if (XEXP (addr, 1) == const0_rtx
+	  && GET_CODE (XEXP (addr, 0)) == PLUS)
+	{
+	  print_operand_address (file, XEXP (addr, 0));
+	  return;
+	}
+
+      /* Emit "offset(reg)"; IP takes no displacement, so the offset
+	 is suppressed for it.  */
+      if (! REG_P (XEXP (addr, 0)) || REGNO (XEXP (addr, 0)) != REG_IP)
+	print_operand_address (file, XEXP (addr, 1));	/* const */
+      print_operand_address (file, XEXP (addr, 0));	/* (reg) */
+      break;
+
+    case HIGH:
+      /* High 8 bits of a data or insn address.  */
+      if (is_regfile_address (XEXP (addr, 0)))
+	fprintf (file, "%%hi8data(");
+      else
+	fprintf (file, "%%hi8insn(");
+      output_addr_const (file, XEXP (addr, 0));
+      fprintf (file, ")");
+      break;
+
+    default:
+      output_addr_const (file, addr);
+    }
+}
+
+
+/* Output X as assembler operand to file FILE.
+
+   CODE selects how the operand is printed:
+     '<' / '>'  bump / drop the internal stack-delta tracking (no output);
+     'A'-'D'    select byte 0-3 (most significant first) of a 32-bit value;
+     'H' / 'L'  high / low byte of a 16-bit value;
+     'S'-'Z'    select byte 0-7 (most significant first) of a 64-bit value;
+     'x'        print a CONST_INT in hex;
+     'b'        print a CONST_INT as a bare bit number;
+     'e'        print 1 << value for a CONST_INT.  */
+
+void
+print_operand (file, x, code)
+     FILE *file;
+     rtx x;
+     int code;
+{
+  int abcd = 0;			/* Byte index selected by CODE.  */
+  unsigned long value;
+
+  switch (code)
+    {
+    case '<':			/* Push */
+      ip2k_stack_delta++;
+      return;
+
+    case '>':			/* Pop */
+      ip2k_stack_delta--;
+      return;
+
+    case 'A':
+    case 'B':
+    case 'C':
+    case 'D':
+      abcd = code - 'A';
+      break;
+
+    case 'H':
+      abcd = 0;
+      break;
+
+    case 'L':
+      abcd = 1;
+      break;
+
+    case 'S':
+    case 'T':
+    case 'U':
+    case 'V':
+    case 'W':
+    case 'X':
+    case 'Y':
+    case 'Z':
+      abcd = code - 'S';
+      /* No break: falls into the default, which does nothing more.  */
+
+    default:
+      break;
+    }
+
+  if (ip2k_short_operand (x, GET_MODE (x))
+      && ip2k_address_uses_reg_p (x, REG_SP))
+    /* An SP-relative address needs to account for interior stack
+       pushes that reload didn't know about when it calculated the
+       stack offset.  */
+    abcd += ip2k_stack_delta;
+
+  switch (GET_CODE (x))
+    {
+    case SUBREG:
+      x = alter_subreg (&x);
+      /* fall-thru */
+
+    case REG:
+      /* NOTE(review): the register name is passed as the fprintf
+	 format string; fputs would be safer if a name could ever
+	 contain '%'.  */
+      fprintf (file, reg_names[true_regnum (x) + abcd]);
+      break;
+
+    case CONST_INT:
+      switch (code)
+	{
+	case 'x':
+	  fprintf (file, "$%x", INTVAL (x) & 0xffff);
+	  break;
+
+	case 'b':
+	  fprintf (file, "%d", INTVAL (x));	/* bit selector */
+	  break;
+
+	case 'e':		/* "1 << n" - e.g. "exp" */
+	  fprintf (file, "#%d", 1 << INTVAL (x));
+	  break;
+
+	case 'A':
+	case 'B':
+	case 'C':
+	case 'D':
+	  /* Byte ABCD of a 32-bit constant, MSB first.  */
+	  value = INTVAL (x);
+	  value >>= 8 * (3 - abcd);
+	  value &= 0xff;
+
+	  fprintf (file, "#%ld", value);
+	  break;
+
+	case 'H':
+	  fprintf (file, "#%d", (INTVAL (x) >> 8) & 0xff);
+	  break;
+
+	case 'L':
+	  fprintf (file, "#%d", INTVAL (x) & 0xff);
+	  break;
+
+	case 'S':
+	case 'T':
+	case 'U':
+	case 'V':
+	case 'W':
+	case 'X':
+	case 'Y':
+	case 'Z':
+	  /* Byte ABCD of a 64-bit constant, MSB first.  */
+	  value = ((unsigned long long)INTVAL (x)) >> (8 * (7 - abcd)) & 0xff;
+	  fprintf (file, "#%ld", value);
+	  break;
+
+	default:
+	  fprintf (file, "#%d", INTVAL (x));
+	}
+      break;
+
+    case SYMBOL_REF:
+    case LABEL_REF:
+    case CODE_LABEL:
+    case CONST:
+      switch (code)
+	{
+	case 'A':
+	case 'B':
+	case 'C':
+	case 'D':
+	case 'S':
+	case 'T':
+	case 'U':
+	case 'V':
+	case 'W':
+	case 'X':
+	case 'Y':
+	case 'Z':
+	  /* Byte selectors make no sense on symbolic operands.  */
+	  abort ();		/* Probably an error.  */
+	  break;
+
+	case 'H':
+	  fprintf (file, "#%s(",
+		   is_regfile_address (x) ? "%hi8data"
+					  : "%hi8insn");
+	  print_operand_address (file, x);
+	  fputc (')', file);
+	  break;
+
+	case 'L':
+	  fprintf (file, "#%s(",
+		   is_regfile_address (x) ? "%lo8data"
+					  : "%lo8insn");
+	  print_operand_address (file, x);
+	  fputc (')', file);
+	  break;
+
+	default:
+	  print_operand_address (file, x);
+	}
+      break;
+
+    case MEM:
+      {
+	rtx addr = XEXP (x, 0);
+
+	/* NOTE(review): this passes &x, not &addr, to alter_subreg;
+	   it looks like it should operate on ADDR — confirm.  */
+	if (GET_CODE (addr) == SUBREG)
+	  addr = alter_subreg (&x);
+
+	if (CONSTANT_P (addr) && abcd)
+	  {
+	    fputc ('(', file);
+	    print_operand_address (file, addr);
+	    fprintf (file, ")+%d", abcd);
+	  }
+	else if (abcd)
+	  {
+	    switch (GET_CODE (addr))
+	      {
+	      case PLUS:
+		/* Fold the byte-select into the displacement.
+		   NOTE(review): assumes XEXP (addr, 1) is a CONST_INT
+		   — confirm all PLUS addresses reaching here are
+		   reg+const.  */
+		abcd += INTVAL (XEXP (addr, 1));
+
+		/* Worry about (plus (plus (reg DP) (const_int 10))
+		   (const_int 0)) */
+		if (GET_CODE (XEXP (addr, 0)) == PLUS)
+		  {
+		    addr = XEXP (addr, 0);
+		    abcd += INTVAL (XEXP (addr, 1));
+		  }
+
+		fprintf (file, "%d", abcd);
+		print_operand_address (file, XEXP (addr, 0));
+		break;
+
+	      case REG:
+	      default:
+		fprintf (file, "%d", abcd);
+		print_operand_address (file, addr);
+	      }
+	  }
+	else if (GET_CODE (addr) == REG
+		 && (REGNO (addr) == REG_DP || REGNO (addr) == REG_SP))
+	  {
+	    /* Plain (DP) / (SP) indirection prints an explicit zero
+	       displacement.  */
+	    fprintf (file, "0");
+	    print_operand_address (file, addr);
+	  }
+	else
+	  print_operand_address (file, addr);
+      }
+      break;
+
+    case CONST_DOUBLE:
+      /* Is this an integer or a floating point value?  */
+      if (GET_MODE (x) == VOIDmode)
+	{
+	  switch (code)
+	    {
+	    case 'S':
+	    case 'T':
+	    case 'U':
+	    case 'V':
+	      /* High word of a 64-bit integer constant.  */
+	      value = CONST_DOUBLE_HIGH (x);
+	      value >>= 8 * (3 - abcd);
+	      value &= 0xff;
+
+	      fprintf (file, "#%ld", value);
+	      break;
+
+	    case 'W':
+	    case 'X':
+	    case 'Y':
+	    case 'Z':
+	      /* Low word of a 64-bit integer constant.  */
+	      value = CONST_DOUBLE_LOW (x);
+	      value >>= 8 * (7 - abcd);
+	      value &= 0xff;
+
+	      fprintf (file, "#%ld", value);
+	      break;
+	    }
+
+	}
+      else
+	{
+	  REAL_VALUE_TYPE rv;
+
+	  REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+	  REAL_VALUE_TO_TARGET_SINGLE (rv, value);
+	  asm_fprintf (file, "0x%x", value);
+	}
+      break;
+
+    default:
+      fatal_insn ("bad operand", x);
+    }
+}
+\f
+/* Remember the operands for the compare.  X and Y are stashed in
+   ip2k_compare_operands for the conditional branch / sCOND pattern
+   that actually materializes the comparison.  Always returns "" so
+   it can be used directly as an insn output template.  */
+const char *
+ip2k_set_compare (x, y)
+     rtx x;
+     rtx y;
+{
+  /* If we're doing a DImode compare then force any CONST_INT second
+     operand to be CONST_DOUBLE.  */
+  if (GET_MODE (x) == DImode && GET_CODE (y) == CONST_INT)
+    {
+      rtx value;
+      int i;			/* Fixed: was used below without a declaration.  */
+
+      value = rtx_alloc (CONST_DOUBLE);
+      PUT_MODE (value, VOIDmode);
+
+      CONST_DOUBLE_LOW (value) = INTVAL (y);
+      /* Fixed: was "> 0", which sign-extended zero to all-ones in the
+	 high word; a non-negative value must extend with zero.  */
+      CONST_DOUBLE_HIGH (value) = INTVAL (y) >= 0 ? 0 : -1;
+
+      /* Clear any remaining words of the CONST_DOUBLE.  */
+      for (i = 2; i < (int) (sizeof CONST_DOUBLE_FORMAT - 1); i++)
+	XWINT (value, i) = 0;
+
+      y = lookup_const_double (value);
+    }
+
+  ip2k_compare_operands[0] = x;
+  ip2k_compare_operands[1] = y;
+  return "";
+}
+
+/* Emit the code for sCOND instructions: store 1 in DEST if the comparison
+   recorded by ip2k_set_compare (in ip2k_compare_operands) holds, else 0.
+   Every test below only asks "code == EQ", so CODE is effectively EQ or
+   NE -- NOTE(review): callers presumably only pass EQ/NE; confirm against
+   the .md expanders.  Returns "" so the result can be used directly as an
+   output template.  */
+const char *
+ip2k_gen_sCOND (insn, code, dest)
+     rtx insn ATTRIBUTE_UNUSED;
+     enum rtx_code code;
+     rtx dest;
+{
+#define operands ip2k_compare_operands
+  enum machine_mode mode;
+
+  operands[2] = dest;
+
+  /* A CONST_INT operand has VOIDmode, so take the comparison mode from
+     whichever operand has a recognized integer mode.  */
+  mode = GET_MODE (operands[0]);
+  if ((mode != QImode) && (mode != HImode)
+      && (mode != SImode) && (mode != DImode))
+    mode = GET_MODE (operands[1]);
+
+  /* We have a fast path for a specific type of QImode compare.  We ought
+     to extend this for larger cases too but that wins less frequently and
+     introduces a lot of complexity.  The fast path clears DEST first, so
+     it requires that DEST not overlap either compare operand.  */
+  if (mode == QImode
+      && !rtx_equal_p (operands[0], operands[2])
+      && !rtx_equal_p (operands[1], operands[2])
+      && (! REG_P (operands[2])
+	  || (ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[2]), 1)
+	      && ip2k_xexp_not_uses_reg_p (operands[1],
+					   REGNO (operands[2]), 1))))
+    {
+      OUT_AS1 (clr, %2);
+      if (immediate_operand (operands[1], QImode)
+	  && ((INTVAL (operands[1]) & 0xff) == 0xff))
+	{
+	  /* Comparing against 0xff: an increment yields zero exactly when
+	     the operand was 0xff, so the inc-and-skip forms do the test.  */
+	  if (code == EQ)
+	    OUT_AS2 (incsnz, w, %0);
+	  else
+	    OUT_AS2 (incsz, w, %0);
+	}
+      else if (immediate_operand (operands[1], QImode)
+	       && ((INTVAL (operands[1]) & 0xff) == 0x01))
+	{
+	  /* Comparing against 1: decrement-and-skip does the test.  */
+	  if (code == EQ)
+	    OUT_AS2 (decsnz, w, %0);
+	  else
+	    OUT_AS2 (decsz, w, %0);
+	}
+      else if (ip2k_compare_operands[1] == const0_rtx)
+	{
+	  /* Comparing against 0: loading the operand into W sets Z.  */
+	  OUT_AS2 (mov, w, %0);
+	  if (code == EQ)
+	    OUT_AS1 (snz,);
+	  else
+	    OUT_AS1 (sz,);
+	}
+      else
+	{
+	  /* General case: compare-and-skip against the other operand.  */
+	  OUT_AS2 (mov, w, %0);
+	  if (code == EQ)
+	    OUT_AS2 (csne, w, %1);
+	  else
+	    OUT_AS2 (cse, w, %1);
+	}
+      /* The skip forms above skip this increment when the condition fails,
+	 leaving the 0 stored by the initial clr.  */
+      OUT_AS1 (inc, %2);
+    }
+  else
+    {
+      if (ip2k_compare_operands[1] == const0_rtx)
+	{
+	  /* Compare against zero: OR all the operand bytes together so Z
+	     ends up reflecting whether the whole value is zero.  */
+	  switch (mode)
+	    {
+	    case QImode:
+	      OUT_AS2 (mov, w, %0);
+	      break;
+
+	    case HImode:
+	      OUT_AS2 (mov, w, %H0);
+	      OUT_AS2 (or, w, %L0);
+	      break;
+
+	    case SImode:
+	      OUT_AS2 (mov, w, %A0);
+	      OUT_AS2 (or, w, %B0);
+	      OUT_AS2 (or, w, %C0);
+	      OUT_AS2 (or, w, %D0);
+	      break;
+
+	    case DImode:
+	      OUT_AS2 (mov, w, %S0);
+	      OUT_AS2 (or, w, %T0);
+	      OUT_AS2 (or, w, %U0);
+	      OUT_AS2 (or, w, %V0);
+	      OUT_AS2 (or, w, %W0);
+	      OUT_AS2 (or, w, %X0);
+	      OUT_AS2 (or, w, %Y0);
+	      OUT_AS2 (or, w, %Z0);
+	      break;
+
+	    default:
+	      abort ();
+	    }
+	}
+      else
+	{
+	  switch (mode)
+	    {
+	    case QImode:
+	      OUT_AS2 (mov, w, %1);
+	      OUT_AS2 (cmp, w, %0);
+	      break;
+
+	    case HImode:
+	      /* Compare the high bytes; if they differ, jump past the
+		 low-byte compare so Z reflects the first differing byte.  */
+	      OUT_AS2 (mov, w, %H1);
+	      OUT_AS2 (cmp, w, %H0);
+	      OUT_AS1 (sz,);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %L1);
+	      OUT_AS2 (cmp, w, %L0);
+	      OUT_AS1 (2:,);
+	      break;
+
+	    case SImode:
+	      /* Use MULH as a scratch result flag: preset it to 1 (EQ) or
+		 0 (NE), then byte-compare with compare-skip-if-equal.  Any
+		 mismatch jumps to 2: where the flag is adjusted; a full
+		 match skips the final adjustment, leaving the preset.  */
+	      if (code == EQ)
+		{
+		  OUT_AS2 (mov, w, #1);
+		  OUT_AS2 (mov, mulh, w);
+		}
+	      else
+		OUT_AS1 (clr, mulh);
+	      OUT_AS2 (mov, w, %A1);
+	      OUT_AS2 (cse, w, %A0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %B1);
+	      OUT_AS2 (cse, w, %B0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %C1);
+	      OUT_AS2 (cse, w, %C0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %D1);
+	      OUT_AS2 (cse, w, %D0);
+	      OUT_AS1 (2:,);
+	      /* Skipped when every byte matched.  */
+	      if (code == EQ)
+		OUT_AS1 (dec, mulh);
+	      else
+		OUT_AS1 (inc, mulh);
+	      OUT_AS2 (mov, w, mulh);
+	      OUT_AS2 (mov, %2, w);
+	      return "";
+
+	    case DImode:
+	      /* Same MULH-flag scheme as SImode, over eight bytes.  */
+	      if (code == EQ)
+		{
+		  OUT_AS2 (mov, w, #1);
+		  OUT_AS2 (mov, mulh, w);
+		}
+	      else
+		OUT_AS1 (clr, mulh);
+	      OUT_AS2 (mov, w, %S1);
+	      OUT_AS2 (cse, w, %S0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %T1);
+	      OUT_AS2 (cse, w, %T0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %U1);
+	      OUT_AS2 (cse, w, %U0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %V1);
+	      OUT_AS2 (cse, w, %V0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %W1);
+	      OUT_AS2 (cse, w, %W0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %X1);
+	      OUT_AS2 (cse, w, %X0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %Y1);
+	      OUT_AS2 (cse, w, %Y0);
+	      OUT_AS1 (page, 2f);
+	      OUT_AS1 (jmp, 2f);
+	      OUT_AS2 (mov, w, %Z1);
+	      OUT_AS2 (cse, w, %Z0);
+	      OUT_AS1 (2:,);
+	      /* Skipped when every byte matched.  */
+	      if (code == EQ)
+		OUT_AS1 (dec, mulh);
+	      else
+		OUT_AS1 (inc, mulh);
+	      OUT_AS2 (mov, w, mulh);
+	      OUT_AS2 (mov, %2, w);
+	      return "";
+
+	    default:
+	      abort ();
+	    }
+	}
+      /* Z now holds the comparison result; materialize 0/1 in W and
+	 store it.  The skip leaves W at 0 when the condition fails.  */
+      OUT_AS2 (mov, w, #0);
+      if (code == EQ)
+	OUT_AS1 (snz,);
+      else
+	OUT_AS1 (sz,);
+      OUT_AS1 (inc, wreg);
+      OUT_AS2 (mov, %2, w);
+    }
+
+  return "";
+#undef operands
+}
+
+/* Output a conditional branch to LABEL for a signed comparison (CODE is
+   LT/GT/LE/GE) of the operands recorded by ip2k_set_compare.  Compares
+   against zero are emitted inline via sign-bit and zero tests; all other
+   signed compares call the out-of-line __cmp<mode>2 helpers, which leave
+   an ordering value in W -- NOTE(review): presumably 0 = less, 1 = equal,
+   2 = greater; confirm against libgcc.S.  Returns "".  */
+const char *
+ip2k_gen_signed_comp_branch (insn, code, label)
+     rtx insn;
+     enum rtx_code code;
+     rtx label;
+{
+#define operands ip2k_compare_operands
+  enum machine_mode mode;
+  int can_use_skip = 0;		/* Nonzero: emit a bare skip instead of
+				   skip + page/jmp (see below).  */
+  rtx ninsn;
+
+  operands[2] = label;
+
+  /* A CONST_INT operand has VOIDmode, so take the comparison mode from
+     whichever operand has a recognized integer mode.  */
+  mode = GET_MODE (operands[0]);
+  if ((mode != QImode) && (mode != HImode)
+      && (mode != SImode) && (mode != DImode))
+    mode = GET_MODE (operands[1]);
+
+  /* Look for situations where we can just skip the next instruction instead
+     of skipping and then branching!  */
+  ninsn = next_real_insn (insn);
+  if (ninsn
+      && (recog_memoized (ninsn) >= 0)
+      && get_attr_skip (ninsn) == SKIP_YES)
+    {
+      rtx skip_tgt = next_nonnote_insn (next_real_insn (insn));
+
+      /* The first situation is where the target of the jump is one insn
+         after the jump insn and the insn being jumped is only one machine
+         opcode long.  */
+      if (label == skip_tgt)
+	can_use_skip = 1;
+      else
+	{
+	  /* If our skip target is in fact a code label then we ignore the
+	     label and move onto the next useful instruction.  Nothing we do
+	     here has any effect on the use of skipping instructions.  */
+	  if (GET_CODE (skip_tgt) == CODE_LABEL)
+	    skip_tgt = next_nonnote_insn (skip_tgt);
+
+	  /* The second situation is where we have something of the form:
+
+	       test_condition
+	       skip_conditional
+	       page/jump label
+
+	     optional_label (this may or may not exist):
+	       skippable_insn
+	       page/jump label
+
+	     In this case we can eliminate the first "page/jump label".  */
+	  if (GET_CODE (skip_tgt) == JUMP_INSN)
+	    {
+	      rtx set = single_set (skip_tgt);
+	      if (GET_CODE (XEXP (set, 0)) == PC
+		  && GET_CODE (XEXP (set, 1)) == LABEL_REF
+		  && label == JUMP_LABEL (skip_tgt))
+		can_use_skip = 2;
+	    }
+	}
+    }
+
+  /* gcc is a little braindead and does some rather stateful things while
+     inspecting attributes - we have to put this state back to what it's
+     supposed to be.  */
+  extract_constrain_insn_cached (insn);
+
+  if (ip2k_compare_operands[1] == const0_rtx)	/* These are easier.  */
+    {
+      switch (code)
+	{
+	case LT:
+	  /* x < 0 is just the sign bit (bit 7 of the high byte).  */
+	  if (can_use_skip)
+	    {
+	      OUT_AS2 (sb, %0, 7);
+	    }
+	  else
+	    {
+	      OUT_AS2 (snb, %0, 7);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	    }
+	  break;
+
+	case GT:
+	  /* x > 0: sign bit clear AND value nonzero.  The rl captures the
+	     sign bit in carry; the ORs compute the zero test; the snz/setb
+	     pair forces carry set when x == 0, so carry ends up set iff
+	     x <= 0 and sb then skips the branch in exactly that case.  */
+	  switch (mode)
+	    {
+	    case DImode:
+	      OUT_AS2 (rl, w, %S0);
+	      OUT_AS2 (mov, w, %S0);
+	      OUT_AS2 (or, w, %T0);
+	      OUT_AS2 (or, w, %U0);
+	      OUT_AS2 (or, w, %V0);
+	      OUT_AS2 (or, w, %W0);
+	      OUT_AS2 (or, w, %X0);
+	      OUT_AS2 (or, w, %Y0);
+	      OUT_AS2 (or, w, %Z0);
+	      OUT_AS1 (snz, );
+	      OUT_AS2 (setb, status, 0);
+	      OUT_AS2 (sb, status, 0);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	      break;
+
+	    case SImode:
+	      OUT_AS2 (rl, w, %A0);
+	      OUT_AS2 (mov, w, %A0);
+	      OUT_AS2 (or, w, %B0);
+	      OUT_AS2 (or, w, %C0);
+	      OUT_AS2 (or, w, %D0);
+	      OUT_AS1 (snz, );
+	      OUT_AS2 (setb, status, 0);
+	      OUT_AS2 (sb, status, 0);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	      break;
+
+	    case HImode:
+	      OUT_AS2 (rl, w, %H0);
+	      OUT_AS2 (mov, w, %H0);
+	      OUT_AS2 (or, w, %L0);
+	      OUT_AS1 (snz, );
+	      OUT_AS2 (setb, status, 0);
+	      OUT_AS2 (sb, status, 0);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	      break;
+
+	    case QImode:
+	      /* For QImode the value sits in W, so test bit 7 of WREG
+		 directly instead of going through carry.  */
+	      OUT_AS2 (mov, w, %0);	/* Will just do "sb w, 7".  */
+	      OUT_AS1 (snz, );
+	      OUT_AS2 (setb, wreg, 7);
+	      OUT_AS2 (sb, wreg, 7);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	      break;
+
+	    default:
+	      abort ();
+	    }
+	  break;
+
+	case LE:
+	  /* x <= 0: zero OR sign bit set.  When the ORed bytes are zero,
+	     sz skips the snb so the branch is taken; otherwise branch only
+	     if the sign bit is set.  */
+	  switch (mode)
+	    {
+	    case DImode:
+	      OUT_AS2 (mov, w, %S0);
+	      OUT_AS2 (or, w, %T0);
+	      OUT_AS2 (or, w, %U0);
+	      OUT_AS2 (or, w, %V0);
+	      OUT_AS2 (or, w, %W0);
+	      OUT_AS2 (or, w, %X0);
+	      OUT_AS2 (or, w, %Y0);
+	      OUT_AS2 (or, w, %Z0);	/* Z is correct.  */
+	      OUT_AS1 (sz, );
+	      OUT_AS2 (snb, %S0, 7);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	      break;
+
+	    case SImode:
+	      OUT_AS2 (mov, w, %A0);
+	      OUT_AS2 (or, w, %B0);
+	      OUT_AS2 (or, w, %C0);
+	      OUT_AS2 (or, w, %D0);	/* Z is correct.  */
+	      OUT_AS1 (sz, );
+	      OUT_AS2 (snb, %A0, 7);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	      break;
+
+	    case HImode:
+	      OUT_AS2 (mov, w, %H0);
+	      OUT_AS2 (or, w, %L0);
+	      OUT_AS1 (sz, );
+	      OUT_AS2 (snb, %H0, 7);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	      break;
+
+	    case QImode:
+	      OUT_AS2 (mov, w, %0);	/* Will just do "sb w, 7".  */
+	      OUT_AS1 (sz, );
+	      OUT_AS2 (snb, wreg, 7);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	      break;
+
+	    default:
+	      abort ();
+	    }
+	  break;
+
+	case GE:
+	  /* x >= 0 is just the inverse of the sign bit.  */
+	  if (can_use_skip)
+	    {
+	      OUT_AS2 (snb, %0, 7);
+	    }
+	  else
+	    {
+	      OUT_AS2 (sb, %0, 7);
+	      OUT_AS1 (page, %2);
+	      OUT_AS1 (jmp, %2);
+	    }
+	  break;
+
+	default:
+	  abort ();
+	}
+      return "";
+    }
+
+  /* signed compares are out of line because we can't get
+     the hardware to compute the overflow for us.  */
+
+  /* Push both operands (most significant byte last) and call the
+     appropriate libgcc helper.  The %< / %> punctuation adjusts the
+     compiler's notion of the stack offset around the pushes -- see
+     print_operand (NOTE(review): confirm exact semantics there).  */
+  switch (mode)
+    {
+    case QImode:
+      OUT_AS1 (push, %1%<);
+      OUT_AS1 (push, %0%>);
+      OUT_AS1 (page, __cmpqi2);
+      OUT_AS1 (call, __cmpqi2);
+      break;
+
+    case HImode:
+      OUT_AS1 (push, %L1%<);
+      OUT_AS1 (push, %H1%<);
+      OUT_AS1 (push, %L0%<);
+      OUT_AS1 (push, %H0%>%>%>);
+      OUT_AS1 (page, __cmphi2);
+      OUT_AS1 (call, __cmphi2);
+      break;
+
+    case SImode:
+      OUT_AS1 (push, %D1%<);
+      OUT_AS1 (push, %C1%<);
+      OUT_AS1 (push, %B1%<);
+      OUT_AS1 (push, %A1%<);
+      OUT_AS1 (push, %D0%<);
+      OUT_AS1 (push, %C0%<);
+      OUT_AS1 (push, %B0%<);
+      OUT_AS1 (push, %A0%>%>%>%>%>%>%>);
+      OUT_AS1 (page, __cmpsi2);
+      OUT_AS1 (call, __cmpsi2);
+      break;
+
+    case DImode:
+      /* When operand 0 is addressed through DP, the __cmpdi2_dp helper
+	 reads it directly, so only operand 1 needs pushing.  */
+      if (GET_CODE (operands[0]) == MEM
+	  && true_regnum (XEXP (operands[0], 0)) == REG_DP)
+	{
+	  OUT_AS1 (push, %Z1%<);
+	  OUT_AS1 (push, %Y1%<);
+	  OUT_AS1 (push, %X1%<);
+	  OUT_AS1 (push, %W1%<);
+	  OUT_AS1 (push, %V1%<);
+	  OUT_AS1 (push, %U1%<);
+	  OUT_AS1 (push, %T1%<);
+	  OUT_AS1 (push, %S1%>%>%>%>%>%>%>);
+	  OUT_AS1 (page, __cmpdi2_dp);
+	  OUT_AS1 (call, __cmpdi2_dp);
+	}
+      else
+	{
+	  OUT_AS1 (push, %Z1%<);
+	  OUT_AS1 (push, %Y1%<);
+	  OUT_AS1 (push, %X1%<);
+	  OUT_AS1 (push, %W1%<);
+	  OUT_AS1 (push, %V1%<);
+	  OUT_AS1 (push, %U1%<);
+	  OUT_AS1 (push, %T1%<);
+	  OUT_AS1 (push, %S1%<);
+	  OUT_AS1 (push, %Z0%<);
+	  OUT_AS1 (push, %Y0%<);
+	  OUT_AS1 (push, %X0%<);
+	  OUT_AS1 (push, %W0%<);
+	  OUT_AS1 (push, %V0%<);
+	  OUT_AS1 (push, %U0%<);
+	  OUT_AS1 (push, %T0%<);
+	  OUT_AS1 (push, %S0%>%>%>%>%>%>%>%>%>%>%>%>%>%>%>);
+	  OUT_AS1 (page, __cmpdi2);
+	  OUT_AS1 (call, __cmpdi2);
+	}
+      break;
+
+    default:
+      abort ();
+    }
+
+  /* Branch on the ordering value the helper left in W.  LT/GE test
+     against 0 (the "less" code); GT/LE test against 2 ("greater") --
+     LE uses a bit test since only the value 2 has bit 1 set here.  */
+  switch (code)
+    {
+    case LT:
+      if (can_use_skip)
+	{
+	  OUT_AS2 (cse, w, #0);
+	}
+      else
+	{
+	  OUT_AS2 (csne, w, #0);
+	  OUT_AS1 (page, %2);
+	  OUT_AS1 (jmp, %2);
+	}
+      break;
+
+    case GT:
+      if (can_use_skip)
+	{
+	  OUT_AS2 (cse, w, #2);
+	}
+      else
+	{
+	  OUT_AS2 (csne, w, #2);
+	  OUT_AS1 (page, %2);
+	  OUT_AS1 (jmp, %2);
+	}
+      break;
+
+    case LE:
+      if (can_use_skip)
+	{
+	  OUT_AS2 (snb, wreg, 1);
+	}
+      else
+	{
+	  OUT_AS2 (sb, wreg, 1);
+	  OUT_AS1 (page, %2);
+	  OUT_AS1 (jmp, %2);
+	}
+      break;
+
+    case GE:
+      if (can_use_skip)
+	{
+	  OUT_AS2 (csne, w, #0);
+	}
+      else
+	{
+	  OUT_AS2 (cse, w, #0);
+	  OUT_AS1 (page, %2);
+	  OUT_AS1 (jmp, %2);
+	}
+      break;
+
+    default:
+      abort ();
+    }
+  return "";
+#undef operands
+}
+
+const char *
+ip2k_gen_unsigned_comp_branch (insn, code, label)
+ rtx insn;
+ enum rtx_code code;
+ rtx label;
+{
+#define operands ip2k_compare_operands
+ enum machine_mode mode;
+ int imm_sub = 0;
+ int imm_cmp = 0;
+ int can_use_skip = 0;
+ rtx ninsn;
+
+ operands[2] = label;
+
+ mode = GET_MODE (operands[0]);
+ if ((mode != QImode) && (mode != HImode) && (mode != SImode)
+ && (mode != DImode))
+ {
+ mode = GET_MODE (operands[1]);
+ }
+
+ /* Look for situations where we can just skip the next instruction instead
+ of skipping and then branching! */
+ ninsn = next_real_insn (insn);
+ if (ninsn
+ && (recog_memoized (ninsn) >= 0)
+ && get_attr_skip (ninsn) == SKIP_YES)
+ {
+ rtx skip_tgt = next_nonnote_insn (next_real_insn (insn));
+
+ /* The first situation is where the target of the jump is one insn
+ after the jump insn and the insn being jumped is only one machine
+ opcode long. */
+ if (label == skip_tgt)
+ can_use_skip = 1;
+ else
+ {
+ /* If our skip target is in fact a code label then we ignore the
+ label and move onto the next useful instruction. Nothing we do
+ here has any effect on the use of skipping instructions. */
+ if (GET_CODE (skip_tgt) == CODE_LABEL)
+ skip_tgt = next_nonnote_insn (skip_tgt);
+
+ /* The second situation is where we have something of the form:
+
+ test_condition
+ skip_conditional
+ page/jump label
+
+ optional_label (this may or may not exist):
+ skippable_insn
+ page/jump label
+
+ In this case we can eliminate the first "page/jump label". */
+ if (GET_CODE (skip_tgt) == JUMP_INSN)
+ {
+ rtx set = single_set (skip_tgt);
+ if (GET_CODE (XEXP (set, 0)) == PC
+ && GET_CODE (XEXP (set, 1)) == LABEL_REF
+ && label == JUMP_LABEL (skip_tgt))
+ can_use_skip = 2;
+ }
+ }
+ }
+
+ /* gcc is a little braindead and does some rather stateful things while
+ inspecting attributes - we have to put this state back to what it's
+ supposed to be. */
+ extract_constrain_insn_cached (insn);
+
+ if (ip2k_compare_operands[1] == const0_rtx)
+ {
+ switch (code)
+ {
+ case LEU:
+ code = EQ; /* Nothing is LTU 0. */
+ goto zero;
+
+ case GTU:
+ code = NE; /* Anything non-zero is GTU. */
+ /* fall-thru */
+
+ case EQ:
+ case NE: /* Test all the bits, result in
+ Z AND WREG. */
+ zero:
+ switch (mode)
+ {
+ case DImode:
+ OUT_AS2 (mov, w, %S0);
+ OUT_AS2 (or, w, %T0);
+ OUT_AS2 (or, w, %U0);
+ OUT_AS2 (or, w, %V0);
+ OUT_AS2 (or, w, %W0);
+ OUT_AS2 (or, w, %X0);
+ OUT_AS2 (or, w, %Y0);
+ OUT_AS2 (or, w, %Z0);
+ break;
+
+ case SImode:
+ OUT_AS2 (mov, w, %A0);
+ OUT_AS2 (or, w, %B0);
+ OUT_AS2 (or, w, %C0);
+ OUT_AS2 (or, w, %D0);
+ break;
+
+ case HImode:
+ OUT_AS2 (mov, w, %H0);
+ OUT_AS2 (or, w, %L0);
+ break;
+
+ case QImode:
+ OUT_AS2 (mov, w, %0);
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (can_use_skip)
+ {
+ if (code == EQ)
+ OUT_AS1 (sz, );
+ else
+ OUT_AS1 (snz, );
+ }
+ else
+ {
+ if (code == EQ)
+ OUT_AS1 (snz,);
+ else
+ OUT_AS1 (sz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case GEU:
+ /* Always succeed. */
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+
+ case LTU:
+ /* Always fail. */
+ break;
+
+ default:
+ abort ();
+ }
+ return "";
+ }
+
+ /* Look at whether we have a constant as one of our operands. If we do
+ and it's in the position that we use to subtract from during our
+ normal optimized comparison concept then we have to shuffle things
+ around! */
+ if (mode != QImode)
+ {
+ if ((immediate_operand (operands[1], GET_MODE (operands[1]))
+ && ((code == LEU) || (code == GTU)))
+ || (immediate_operand (operands[0], GET_MODE (operands[0]))
+ && ((code == LTU) || (code == GEU))))
+ {
+ imm_sub = 1;
+ }
+ }
+
+ /* Same as above - look if we have a constant that we can compare
+ for equality or non-equality. If we know this then we can look
+ for common value eliminations. Note that we want to ensure that
+ any immediate value is operand 1 to simplify the code later! */
+ if ((code == EQ) || (code == NE))
+ {
+ imm_cmp = immediate_operand (operands[1], GET_MODE (operands[1]));
+ if (! imm_cmp)
+ {
+ imm_cmp = immediate_operand (operands[0], GET_MODE (operands[0]));
+ if (imm_cmp)
+ {
+ rtx tmp = operands[1];
+ operands[1] = operands[0];
+ operands[0] = tmp;
+ }
+ }
+ }
+
+ switch (mode)
+ {
+ case QImode:
+ switch (code)
+ {
+ case EQ:
+ if (imm_cmp && ((INTVAL (operands[1]) & 0xff) == 0xff))
+ OUT_AS2 (incsnz, w, %0);
+ else if (imm_cmp && ((INTVAL (operands[1]) & 0xff) == 0x01))
+ OUT_AS2 (decsnz, w, %0);
+ else
+ {
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (csne, w, %0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+
+ case NE:
+ if (imm_cmp && ((INTVAL (operands[1]) & 0xff) == 0xff))
+ OUT_AS2 (incsz, w, %0);
+ else if (imm_cmp && ((INTVAL (operands[1]) & 0xff) == 0x01))
+ OUT_AS2 (decsz, w, %0);
+ else
+ {
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (cse, w, %0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+
+ case GTU:
+ OUT_AS2 (mov, w, %0);
+ OUT_AS2 (cmp, w, %1);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+
+ case GEU:
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (cmp, w, %0);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+
+ case LTU:
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (cmp, w, %0);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+
+ case LEU:
+ OUT_AS2 (mov, w, %0);
+ OUT_AS2 (cmp, w, %1);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ case HImode:
+ switch (code)
+ {
+ case EQ:
+ {
+ unsigned char h = 0, l = 1;
+
+ if (imm_cmp)
+ {
+ h = (INTVAL (operands[1]) >> 8) & 0xff;
+ l = INTVAL (operands[1]) & 0xff;
+
+ if ((h == 0xff) && (l == 0xff))
+ {
+ /* We should be able to do the following, but the
+ IP2k simulator doesn't like it and we get a load
+ of failures in gcc-c-torture. */
+ OUT_AS2 (incsnz, w, %L0);
+ OUT_AS2 (incsz, w, %H0);
+/* OUT_AS1 (skip,); Should have this */
+ OUT_AS1 (page, 1f);/* Shouldn't need this! */
+ OUT_AS1 (jmp, 1f); /* Shouldn't need this either. */
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS1 (1:,);
+ break;
+ }
+ else if (h == 0)
+ {
+ if (l == 1)
+ OUT_AS2 (dec, w, %L0);
+ else
+ {
+ OUT_AS2 (mov, w, %L0);
+ OUT_AS2 (sub, w, %L1);
+ }
+ OUT_AS2 (or, w, %H0);
+ OUT_AS1 (snz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+ }
+ else if (l == 0)
+ {
+ if (h == 1)
+ OUT_AS2 (dec, w, %H0);
+ else
+ {
+ OUT_AS2 (mov, w, %H0);
+ OUT_AS2 (sub, w, %H1);
+ }
+ OUT_AS2 (or, w, %L0);
+ OUT_AS1 (snz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+ }
+ }
+
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS2 (cse, w, %H0);
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+ if (! imm_cmp || (h != l))
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS2 (csne, w, %L0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS1 (2:,);
+ }
+ break;
+
+ case NE:
+ {
+ unsigned char h = 0, l = 1;
+
+ if (imm_cmp)
+ {
+ h = (INTVAL (operands[1]) >> 8) & 0xff;
+ l = INTVAL (operands[1]) & 0xff;
+
+ if ((h == 0xff) && (l == 0xff))
+ {
+ OUT_AS2 (incsnz, w, %L0);
+ OUT_AS2 (incsz, w, %H0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+ }
+ else if (h == 0)
+ {
+ if (l == 1)
+ OUT_AS2 (dec, w, %L0);
+ else
+ {
+ OUT_AS2 (mov, w, %L0);
+ OUT_AS2 (sub, w, %L1);
+ }
+ OUT_AS2 (or, w, %H0);
+ OUT_AS1 (sz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+ }
+ else if (l == 0)
+ {
+ if (h == 1)
+ OUT_AS2 (dec, w, %H0);
+ else
+ {
+ OUT_AS2 (mov, w, %H0);
+ OUT_AS2 (sub, w, %H1);
+ }
+ OUT_AS2 (or, w, %L0);
+ OUT_AS1 (sz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ break;
+ }
+ }
+
+ OUT_AS2 (mov, w, %H1);
+ if (imm_cmp && (h == l))
+ {
+ OUT_AS2 (csne, w, %H0);
+ OUT_AS2 (cse, w, %L0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %H0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS2 (cse, w, %L0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case GTU:
+ if (imm_sub)
+ {
+ /* > 0xffff never succeeds! */
+ if ((INTVAL (operands[1]) & 0xffff) != 0xffff)
+ {
+ operands[3] = GEN_INT (INTVAL (operands[1]) + 1);
+ OUT_AS2 (mov, w, %L3);
+ OUT_AS2 (sub, w, %L0);
+ OUT_AS2 (mov, w, %H3);
+ OUT_AS2 (subc, w, %H0);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %L0);
+ OUT_AS2 (sub, w, %L1);
+ OUT_AS2 (mov, w, %H0);
+ OUT_AS2 (subc, w, %H1);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case GEU:
+ if (imm_sub)
+ {
+ if (INTVAL (operands[0]) == 0)
+ {
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS2 (or, w, %L1);
+ OUT_AS1 (snz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (INTVAL (operands[0]) - 1);
+ OUT_AS2 (mov, w, %L3);
+ OUT_AS2 (sub, w, %L1);
+ OUT_AS2 (mov, w, %H3);
+ OUT_AS2 (subc, w, %H1);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS2 (sub, w, %L0);
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS2 (subc, w, %H0);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case LTU:
+ if (imm_sub)
+ {
+ if (INTVAL (operands[0]) == 0)
+ {
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS2 (or, w, %L1);
+ OUT_AS1 (sz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (INTVAL (operands[0]) - 1);
+ OUT_AS2 (mov, w, %L3);
+ OUT_AS2 (sub, w, %L1);
+ OUT_AS2 (mov, w, %H3);
+ OUT_AS2 (subc, w, %H1);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS2 (sub, w, %L0);
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS2 (subc, w, %H0);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case LEU:
+ if (imm_sub)
+ {
+ if ((INTVAL (operands[1]) & 0xffff) == 0xffff)
+ {
+ /* <= 0xffff always succeeds. */
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (INTVAL (operands[1]) + 1);
+ OUT_AS2 (mov, w, %L3);
+ OUT_AS2 (sub, w, %L0);
+ OUT_AS2 (mov, w, %H3);
+ OUT_AS2 (subc, w, %H0);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %L0);
+ OUT_AS2 (sub, w, %L1);
+ OUT_AS2 (mov, w, %H0);
+ OUT_AS2 (subc, w, %H1);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ case SImode:
+ switch (code)
+ {
+ case EQ:
+ {
+ unsigned char a = 0, b = 1, c = 2, d = 3;
+
+ if (imm_cmp)
+ {
+ a = (INTVAL (operands[1]) >> 24) & 0xff;
+ b = (INTVAL (operands[1]) >> 16) & 0xff;
+ c = (INTVAL (operands[1]) >> 8) & 0xff;
+ d = INTVAL (operands[1]) & 0xff;
+ }
+
+ OUT_AS2 (mov, w, %A1);
+ if (imm_cmp && (b == a))
+ {
+ OUT_AS2 (csne, w, %A0);
+ OUT_AS2 (cse, w, %B0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %A0);
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+ OUT_AS2 (mov, w, %B1);
+ OUT_AS2 (cse, w, %B0);
+ }
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+ if (! imm_cmp || (c != b))
+ OUT_AS2 (mov, w, %C1);
+ OUT_AS2 (cse, w, %C0);
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+ if (! imm_cmp || (d != c))
+ OUT_AS2 (mov, w, %D1);
+ OUT_AS2 (csne, w, %D0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS1 (2:,);
+ }
+ break;
+
+ case NE:
+ {
+ unsigned char a = 0, b = 1, c = 2, d = 3;
+
+ if (imm_cmp)
+ {
+ a = (INTVAL (operands[1]) >> 24) & 0xff;
+ b = (INTVAL (operands[1]) >> 16) & 0xff;
+ c = (INTVAL (operands[1]) >> 8) & 0xff;
+ d = INTVAL (operands[1]) & 0xff;
+ }
+
+ OUT_AS2 (mov, w, %A1);
+ if (imm_cmp && (b == a))
+ {
+ OUT_AS2 (csne, w, %A0);
+ OUT_AS2 (cse, w, %B0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %A0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS2 (mov, w, %B1);
+ OUT_AS2 (cse, w, %B0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ if (! imm_cmp || (c != b))
+ OUT_AS2 (mov, w, %C1);
+ if (imm_cmp && (d == c))
+ {
+ OUT_AS2 (csne, w, %C0);
+ OUT_AS2 (cse, w, %D0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %C0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS2 (mov, w, %D1);
+ OUT_AS2 (cse, w, %D0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case GTU:
+ if (imm_sub)
+ {
+ /* > 0xffffffff never succeeds! */
+ if ((unsigned HOST_WIDE_INT)(INTVAL (operands[1]) & 0xffffffff)
+ != 0xffffffff)
+ {
+ operands[3] = GEN_INT (INTVAL (operands[1]) + 1);
+ OUT_AS2 (mov, w, %D3);
+ OUT_AS2 (sub, w, %D0);
+ OUT_AS2 (mov, w, %C3);
+ OUT_AS2 (subc, w, %C0);
+ OUT_AS2 (mov, w, %B3);
+ OUT_AS2 (subc, w, %B0);
+ OUT_AS2 (mov, w, %A3);
+ OUT_AS2 (subc, w, %A0);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %D0);
+ OUT_AS2 (sub, w, %D1);
+ OUT_AS2 (mov, w, %C0);
+ OUT_AS2 (subc, w, %C1);
+ OUT_AS2 (mov, w, %B0);
+ OUT_AS2 (subc, w, %B1);
+ OUT_AS2 (mov, w, %A0);
+ OUT_AS2 (subc, w, %A1);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case GEU:
+ if (imm_sub)
+ {
+ if (INTVAL (operands[0]) == 0)
+ {
+ OUT_AS2 (mov, w, %A0);
+ OUT_AS2 (or, w, %B0);
+ OUT_AS2 (or, w, %C0);
+ OUT_AS2 (or, w, %D0);
+ OUT_AS1 (snz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (INTVAL (operands[0]) - 1);
+ OUT_AS2 (mov, w, %D3);
+ OUT_AS2 (sub, w, %D1);
+ OUT_AS2 (mov, w, %C3);
+ OUT_AS2 (subc, w, %C1);
+ OUT_AS2 (mov, w, %B3);
+ OUT_AS2 (subc, w, %B1);
+ OUT_AS2 (mov, w, %A3);
+ OUT_AS2 (subc, w, %A1);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %D1);
+ OUT_AS2 (sub, w, %D0);
+ OUT_AS2 (mov, w, %C1);
+ OUT_AS2 (subc, w, %C0);
+ OUT_AS2 (mov, w, %B1);
+ OUT_AS2 (subc, w, %B0);
+ OUT_AS2 (mov, w, %A1);
+ OUT_AS2 (subc, w, %A0);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case LTU:
+ if (imm_sub)
+ {
+ if (INTVAL (operands[0]) == 0)
+ {
+ OUT_AS2 (mov, w, %A0);
+ OUT_AS2 (or, w, %B0);
+ OUT_AS2 (or, w, %C0);
+ OUT_AS2 (or, w, %D0);
+ OUT_AS1 (sz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (INTVAL (operands[0]) - 1);
+ OUT_AS2 (mov, w, %D3);
+ OUT_AS2 (sub, w, %D1);
+ OUT_AS2 (mov, w, %C3);
+ OUT_AS2 (subc, w, %C1);
+ OUT_AS2 (mov, w, %B3);
+ OUT_AS2 (subc, w, %B1);
+ OUT_AS2 (mov, w, %A3);
+ OUT_AS2 (subc, w, %A1);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %D1);
+ OUT_AS2 (sub, w, %D0);
+ OUT_AS2 (mov, w, %C1);
+ OUT_AS2 (subc, w, %C0);
+ OUT_AS2 (mov, w, %B1);
+ OUT_AS2 (subc, w, %B0);
+ OUT_AS2 (mov, w, %A1);
+ OUT_AS2 (subc, w, %A0);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case LEU:
+ if (imm_sub)
+ {
+ if ((unsigned HOST_WIDE_INT)(INTVAL (operands[1]) & 0xffffffff)
+ == 0xffffffff)
+ {
+ /* <= 0xffffffff always succeeds. */
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (INTVAL (operands[1]) + 1);
+ OUT_AS2 (mov, w, %D3);
+ OUT_AS2 (sub, w, %D0);
+ OUT_AS2 (mov, w, %C3);
+ OUT_AS2 (subc, w, %C0);
+ OUT_AS2 (mov, w, %B3);
+ OUT_AS2 (subc, w, %B0);
+ OUT_AS2 (mov, w, %A3);
+ OUT_AS2 (subc, w, %A0);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %D0);
+ OUT_AS2 (sub, w, %D1);
+ OUT_AS2 (mov, w, %C0);
+ OUT_AS2 (subc, w, %C1);
+ OUT_AS2 (mov, w, %B0);
+ OUT_AS2 (subc, w, %B1);
+ OUT_AS2 (mov, w, %A0);
+ OUT_AS2 (subc, w, %A1);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ case DImode:
+ switch (code)
+ {
+ case EQ:
+ {
+ unsigned char s = 0, t = 1, u = 2, v = 3;
+ unsigned char w = 4, x = 5, y = 6, z = 7;
+ if (optimize_size)
+ {
+ if (GET_CODE (operands[0]) == MEM
+ && true_regnum (XEXP (operands[0], 0)) == REG_DP)
+ {
+ OUT_AS1 (push, %Z1%<);
+ OUT_AS1 (push, %Y1%<);
+ OUT_AS1 (push, %X1%<);
+ OUT_AS1 (push, %W1%<);
+ OUT_AS1 (push, %V1%<);
+ OUT_AS1 (push, %U1%<);
+ OUT_AS1 (push, %T1%<);
+ OUT_AS1 (push, %S1%>%>%>%>%>%>%>);
+ OUT_AS1 (page, __cmpdi2_dp);
+ OUT_AS1 (call, __cmpdi2_dp);
+ OUT_AS2 (csne, w, #1);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ OUT_AS1 (push, %Z1%<);
+ OUT_AS1 (push, %Y1%<);
+ OUT_AS1 (push, %X1%<);
+ OUT_AS1 (push, %W1%<);
+ OUT_AS1 (push, %V1%<);
+ OUT_AS1 (push, %U1%<);
+ OUT_AS1 (push, %T1%<);
+ OUT_AS1 (push, %S1%<);
+ OUT_AS1 (push, %Z0%<);
+ OUT_AS1 (push, %Y0%<);
+ OUT_AS1 (push, %X0%<);
+ OUT_AS1 (push, %W0%<);
+ OUT_AS1 (push, %V0%<);
+ OUT_AS1 (push, %U0%<);
+ OUT_AS1 (push, %T0%<);
+ OUT_AS1 (push, %S0%>%>%>%>%>%>%>%>%>%>%>%>%>%>%>);
+ OUT_AS1 (page, __cmpdi2);
+ OUT_AS1 (call, __cmpdi2);
+ OUT_AS2 (csne, w, #1);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ if (imm_cmp)
+ {
+ s = (CONST_DOUBLE_HIGH (operands[1]) >> 24) & 0xff;
+ t = (CONST_DOUBLE_HIGH (operands[1]) >> 16) & 0xff;
+ u = (CONST_DOUBLE_HIGH (operands[1]) >> 8) & 0xff;
+ v = CONST_DOUBLE_HIGH (operands[1]) & 0xff;
+ w = (CONST_DOUBLE_LOW (operands[1]) >> 24) & 0xff;
+ x = (CONST_DOUBLE_LOW (operands[1]) >> 16) & 0xff;
+ y = (CONST_DOUBLE_LOW (operands[1]) >> 8) & 0xff;
+ z = CONST_DOUBLE_LOW (operands[1]) & 0xff;
+ }
+
+ OUT_AS2 (mov, w, %S1);
+ if (imm_cmp && (s == t))
+ {
+ OUT_AS2 (csne, w, %S0);
+ OUT_AS2 (cse, w, %T0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %S0);
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+ OUT_AS2 (mov, w, %T1);
+ OUT_AS2 (cse, w, %T0);
+ }
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+
+ OUT_AS2 (mov, w, %U1);
+ if (imm_cmp && (u == v))
+ {
+ OUT_AS2 (csne, w, %U0);
+ OUT_AS2 (cse, w, %V0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %U0);
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+ OUT_AS2 (mov, w, %V1);
+ OUT_AS2 (cse, w, %V0);
+ }
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+
+ OUT_AS2 (mov, w, %W1);
+ if (imm_cmp && (w == x))
+ {
+ OUT_AS2 (csne, w, %W0);
+ OUT_AS2 (cse, w, %X0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %W0);
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+ OUT_AS2 (mov, w, %X1);
+ OUT_AS2 (cse, w, %X0);
+ }
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+
+ if (! imm_cmp || (x != y))
+ OUT_AS2 (mov, w, %Y1);
+ OUT_AS2 (cse, w, %Y0);
+ OUT_AS1 (page, 2f);
+ OUT_AS1 (jmp, 2f);
+ if (! imm_cmp || (z != y))
+ OUT_AS2 (mov, w, %Z1);
+ OUT_AS2 (csne, w, %Z0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS1 (2:,);
+ }
+ }
+ break;
+
+ case NE:
+ {
+ unsigned char s = 0, t = 1, u = 2, v = 3;
+ unsigned char w = 4, x = 5, y = 6, z = 7;
+
+ if (optimize_size)
+ {
+ if (GET_CODE (operands[0]) == MEM
+ && true_regnum (XEXP (operands[0], 0)) == REG_DP)
+ {
+ OUT_AS1 (push, %Z1%<);
+ OUT_AS1 (push, %Y1%<);
+ OUT_AS1 (push, %X1%<);
+ OUT_AS1 (push, %W1%<);
+ OUT_AS1 (push, %V1%<);
+ OUT_AS1 (push, %U1%<);
+ OUT_AS1 (push, %T1%<);
+ OUT_AS1 (push, %S1%>%>%>%>%>%>%>);
+ OUT_AS1 (page, __cmpdi2_dp);
+ OUT_AS1 (call, __cmpdi2_dp);
+ OUT_AS2 (cse, w, #1);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ OUT_AS1 (push, %Z1%<);
+ OUT_AS1 (push, %Y1%<);
+ OUT_AS1 (push, %X1%<);
+ OUT_AS1 (push, %W1%<);
+ OUT_AS1 (push, %V1%<);
+ OUT_AS1 (push, %U1%<);
+ OUT_AS1 (push, %T1%<);
+ OUT_AS1 (push, %S1%<);
+ OUT_AS1 (push, %Z0%<);
+ OUT_AS1 (push, %Y0%<);
+ OUT_AS1 (push, %X0%<);
+ OUT_AS1 (push, %W0%<);
+ OUT_AS1 (push, %V0%<);
+ OUT_AS1 (push, %U0%<);
+ OUT_AS1 (push, %T0%<);
+ OUT_AS1 (push, %S0%>%>%>%>%>%>%>%>%>%>%>%>%>%>%>);
+ OUT_AS1 (page, __cmpdi2);
+ OUT_AS1 (call, __cmpdi2);
+ OUT_AS2 (cse, w, #1);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ if (imm_cmp)
+ {
+ s = (CONST_DOUBLE_HIGH (operands[1]) >> 24) & 0xff;
+ t = (CONST_DOUBLE_HIGH (operands[1]) >> 16) & 0xff;
+ u = (CONST_DOUBLE_HIGH (operands[1]) >> 8) & 0xff;
+ v = CONST_DOUBLE_HIGH (operands[1]) & 0xff;
+ w = (CONST_DOUBLE_LOW (operands[1]) >> 24) & 0xff;
+ x = (CONST_DOUBLE_LOW (operands[1]) >> 16) & 0xff;
+ y = (CONST_DOUBLE_LOW (operands[1]) >> 8) & 0xff;
+ z = CONST_DOUBLE_LOW (operands[1]) & 0xff;
+ }
+
+ OUT_AS2 (mov, w, %S1);
+ if (imm_cmp && (s == t))
+ {
+ OUT_AS2 (csne, w, %S0);
+ OUT_AS2 (cse, w, %T0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %S0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS2 (mov, w, %T1);
+ OUT_AS2 (cse, w, %T0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+
+ OUT_AS2 (mov, w, %U1);
+ if (imm_cmp && (u == v))
+ {
+ OUT_AS2 (csne, w, %U0);
+ OUT_AS2 (cse, w, %V0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %U0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS2 (mov, w, %V1);
+ OUT_AS2 (cse, w, %V0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+
+ OUT_AS2 (mov, w, %W1);
+ if (imm_cmp && (w == x))
+ {
+ OUT_AS2 (csne, w, %W0);
+ OUT_AS2 (cse, w, %X0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %W0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS2 (mov, w, %X1);
+ OUT_AS2 (cse, w, %X0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+
+ if (! imm_cmp || (y != x))
+ OUT_AS2 (mov, w, %Y1);
+ if (imm_cmp && (z == y))
+ {
+ OUT_AS2 (csne, w, %Y0);
+ OUT_AS2 (cse, w, %Z0);
+ }
+ else
+ {
+ OUT_AS2 (cse, w, %Y0);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ OUT_AS2 (mov, w, %Z1);
+ OUT_AS2 (cse, w, %Z0);
+ }
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ break;
+
+ case GTU:
+ if (imm_sub)
+ {
+ /* > 0xffffffffffffffff never succeeds! */
+ if (((CONST_DOUBLE_HIGH (operands[1]) & 0xffffffff)
+ != 0xffffffff)
+ || ((CONST_DOUBLE_LOW (operands[1]) & 0xffffffff)
+ != 0xffffffff))
+ {
+ operands[3] = GEN_INT (CONST_DOUBLE_LOW (operands[1]) + 1);
+ operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1])
+ + (INTVAL (operands[3]) ? 0 : 1));
+ OUT_AS2 (mov, w, %D3);
+ OUT_AS2 (sub, w, %Z0);
+ OUT_AS2 (mov, w, %C3);
+ OUT_AS2 (subc, w, %Y0);
+ OUT_AS2 (mov, w, %B3);
+ OUT_AS2 (subc, w, %X0);
+ OUT_AS2 (mov, w, %A3);
+ OUT_AS2 (subc, w, %W0);
+ OUT_AS2 (mov, w, %D4);
+ OUT_AS2 (subc, w, %V0);
+ OUT_AS2 (mov, w, %C4);
+ OUT_AS2 (subc, w, %U0);
+ OUT_AS2 (mov, w, %B4);
+ OUT_AS2 (subc, w, %T0);
+ OUT_AS2 (mov, w, %A4);
+ OUT_AS2 (subc, w, %S0);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %Z0);
+ OUT_AS2 (sub, w, %Z1);
+ OUT_AS2 (mov, w, %Y0);
+ OUT_AS2 (subc, w, %Y1);
+ OUT_AS2 (mov, w, %X0);
+ OUT_AS2 (subc, w, %X1);
+ OUT_AS2 (mov, w, %W0);
+ OUT_AS2 (subc, w, %W1);
+ OUT_AS2 (mov, w, %V0);
+ OUT_AS2 (subc, w, %V1);
+ OUT_AS2 (mov, w, %U0);
+ OUT_AS2 (subc, w, %U1);
+ OUT_AS2 (mov, w, %T0);
+ OUT_AS2 (subc, w, %T1);
+ OUT_AS2 (mov, w, %S0);
+ OUT_AS2 (subc, w, %S1);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case GEU:
+ if (imm_sub)
+ {
+ if ((CONST_DOUBLE_HIGH (operands[0]) == 0)
+ && (CONST_DOUBLE_LOW (operands[0]) == 0))
+ {
+ OUT_AS2 (mov, w, %S0);
+ OUT_AS2 (or, w, %T0);
+ OUT_AS2 (or, w, %U0);
+ OUT_AS2 (or, w, %V0);
+ OUT_AS2 (or, w, %W0);
+ OUT_AS2 (or, w, %X0);
+ OUT_AS2 (or, w, %Y0);
+ OUT_AS2 (or, w, %Z0);
+ OUT_AS1 (snz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (CONST_DOUBLE_LOW (operands[0]) - 1);
+ operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[0])
+ - (CONST_DOUBLE_LOW (operands[0])
+ ? 1 : 0));
+ OUT_AS2 (mov, w, %D3);
+ OUT_AS2 (sub, w, %Z1);
+ OUT_AS2 (mov, w, %C3);
+ OUT_AS2 (subc, w, %Y1);
+ OUT_AS2 (mov, w, %B3);
+ OUT_AS2 (subc, w, %X1);
+ OUT_AS2 (mov, w, %A3);
+ OUT_AS2 (subc, w, %W1);
+ OUT_AS2 (mov, w, %D4);
+ OUT_AS2 (subc, w, %V1);
+ OUT_AS2 (mov, w, %C4);
+ OUT_AS2 (subc, w, %U1);
+ OUT_AS2 (mov, w, %B4);
+ OUT_AS2 (subc, w, %T1);
+ OUT_AS2 (mov, w, %A4);
+ OUT_AS2 (subc, w, %S1);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %Z1);
+ OUT_AS2 (sub, w, %Z0);
+ OUT_AS2 (mov, w, %Y1);
+ OUT_AS2 (subc, w, %Y0);
+ OUT_AS2 (mov, w, %X1);
+ OUT_AS2 (subc, w, %X0);
+ OUT_AS2 (mov, w, %W1);
+ OUT_AS2 (subc, w, %W0);
+ OUT_AS2 (mov, w, %V1);
+ OUT_AS2 (subc, w, %V0);
+ OUT_AS2 (mov, w, %U1);
+ OUT_AS2 (subc, w, %U0);
+ OUT_AS2 (mov, w, %T1);
+ OUT_AS2 (subc, w, %T0);
+ OUT_AS2 (mov, w, %S1);
+ OUT_AS2 (subc, w, %S0);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case LTU:
+ if (imm_sub)
+ {
+ if ((CONST_DOUBLE_HIGH (operands[0]) == 0)
+ && (CONST_DOUBLE_LOW (operands[0]) == 0))
+ {
+ OUT_AS2 (mov, w, %S0);
+ OUT_AS2 (or, w, %T0);
+ OUT_AS2 (or, w, %U0);
+ OUT_AS2 (or, w, %V0);
+ OUT_AS2 (or, w, %W0);
+ OUT_AS2 (or, w, %X0);
+ OUT_AS2 (or, w, %Y0);
+ OUT_AS2 (or, w, %Z0);
+ OUT_AS1 (sz,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (CONST_DOUBLE_LOW (operands[0]) - 1);
+ operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[0])
+ - (CONST_DOUBLE_LOW (operands[0])
+ ? 1 : 0));
+ OUT_AS2 (mov, w, %D3);
+ OUT_AS2 (sub, w, %Z1);
+ OUT_AS2 (mov, w, %C3);
+ OUT_AS2 (subc, w, %Y1);
+ OUT_AS2 (mov, w, %B3);
+ OUT_AS2 (subc, w, %X1);
+ OUT_AS2 (mov, w, %A3);
+ OUT_AS2 (subc, w, %W1);
+ OUT_AS2 (mov, w, %D4);
+ OUT_AS2 (subc, w, %V1);
+ OUT_AS2 (mov, w, %C4);
+ OUT_AS2 (subc, w, %U1);
+ OUT_AS2 (mov, w, %B4);
+ OUT_AS2 (subc, w, %T1);
+ OUT_AS2 (mov, w, %A4);
+ OUT_AS2 (subc, w, %S1);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %Z1);
+ OUT_AS2 (sub, w, %Z0);
+ OUT_AS2 (mov, w, %Y1);
+ OUT_AS2 (subc, w, %Y0);
+ OUT_AS2 (mov, w, %X1);
+ OUT_AS2 (subc, w, %X0);
+ OUT_AS2 (mov, w, %W1);
+ OUT_AS2 (subc, w, %W0);
+ OUT_AS2 (mov, w, %V1);
+ OUT_AS2 (subc, w, %V0);
+ OUT_AS2 (mov, w, %U1);
+ OUT_AS2 (subc, w, %U0);
+ OUT_AS2 (mov, w, %T1);
+ OUT_AS2 (subc, w, %T0);
+ OUT_AS2 (mov, w, %S1);
+ OUT_AS2 (subc, w, %S0);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ case LEU:
+ if (imm_sub)
+ {
+ if (((CONST_DOUBLE_HIGH (operands[1]) & 0xffffffff)
+ == 0xffffffff)
+ && ((CONST_DOUBLE_LOW (operands[1]) & 0xffffffff)
+ == 0xffffffff))
+ {
+	      /* <= 0xffffffffffffffff always succeeds.  */
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ else
+ {
+ operands[3] = GEN_INT (CONST_DOUBLE_LOW (operands[1]) + 1);
+ operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1])
+ + (INTVAL (operands[3]) ? 0 : 1));
+ OUT_AS2 (mov, w, %D3);
+ OUT_AS2 (sub, w, %Z0);
+ OUT_AS2 (mov, w, %C3);
+ OUT_AS2 (subc, w, %Y0);
+ OUT_AS2 (mov, w, %B3);
+ OUT_AS2 (subc, w, %X0);
+ OUT_AS2 (mov, w, %A3);
+ OUT_AS2 (subc, w, %W0);
+ OUT_AS2 (mov, w, %D4);
+ OUT_AS2 (subc, w, %V0);
+ OUT_AS2 (mov, w, %C4);
+ OUT_AS2 (subc, w, %U0);
+ OUT_AS2 (mov, w, %B4);
+ OUT_AS2 (subc, w, %T0);
+ OUT_AS2 (mov, w, %A4);
+ OUT_AS2 (subc, w, %S0);
+ OUT_AS1 (sc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %Z0);
+ OUT_AS2 (sub, w, %Z1);
+ OUT_AS2 (mov, w, %Y0);
+ OUT_AS2 (subc, w, %Y1);
+ OUT_AS2 (mov, w, %X0);
+ OUT_AS2 (subc, w, %X1);
+ OUT_AS2 (mov, w, %W0);
+ OUT_AS2 (subc, w, %W1);
+ OUT_AS2 (mov, w, %V0);
+ OUT_AS2 (subc, w, %V1);
+ OUT_AS2 (mov, w, %U0);
+ OUT_AS2 (subc, w, %U1);
+ OUT_AS2 (mov, w, %T0);
+ OUT_AS2 (subc, w, %T1);
+ OUT_AS2 (mov, w, %S0);
+ OUT_AS2 (subc, w, %S1);
+ OUT_AS1 (snc,);
+ OUT_AS1 (page, %2);
+ OUT_AS1 (jmp, %2);
+ }
+ break;
+
+ default:
+ abort ();
+ }
+ break;
+
+ default:
+ abort ();
+ }
+#undef operands
+ return "";
+}
+
+/* Output rtx VALUE as .byte to file FILE. */
+
+void
+asm_output_char(file, value)
+ FILE *file;
+ rtx value;
+{
+ fprintf (file, "\t.byte ");
+ output_addr_const (file, value);
+ fprintf (file, "\n");
+}
+
+
+/* Output VALUE as .byte to file FILE. */
+
+void
+asm_output_byte (file,value)
+ FILE *file;
+ int value;
+{
+ fprintf (file, "\t.byte 0x%x\n",value & 0xff);
+}
+
+
+/* Output rtx VALUE as .word to file FILE. */
+
+void
+asm_output_short (file, value)
+ FILE *file;
+ rtx value;
+{
+ fprintf (file, "\t.word ");
+ output_addr_const (file, (value));
+ fprintf (file, "\n");
+}
+
+
+/* Output real N to file FILE. */
+
+void
+asm_output_float (file, n)
+ FILE *file;
+ REAL_VALUE_TYPE n;
+{
+ long val;
+ char dstr[100];
+
+ REAL_VALUE_TO_TARGET_SINGLE (n, val);
+ REAL_VALUE_TO_DECIMAL (n, "%g", dstr);
+ fprintf (file, "\t.long 0x%08lx\t/* %s */\n",val, dstr);
+}
+
+/* Sets section name for declaration DECL. */
+
+void
+unique_section (decl, reloc)
+ tree decl;
+ int reloc ATTRIBUTE_UNUSED;
+{
+ int len;
+ const char *name;
+ char *string;
+ const char *prefix;
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ /* Strip off any encoding in name. */
+ name = (* targetm.strip_name_encoding) (name);
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ if (flag_function_sections)
+ prefix = ".text.";
+ else
+ prefix = ".text";
+ }
+ else
+ abort ();
+
+ if (flag_function_sections)
+ {
+ len = strlen (name) + strlen (prefix);
+ string = alloca (len + 1);
+ sprintf (string, "%s%s", prefix, name);
+ DECL_SECTION_NAME (decl) = build_string (len, string);
+ }
+}
+
+
+/* Output section name to file FILE. */
+
+void
+asm_output_section_name(file, decl, name, reloc)
+ FILE *file;
+ tree decl ATTRIBUTE_UNUSED;
+ const char *name;
+ int reloc ATTRIBUTE_UNUSED;
+{
+ fprintf (file, ".section %s\n", name);
+}
+
+/* Return value is nonzero if pseudos that have been
+ assigned to registers of class CLASS would likely be spilled
+ because registers of CLASS are needed for spill registers. */
+
+enum reg_class
+class_likely_spilled_p(c)
+ int c;
+{
+ return (c == IP_REGS
+ || c == IPL_REGS
+ || c == IPH_REGS
+ || c == DP_SP_REGS
+ || c == SP_REGS
+ || c == DP_REGS
+ || c == DPL_REGS
+ || c == DPH_REGS
+ || c == PTR_REGS);
+}
+
+/* Only `progmem' attribute valid for type. */
+
+int
+valid_machine_type_attribute(type, attributes, identifier, args)
+ tree type ATTRIBUTE_UNUSED;
+ tree attributes ATTRIBUTE_UNUSED;
+ tree identifier;
+ tree args ATTRIBUTE_UNUSED;
+{
+ return is_attribute_p ("progmem", identifier);
+}
+
+/* If IDENTIFIER with arguments ARGS is a valid machine specific
+ attribute for DECL return 1.
+ Valid attributes:
+ progmem - put data to program memory;
+ naked - don't generate function prologue/epilogue and `ret' command. */
+
+int
+valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes ATTRIBUTE_UNUSED;
+ tree attr;
+ tree args ATTRIBUTE_UNUSED;
+{
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("progmem", attr)
+ && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
+ {
+ /* Data stored in program RAM or FLASH must be word aligned or
+ it won't be directly addressable. */
+ if (DECL_ALIGN (decl) < FUNCTION_BOUNDARY)
+ DECL_ALIGN (decl) = FUNCTION_BOUNDARY;
+
+ if (DECL_INITIAL (decl) == NULL_TREE)
+ {
+ warning ("Only initialized variables can be placed into "
+ "program memory area.");
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Encode section information about tree DECL. */
+
+void
+encode_section_info (decl)
+ tree decl;
+{
+ if (! DECL_P (decl))
+ return;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl), 0)) = 1;
+}
+
+/* Outputs to the stdio stream FILE some
+ appropriate text to go at the start of an assembler file. */
+
+void
+asm_file_start (file)
+ FILE *file;
+{
+ output_file_directive (file, main_input_filename);
+
+ commands_in_file = 0;
+ commands_in_prologues = 0;
+ commands_in_epilogues = 0;
+}
+
+/* Outputs to the stdio stream FILE some
+ appropriate text to go at the end of an assembler file. */
+
+void
+asm_file_end (file)
+ FILE *file;
+{
+ fprintf
+ (file,
+ "/* File %s: code %4d = 0x%04x (%4d), prologues %3d, epilogues %3d */\n",
+ main_input_filename,
+ commands_in_file,
+ commands_in_file,
+ commands_in_file - commands_in_prologues - commands_in_epilogues,
+ commands_in_prologues, commands_in_epilogues);
+}
+
+/* Cost functions. */
+
+/* Calculate the cost of X code of the expression in which it is contained,
+ found in OUTER_CODE. */
+
+int
+default_rtx_costs (x, code, outer_code)
+ rtx x;
+ enum rtx_code code;
+ enum rtx_code outer_code;
+{
+ enum machine_mode mode = GET_MODE (x);
+ int extra_cost = 0;
+ int total;
+
+ switch (code)
+ {
+ case MEM:
+ return ip2k_address_cost (XEXP (x, 0));
+
+ case ROTATE:
+ case ROTATERT:
+ case ASHIFT:
+ case LSHIFTRT:
+ case ASHIFTRT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ int val = INTVAL (XEXP (x, 1));
+ int cost;
+
+ /* Shift by const instructions are proportional to
+	     the shift count modulo 8.  Note that we increase the mode
+ size multiplier by 1 to account for clearing the carry flag. */
+ cost = COSTS_N_INSNS (abs (val) % 8);
+ cost += rtx_cost (XEXP (x, 0), code);
+ cost *= (GET_MODE_SIZE (mode) + 1);
+
+ /* Sign-preserving shifts require 2 extra instructions. */
+ if (code == ASHIFT)
+ cost += COSTS_N_INSNS (2);
+ return cost;
+ }
+ total = rtx_cost (XEXP (x, 0), code);
+ total += COSTS_N_INSNS (GET_MODE_SIZE (mode) * 8);
+ return total;
+
+ case MINUS:
+ case PLUS:
+ case AND:
+ case XOR:
+ case IOR:
+ total = rtx_cost (XEXP (x, 0), code)
+ + rtx_cost (XEXP (x, 1), code);
+ total += COSTS_N_INSNS (GET_MODE_SIZE (mode) * 3);
+ return total;
+
+ case MOD:
+ case DIV:
+ if (mode == QImode)
+ return COSTS_N_INSNS (20);
+ if (mode == HImode)
+ return COSTS_N_INSNS (60);
+ else if (mode == SImode)
+ return COSTS_N_INSNS (180);
+ else
+ return COSTS_N_INSNS (540);
+
+ case MULT:
+ /* These costs are OK, but should really handle subtle cases
+ where we're using sign or zero extended args as these are
+ *much* cheaper than those given below! */
+ if (mode == QImode)
+ return COSTS_N_INSNS (4);
+ if (mode == HImode)
+ return COSTS_N_INSNS (12);
+ if (mode == SImode)
+ return COSTS_N_INSNS (36);
+ else
+ return COSTS_N_INSNS (108);
+
+ case NEG:
+ case SIGN_EXTEND:
+ extra_cost = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+
+ /* Fall through. */
+ case NOT:
+ case COMPARE:
+ case ABS:
+ total = rtx_cost (XEXP (x, 0), code);
+ return total + extra_cost + COSTS_N_INSNS (GET_MODE_SIZE (mode) * 2);
+
+ case TRUNCATE:
+ case ZERO_EXTEND:
+ if (outer_code == SET)
+ return rtx_cost (XEXP (x, 0), code)
+ + COSTS_N_INSNS (GET_MODE_SIZE (mode) * 3 / 2);
+ else
+ return -(COSTS_N_INSNS (GET_MODE_SIZE (mode)) / 2);
+
+ case IF_THEN_ELSE:
+ return rtx_cost (XEXP (x, 0), code)
+ + COSTS_N_INSNS (2);
+
+ case EQ:
+ case NE:
+ case LTU:
+ case GTU:
+ case LEU:
+ case GEU:
+ case LT:
+ case GT:
+ case LE:
+ case GE:
+ return rtx_cost (XEXP (x, 0), code)
+ + rtx_cost (XEXP (x, 1), code);
+
+ default:
+ return COSTS_N_INSNS (4);
+ }
+}
+
+/* Calculate the cost of a memory address. */
+
+int
+ip2k_address_cost (x)
+ rtx x;
+{
+ switch (legitimate_address_p (VOIDmode, x, 0))
+ {
+ case 'S': /* Very low cost - (IP), (SP+N) or (DP+N) */
+ return 8;
+
+ case 'R': /* Indirected through IP. */
+ return 8;
+
+ case 'L': /* Label references. */
+ return 0;
+
+ case 'C': /* Constants and symbol references. */
+ return 4;
+
+ default:
+ return 1000; /* Must reload. */
+ }
+}
+
+/* As part of the machine-dependent reorg we look for opcode sequences where
+ we do some operation and then move the results back to one of the original
+   source operands.  Working on the source operand directly is probably
+ much cheaper and the move from this to the original source operand will be
+ no more expensive than the original move. */
+
+void
+mdr_resequence_xy_yx (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ rtx set;
+
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ continue;
+
+ /* Look for operations that tend to be very cheap to run when the source
+	 and dest args are the same because the IP2022 has opcodes that can
+ operate on the source directly. If we have to spill through the W
+ register then we've possibly not got a good case for doing this. */
+ if ((GET_CODE (XEXP (set, 0)) == REG
+ || GET_CODE (XEXP (set, 0)) == MEM)
+ && (GET_CODE (XEXP (set, 1)) == ASHIFT
+ || GET_CODE (XEXP (set, 1)) == ASHIFTRT
+ || GET_CODE (XEXP (set, 1)) == LSHIFTRT
+ || GET_CODE (XEXP (set, 1)) == XOR
+ || GET_CODE (XEXP (set, 1)) == IOR
+ || GET_CODE (XEXP (set, 1)) == AND
+ || GET_CODE (XEXP (set, 1)) == PLUS
+ || GET_CODE (XEXP (set, 1)) == MINUS
+ || GET_CODE (XEXP (set, 1)) == MULT))
+ {
+ rtx set2;
+ rtx next_insn;
+
+ next_insn = next_nonnote_insn (insn);
+ if (! next_insn)
+ continue;
+
+ if (GET_CODE (next_insn) != INSN)
+ continue;
+
+ set2 = ((GET_CODE (PATTERN (next_insn)) == SET)
+ ? PATTERN (next_insn) : NULL_RTX);
+ if (set2 == NULL_RTX)
+ continue;
+
+ if ((GET_CODE (XEXP (XEXP (set, 1), 0)) == REG
+ || GET_CODE (XEXP (XEXP (set, 1), 0)) == MEM)
+ && rtx_equal_p (XEXP (set2, 0), XEXP (XEXP (set, 1), 0))
+ && rtx_equal_p (XEXP (set2, 1), XEXP (set, 0)))
+ {
+ rtx next2_insn;
+ rtx b_insn;
+
+ b_insn = gen_rtx_SET (VOIDmode,
+ XEXP (XEXP (set, 1), 0),
+ gen_rtx_fmt_ee (GET_CODE (XEXP (set, 1)),
+ GET_MODE (XEXP (set, 0)),
+ XEXP (XEXP (set, 1), 0),
+ XEXP (XEXP (set, 1), 1)));
+
+ emit_insn_before (b_insn, insn);
+ b_insn = gen_rtx_SET (GET_MODE (XEXP (set, 0)), XEXP (set, 0),
+ XEXP (XEXP (set, 1), 0));
+ next2_insn = emit_insn_before (b_insn, insn);
+ delete_insn (insn);
+ delete_insn (next_insn);
+ insn = next2_insn;
+ continue;
+ }
+
+ /* Having tried with one operand of the expression, now, if
+ appropriate, try to do the same thing with the second operand.
+ Of course there are fewer operations that can match here
+ because they must be commutative. */
+ if (GET_RTX_CLASS (GET_CODE (XEXP (set, 1))) == 'c'
+ && (GET_CODE (XEXP (XEXP (set, 1), 1)) == REG
+ || GET_CODE (XEXP (XEXP (set, 1), 1)) == MEM)
+ && rtx_equal_p (XEXP (set2, 0), XEXP (XEXP (set, 1), 1))
+ && rtx_equal_p (XEXP (set2, 1), XEXP (set, 0)))
+ {
+ rtx rtx_ee;
+ rtx next2_insn;
+ int swap_args;
+
+ /* Try to ensure that we put things in a canonical form. */
+ swap_args = (GET_CODE (XEXP (XEXP (set, 1), 0)) == REG
+ || GET_CODE (XEXP (XEXP (set, 1), 0)) == MEM);
+ rtx_ee = gen_rtx_fmt_ee (GET_CODE (XEXP (set, 1)),
+ GET_MODE (XEXP (set, 0)),
+ XEXP (XEXP (set, 1), swap_args ? 1 : 0),
+ XEXP (XEXP (set, 1),
+ swap_args ? 0 : 1));
+
+ emit_insn_before (gen_rtx_SET (VOIDmode,
+ XEXP (XEXP (set, 1), 1),
+ rtx_ee),
+ insn);
+ next2_insn = emit_insn_before (gen_rtx_SET
+ (GET_MODE (XEXP (set, 0)),
+ XEXP (set, 0),
+ XEXP (XEXP (set, 1), 1)),
+ insn);
+ delete_insn (insn);
+ delete_insn (next_insn);
+ insn = next2_insn;
+ }
+ }
+ }
+}
+
+/* Replace and recurse until we've tried QImode pieces! */
+
+static void
+mdr_pres_replace_and_recurse (orig, with, insn)
+ rtx orig;
+ rtx with;
+ rtx insn;
+{
+ enum machine_mode new_mode;
+
+ validate_replace_rtx (orig, with, insn);
+
+ switch (GET_MODE (orig))
+ {
+ case DImode:
+ case DFmode:
+ new_mode = SImode;
+ break;
+
+ case SImode:
+ case SFmode:
+ new_mode = HImode;
+ break;
+
+ case HImode:
+ new_mode = QImode;
+ break;
+
+ default:
+ return;
+ }
+
+ mdr_pres_replace_and_recurse (ip2k_get_low_half (orig, new_mode),
+ ip2k_get_low_half (with, new_mode),
+ insn);
+ mdr_pres_replace_and_recurse (ip2k_get_high_half (orig, new_mode),
+ ip2k_get_high_half (with, new_mode),
+ insn);
+}
+
+/* Assist the following function, mdr_propagate_reg_equivs(). */
+
+static void
+mdr_propagate_reg_equivs_sequence (first_insn, orig, equiv)
+ rtx first_insn;
+ rtx orig;
+ rtx equiv;
+{
+ rtx try_insn;
+ rtx try_equiv = equiv;
+
+ /* First scan the RTL looking for anything else that might clobber what
+ we're doing. If we find anything then we can't do the replacement. */
+ for (try_insn = next_nonnote_insn (first_insn);
+ try_insn; try_insn = next_nonnote_insn (try_insn))
+ {
+ rtx pattern;
+
+ if (GET_CODE (try_insn) != JUMP_INSN && GET_CODE (try_insn) != INSN)
+ continue;
+
+ pattern = PATTERN (try_insn);
+ if (GET_CODE (pattern) == PARALLEL)
+ {
+ int j;
+
+ for (j = 0; j < XVECLEN (pattern, 0); j++)
+ {
+ rtx px = XVECEXP (pattern, 0, j);
+
+ if (GET_CODE (px) == SET)
+ if (! ip2k_composite_xexp_not_uses_reg_p (XEXP (px, 0),
+ REGNO (orig),
+ GET_MODE_SIZE (GET_MODE (orig))))
+ return;
+ }
+ }
+ else if (GET_CODE (pattern) == SET)
+ {
+ if (! ip2k_composite_xexp_not_uses_reg_p (XEXP (pattern, 0),
+ REGNO (orig),
+ GET_MODE_SIZE (GET_MODE (orig))))
+ return;
+ }
+ }
+
+ /* Once we've decided that we're safe to do the replacement then make the
+ changes. */
+ for (try_insn = next_nonnote_insn (first_insn); try_insn;
+ try_insn = next_nonnote_insn (try_insn))
+ {
+ rtx set;
+ rtx new_equiv = NULL_RTX;
+
+ if (GET_CODE (try_insn) != JUMP_INSN && GET_CODE (try_insn) != INSN)
+ {
+ try_equiv = equiv;
+ continue;
+ }
+
+ set = ((GET_CODE (PATTERN (try_insn)) == SET)
+ ? PATTERN (try_insn) : NULL_RTX);
+ if (set == NULL_RTX)
+ continue;
+
+ /* We look for a special case of "push" operations screwing our
+ register equivalence when it's based on a stack slot. We can
+ track this one and replace the old equivalence expression with
+ a new one. */
+ if (GET_CODE (XEXP (set, 0)) == MEM
+ && GET_CODE (XEXP (XEXP (set, 0), 0)) == POST_DEC
+ && REG_P (XEXP (XEXP (XEXP (set, 0), 0), 0))
+ && REGNO (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG_SP)
+ {
+ /* XXX - need to ensure that we can track this without going
+ out of range! */
+ HOST_WIDE_INT disp = (INTVAL (XEXP (XEXP (try_equiv, 0), 1))
+ + GET_MODE_SIZE (GET_MODE (XEXP (set, 0))));
+ new_equiv = gen_rtx_MEM (GET_MODE (try_equiv),
+ gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (HImode, REG_SP),
+ GEN_INT (disp)));
+ }
+
+ /* The replacement process is somewhat complicated by the fact that we
+ might be dealing with what were originally subregs and thus we have
+ to replace parts of our original expression! */
+ mdr_pres_replace_and_recurse (orig, try_equiv, try_insn);
+
+ if (new_equiv != NULL_RTX)
+ try_equiv = new_equiv;
+ }
+}
+
+/* Try propagating register equivalences forwards. It may be that we can
+ replace a register use with an equivalent expression that already
+ holds the same value and thus allow one or more register loads to
+ be eliminated. */
+
+void
+mdr_propagate_reg_equivs (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+ rtx set;
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ continue;
+
+ /* Have we found a stack slot equivalence for a register? */
+ if (REG_P (XEXP (set, 0))
+ && REGNO (XEXP (set, 0)) >= 0x88
+ && GET_CODE (XEXP (set, 1)) == MEM
+ && GET_CODE (XEXP (XEXP (set, 1), 0)) == PLUS
+ && REG_P (XEXP (XEXP (XEXP (set, 1), 0), 0))
+ && REGNO (XEXP (XEXP (XEXP (set, 1), 0), 0)) == REG_SP
+ && find_reg_note (insn, REG_EQUIV, NULL_RTX))
+ {
+ mdr_propagate_reg_equivs_sequence (insn, XEXP (set, 0),
+ XEXP (set, 1));
+ }
+ }
+}
+
+/* Structure used to track jump targets. */
+
+struct dpre_jump_targets
+{
+ int target; /* Is this a jump target? */
+ int reach_count; /* Number of ways we can reach this insn. */
+ int touch_count; /* Number of times we've touched this
+ insns during scanning. */
+ rtx dp_equiv; /* DP-equivalence at this point. */
+};
+
+struct dpre_jump_targets *ip2k_dpre_jump_targets;
+
+/* DP equivalence tracking used within DP reload elimination. */
+
+static int
+track_dp_reload (insn, dp_current, dp_current_ok, modifying)
+ rtx insn;
+ rtx *dp_current;
+ int dp_current_ok;
+ int modifying;
+{
+ rtx set;
+
+ if (GET_CODE (insn) != INSN)
+ {
+ *dp_current = NULL_RTX;
+ return 1;
+ }
+
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ {
+ *dp_current = NULL_RTX;
+ return 1;
+ }
+
+ /* If we're pushing a PLUS or MINUS then it's a win if we can replace
+ an expression for which DP is equivalent with DP. This happens
+ surprisingly often when we pass a pointer to a structure embedded
+ within another structure. */
+ if (*dp_current != NULL_RTX
+ && GET_CODE (XEXP (set, 0)) == MEM
+ && GET_CODE (XEXP (XEXP (set, 0), 0)) == POST_DEC
+ && GET_CODE (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG
+ && REGNO (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG_SP
+ && (GET_CODE (XEXP (set, 1)) == PLUS
+ || GET_CODE (XEXP (set, 1)) == MINUS)
+ && GET_CODE (*dp_current) != SYMBOL_REF
+ && GET_CODE (*dp_current) != LABEL_REF
+ && GET_CODE (*dp_current) != CONST)
+ {
+ if (modifying)
+ validate_replace_rtx (*dp_current, gen_rtx_REG (HImode, REG_DP), insn);
+ }
+
+ /* Look for DP being modified. If it is, see if it's being changed
+ to what it already is! */
+ if (GET_CODE (XEXP (set, 0)) == REG
+ && REGNO (XEXP (set, 0)) == REG_DP
+ && GET_MODE (XEXP (set, 0)) == HImode)
+ {
+ /* If this is an equivalence we can delete the new set operation. */
+ if (*dp_current != NULL_RTX
+ && rtx_equal_p (XEXP (set, 1), *dp_current))
+ {
+ if (modifying)
+ delete_insn (insn);
+ }
+ else
+ {
+ /* If we've not found an equivalence we can look for a special
+ case where an operand of the expression that sets DP is
+ already equivalent to DP and in that circumstance we simplify
+ by replacing that expression with DP. */
+ if (*dp_current != NULL_RTX
+ && GET_CODE (*dp_current) != SYMBOL_REF
+ && GET_CODE (*dp_current) != LABEL_REF
+ && GET_CODE (*dp_current) != CONST
+ && modifying)
+ validate_replace_rtx (*dp_current, XEXP (set, 0), insn);
+
+ /* Assuming that we're not loading DP from something that uses DP
+ itself then we mark the new equivalence for DP. If we did match
+ DP then we can't re-use this one. */
+ if (ip2k_xexp_not_uses_reg_p (XEXP (set, 1), REG_DP, 2))
+ {
+ *dp_current = XEXP (set, 1);
+ return 1;
+ }
+ else
+ {
+ *dp_current = NULL_RTX;
+ return 1;
+ }
+ }
+ }
+ else if (GET_CODE (XEXP (set, 0)) == REG
+ && (REGNO (XEXP (set, 0)) == REG_DPL
+ || REGNO (XEXP (set, 0)) == REG_DPH))
+ {
+ /* If we clobber part of DP then we've clobbered any equivalences! */
+ *dp_current = NULL_RTX;
+ return 1;
+ }
+ else if (! ip2k_xexp_not_uses_reg_p (XEXP (set, 0), REG_SP, 2)
+ && *dp_current != NULL_RTX
+ && !ip2k_xexp_not_uses_reg_p (*dp_current, REG_SP, 2))
+ {
+ /* We look for a special case of "push" operations screwing up the
+ setting of DP when it's based on the stack. We can track this one
+ and replace the old expression for DP with a new one. */
+ if (GET_CODE (XEXP (set, 0)) == MEM
+ && GET_CODE (XEXP (XEXP (set, 0), 0)) == POST_DEC
+ && GET_CODE (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG
+ && REGNO (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG_SP
+ && GET_CODE (*dp_current) == MEM
+ && GET_CODE (XEXP (*dp_current, 0)) == PLUS)
+ {
+ /* XXX - need to ensure that we can track this without going
+ out of range! */
+ HOST_WIDE_INT disp = (INTVAL (XEXP (XEXP (*dp_current, 0), 1))
+ + GET_MODE_SIZE (GET_MODE (XEXP (set, 0))));
+ *dp_current = gen_rtx_MEM (HImode,
+ gen_rtx_PLUS (Pmode,
+ gen_rtx_REG(HImode, REG_SP),
+ GEN_INT (disp)));
+ return 1;
+ }
+
+ /* Now we look for writes to the stack. We can determine if these will
+ affect the equivalence we're tracking for DP and if not then we can
+ keep tracking it. */
+ if (GET_CODE (XEXP (set, 0)) == MEM
+ && GET_CODE (*dp_current) == MEM)
+ {
+ /* Look at the SP offsets and look for any overlaps. */
+ int dp_cur_sp_offs = INTVAL (XEXP (XEXP (*dp_current, 0), 1));
+ int set_sp_offs = INTVAL (XEXP (XEXP (XEXP (set, 0), 0), 1));
+
+ if (abs (dp_cur_sp_offs - set_sp_offs) < 2)
+ {
+ *dp_current = NULL_RTX;
+ return 1;
+ }
+ }
+ }
+ else if (GET_CODE (XEXP (set, 0)) == REG
+ && *dp_current != NULL_RTX
+ && !ip2k_xexp_not_uses_reg_p (*dp_current, REGNO (XEXP (set, 0)),
+ GET_MODE_SIZE (GET_MODE (XEXP (set,
+ 0)))))
+ {
+ /* If we've just clobbered all or part of a register reference that we
+ were sharing for DP then we can't share it any more! */
+ *dp_current = NULL_RTX;
+ }
+
+ return dp_current_ok;
+}
+
+/* As part of the machine-dependent reorg we scan loads and reloads of
+   DP to see where any are redundant.  This does happen because we
+ are able to subsequently transform things in interesting ways. Sometimes
+   gcc also does unnecessary reloads so we try to eliminate these too.  */
+
+static void
+mdr_try_dp_reload_elim (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+ struct dpre_jump_targets *djt;
+ rtx dp_current;
+ int incomplete_scan;
+ int last_incomplete_scan;
+
+ ip2k_dpre_jump_targets
+ = (struct dpre_jump_targets *) xcalloc (get_max_uid (),
+ sizeof (struct dpre_jump_targets));
+
+ /* First we scan to build up a list of all CODE_LABEL insns and we work out
+ how many different ways we can reach them. */
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ djt = &ip2k_dpre_jump_targets[INSN_UID (insn)];
+ djt->target = 1;
+ djt->reach_count = LABEL_NUSES (insn);
+ djt->touch_count = 0;
+ djt->dp_equiv = NULL_RTX;
+ if (! prev_nonnote_insn (insn)
+ || (prev_nonnote_insn (insn)
+ && GET_CODE (prev_nonnote_insn (insn)) != BARRIER))
+ djt->reach_count++;
+ }
+ }
+
+ /* Next we scan all of the ways of reaching the code labels to see
+ what the DP register is equivalent to as we reach them. If we find
+ that they're the same then we keep noting the matched value. We
+ iterate around this until we reach a convergence on DP equivalences
+ at all code labels - we have to be very careful not to be too
+ optimistic! */
+ incomplete_scan = -1;
+ do
+ {
+ int dp_current_ok = 0;
+ last_incomplete_scan = incomplete_scan;
+ dp_current = NULL_RTX;
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ /* If we have a code label then we need to see if we already know
+ what the equivalence is at this point. If we do then we use it
+ immediately, but if we don't then we have a special case to track
+ when we hit a fallthrough-edge (label with no barrier preceding
+ it). Any other accesses to the label must be from jump insns
+ and so they're handled elsewhere. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ djt = &ip2k_dpre_jump_targets[INSN_UID (insn)];
+
+	      /* If we've fully characterized it then use the equivalence.  */
+ if (djt->touch_count == djt->reach_count)
+ {
+ dp_current = djt->dp_equiv;
+ dp_current_ok = 1;
+ continue;
+ }
+
+ /* If we have a known equivalence for DP as we reach the
+ fallthrough-edge then track this into the code label. */
+ if (dp_current_ok
+ && (! prev_nonnote_insn (insn)
+ || (prev_nonnote_insn (insn)
+ && GET_CODE (prev_nonnote_insn (insn)) != BARRIER)))
+ {
+ if (djt->touch_count == 0)
+ djt->dp_equiv = dp_current;
+
+ if (djt->touch_count < djt->reach_count)
+ {
+ djt->touch_count++;
+ if (! rtx_equal_p (djt->dp_equiv, dp_current))
+ {
+ /* When we definitely know that we can't form an
+ equivalence for DP here we must clobber anything
+ that we'd started to track too. */
+ djt->dp_equiv = NULL_RTX;
+ dp_current = NULL_RTX;
+ dp_current_ok = 1;
+ }
+ }
+ }
+
+ /* If we've not completely characterized this code label then
+ be cautious and assume that we don't know what DP is
+ equivalent to. */
+ if (djt->touch_count < djt->reach_count)
+ {
+ dp_current = NULL_RTX;
+ dp_current_ok = 0;
+ }
+
+ continue;
+ }
+
+ /* If we've hit a jump insn then we look for either an address
+ vector (jump table) or for jump label references. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ /* Don't attempt to track here if we don't have a known
+ equivalence for DP at this point. */
+ if (dp_current_ok)
+ {
+ rtx pat = PATTERN (insn);
+ if (GET_CODE (pat) == ADDR_VEC)
+ {
+ int i;
+ int len = XVECLEN (pat, 0);
+
+ for (i = 0; i < len; i++)
+ {
+ rtx vec_insn = XEXP (XVECEXP (pat, 0, i), 0);
+ djt = &ip2k_dpre_jump_targets [INSN_UID (vec_insn)];
+
+ if (djt->touch_count == 0)
+ djt->dp_equiv = dp_current;
+
+ if (djt->touch_count < djt->reach_count)
+ {
+ djt->touch_count++;
+ if (! rtx_equal_p (djt->dp_equiv, dp_current))
+ djt->dp_equiv = NULL_RTX;
+ }
+ }
+ }
+ else if (JUMP_LABEL (insn))
+ {
+ rtx j_insn = JUMP_LABEL (insn);
+ djt = &ip2k_dpre_jump_targets[INSN_UID (j_insn)];
+
+ if (djt->touch_count == 0)
+ djt->dp_equiv = dp_current;
+
+ if (djt->touch_count < djt->reach_count)
+ {
+ djt->touch_count++;
+ if (! rtx_equal_p (djt->dp_equiv, dp_current))
+ djt->dp_equiv = NULL_RTX;
+ }
+ }
+ }
+
+ continue;
+ }
+
+	  /* Anything other than a code label or jump arrives here.
+ We try and track DP, but sometimes we might not be able to. */
+ dp_current_ok = track_dp_reload (insn, &dp_current,
+ dp_current_ok, 0);
+ }
+
+ /* When we're looking to see if we've finished we count the number of
+	 paths through the code labels where we weren't able to definitively
+ track DP.
+ This number is used to see if we're converging on a solution.
+ If this hits zero then we've fully converged, but if this stays the
+ same as last time then we probably can't make any further
+ progress. */
+ incomplete_scan = 0;
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ djt = &ip2k_dpre_jump_targets[INSN_UID (insn)];
+ if (djt->touch_count != djt->reach_count)
+ {
+ incomplete_scan += (djt->reach_count - djt->touch_count);
+ djt->dp_equiv = NULL_RTX;
+ djt->touch_count = 0;
+ }
+ }
+ }
+ }
+ while (incomplete_scan && incomplete_scan != last_incomplete_scan);
+
+ /* Finally we scan the whole function and run DP elimination. When we hit
+ a CODE_LABEL we pick up any stored equivalence since we now know that
+ every path to this point entered with DP holding the same thing! If
+ we subsequently have a reload that matches then we can eliminate it. */
+ dp_current = NULL_RTX;
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN)
+ continue;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ djt = &ip2k_dpre_jump_targets[INSN_UID (insn)];
+ dp_current = djt->dp_equiv;
+ continue;
+ }
+
+ track_dp_reload (insn, &dp_current, 1, 1);
+ }
+
+ free (ip2k_dpre_jump_targets);
+}
+
+/* As part of the machine-dependent reorg we look for reloads of DP
+ that we can move to earlier points within the file.
+ Moving these out of the way allows more peepholes to match. */
+
+void
+mdr_try_move_dp_reload (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+ rtx set;
+ rtx orig_first;
+
+ /* Don't try to match the first instruction because we can't move it
+ anyway. */
+ orig_first = first_insn;
+ first_insn = next_nonnote_insn (first_insn);
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ /* Only plain single-SET insns are candidates. */
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ continue;
+
+ /* Look for DP being loaded. When we find this we start a rewind
+ scan looking for possible positions to move this to. */
+ if (GET_CODE (XEXP (set, 0)) == REG
+ && REGNO (XEXP (set, 0)) == REG_DP
+ && GET_MODE (XEXP (set, 0)) == HImode)
+ {
+ int try_again;
+ rtx try_insn = insn;
+
+ do
+ {
+ rtx rewind;
+ rtx check;
+
+ try_again = 0;
+
+ /* For now we do the *really* simple version of things and only
+ attempt to move the load of DP if it's very safe to do so. */
+ rewind = prev_nonnote_insn (try_insn);
+ if (rewind != orig_first && rewind != NULL_RTX
+ && GET_CODE (rewind) == INSN)
+ {
+ /* The insn we want to hop over must be a plain SET with
+ no CC0 involvement on either side. */
+ check = ((GET_CODE (PATTERN (rewind)) == SET)
+ ? PATTERN (rewind) : NULL_RTX);
+ if (check != NULL_RTX
+ && ip2k_composite_xexp_not_uses_cc0_p (XEXP (check, 0))
+ && ip2k_composite_xexp_not_uses_cc0_p (XEXP (check, 1)))
+ {
+ /* Case 1: "check" writes a register that isn't either
+ half of DP, doesn't read DP, and doesn't produce a
+ value that the DP load consumes - the two insns are
+ independent, so re-emit the DP load above "check",
+ delete the original and try to rewind further. */
+ if (GET_CODE (XEXP (check, 0)) == REG
+ && REGNO (XEXP (check, 0)) != REG_DPH
+ && REGNO (XEXP (check, 0)) != REG_DPL
+ && (ip2k_composite_xexp_not_uses_reg_p
+ (XEXP (check, 1), REG_DP, 2))
+ && (ip2k_composite_xexp_not_uses_reg_p
+ (XEXP (set, 1),
+ REGNO (XEXP (check, 0)),
+ GET_MODE_SIZE (GET_MODE (XEXP (check, 0))))))
+ {
+ emit_insn_before (set, rewind);
+ if (try_insn == insn)
+ insn = prev_nonnote_insn (insn);
+ delete_insn (try_insn);
+ try_insn = prev_nonnote_insn (rewind);
+ try_again = 1;
+ }
+ /* Case 2: DP is loaded from a register; the swap is
+ also safe if "check" avoids DP entirely and doesn't
+ write that source register. */
+ else if (GET_CODE (XEXP (set, 1)) == REG
+ && ip2k_composite_xexp_not_uses_reg_p (XEXP (check, 1), REG_DP, 2)
+ && ip2k_composite_xexp_not_uses_reg_p (XEXP (check, 0), REG_DP, 2)
+ && ip2k_composite_xexp_not_uses_reg_p (XEXP (check, 0), REGNO (XEXP (set, 1)),
+ GET_MODE_SIZE (GET_MODE (XEXP (set, 1)))))
+ {
+ emit_insn_before (set, rewind);
+ if (try_insn == insn)
+ insn = prev_nonnote_insn (insn);
+ delete_insn (try_insn);
+ try_insn = prev_nonnote_insn (rewind);
+ try_again = 1;
+ }
+ }
+ }
+ }
+ while (try_again && try_insn);
+ }
+ }
+}
+
+/* Look to see if the expression, x, can have any stack references offset by
+ a fixed constant, offset. If it definitely can then returns non-zero. */
+
+int
+ip2k_check_can_adjust_stack_ref (x, offset)
+ rtx x;
+ int offset;
+{
+ if (GET_RTX_CLASS (GET_CODE (x)) == '2'
+ || GET_RTX_CLASS (GET_CODE (x)) == 'c')
+ return (ip2k_check_can_adjust_stack_ref (XEXP (x, 0), offset)
+ && ip2k_check_can_adjust_stack_ref (XEXP (x, 1), offset));
+
+ if (GET_RTX_CLASS (GET_CODE (x)) == '1')
+ return ip2k_check_can_adjust_stack_ref (XEXP (x, 0), offset);
+
+ switch (GET_CODE (x))
+ {
+ case REG:
+ return (REGNO (x) != REG_SPH && REGNO (x) != REG_SPL);
+
+ case MEM:
+ if (GET_CODE (XEXP (x, 0)) != PLUS)
+ return 1;
+
+ if (GET_CODE (XEXP (XEXP (x, 0), 0)) != REG)
+ return 1;
+
+ if (REGNO (XEXP (XEXP (x, 0), 0)) != REG_SP)
+ return 1;
+
+ /* We can't allow this if the adjustment will create an
+ invalid address. */
+ return (INTVAL (XEXP (XEXP (x, 0), 1))
+ + offset <= (128 - 2 * GET_MODE_SIZE (GET_MODE (x))));
+
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* Adjusts all of the stack references in the expression pointed to by x by
+ a fixed offset. */
+
+void
+ip2k_adjust_stack_ref (x, offset)
+ rtx *x;
+ int offset;
+{
+ if (GET_RTX_CLASS (GET_CODE (*x)) == '2'
+ || GET_RTX_CLASS (GET_CODE (*x)) == 'c')
+ {
+ ip2k_adjust_stack_ref (&XEXP (*x, 0), offset);
+ ip2k_adjust_stack_ref (&XEXP (*x, 1), offset);
+ return;
+ }
+
+ if (GET_RTX_CLASS (GET_CODE (*x)) == '1')
+ {
+ ip2k_adjust_stack_ref (&XEXP (*x, 0), offset);
+ return;
+ }
+
+ switch (GET_CODE (*x))
+ {
+ case MEM:
+ if (GET_CODE (XEXP (*x, 0)) != PLUS)
+ return;
+
+ if (GET_CODE (XEXP (XEXP (*x, 0), 0)) != REG)
+ return;
+
+ if (REGNO (XEXP (XEXP (*x, 0), 0)) != REG_SP)
+ return;
+
+ *x = copy_rtx (*x);
+ XEXP (XEXP (*x, 0), 1) = GEN_INT (INTVAL (XEXP (XEXP (*x, 0), 1))
+ + offset);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* As part of the machine-dependent reorg we look to move push instructions
+ to earlier points within the file. Moving these out of the way allows more
+ peepholes to match. */
+
+void
+mdr_try_move_pushes (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+ rtx set;
+ rtx orig_first;
+
+ /* Don't try to match the first instruction because we can't move
+ it anyway. */
+ orig_first = first_insn;
+ first_insn = next_nonnote_insn (first_insn);
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ continue;
+
+ /* Have we found a push instruction? A push is a store of a register
+ through a POST_DEC of SP. */
+ if (GET_CODE (XEXP (set, 0)) == MEM
+ && GET_CODE (XEXP (XEXP (set, 0), 0)) == POST_DEC
+ && GET_CODE (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG
+ && REGNO (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG_SP
+ && GET_CODE (XEXP (set, 1)) == REG)
+ {
+ rtx try_insn = insn;
+ unsigned int regno = REGNO (XEXP (set, 1));
+ int reg_range = GET_MODE_SIZE (GET_MODE (XEXP (set, 1)));
+
+ /* Rewind over earlier insns for as long as the swap stays safe. */
+ while (1)
+ {
+ rtx rewind;
+ rtx check;
+
+ rewind = prev_nonnote_insn (try_insn);
+ if (rewind == orig_first || rewind == NULL_RTX
+ || GET_CODE (rewind) != INSN)
+ break;
+
+ check = (GET_CODE (PATTERN (rewind)) == SET) ? PATTERN (rewind) : NULL_RTX;
+ if (check == NULL_RTX)
+ break;
+
+ /* Moving the push earlier moves its SP decrement too, so every
+ stack-relative reference in "check" must tolerate being
+ rebased by reg_range bytes. */
+ if (! ip2k_check_can_adjust_stack_ref (XEXP (check, 0),
+ reg_range)
+ || ! ip2k_check_can_adjust_stack_ref (XEXP (check, 1),
+ reg_range))
+ break;
+
+ /* If we've hit another push instruction we can't go any
+ further. */
+ if (GET_CODE (XEXP (check, 0)) == MEM
+ && GET_CODE (XEXP (XEXP (check, 0), 0)) == POST_DEC
+ && GET_CODE (XEXP (XEXP (XEXP (check, 0), 0), 0)) == REG
+ && REGNO (XEXP (XEXP (XEXP (check, 0), 0), 0)) == REG_SP)
+ break;
+
+ /* If this is a register move then check that it doesn't clobber
+ SP or any part of the instruction we're trying to move. */
+ if (GET_CODE (XEXP (check, 0)) == REG
+ {
+ unsigned int check_reg = REGNO (XEXP (check, 0));
+ int check_reg_range = GET_MODE_SIZE (GET_MODE (XEXP (check,
+ 0)));
+
+ /* If we have a special case where what we want to push is
+ being loaded by this "clobbering" insn then we can just
+ push what is being used to load us and then do the load.
+ This may seem a little odd, but we may subsequently be
+ able to merge the load with another instruction as it
+ may only be used once now! Note though that we
+ specifically don't try this if the expression being
+ loaded is an HImode MEM using IP. */
+ if (check_reg == regno
+ && check_reg_range == reg_range
+ && ((GET_CODE (XEXP (check, 1)) == REG
+ || (GET_CODE (XEXP (check, 1)) == MEM
+ && (GET_MODE (XEXP (check, 1)) != HImode
+ || ip2k_xexp_not_uses_reg_for_mem (XEXP (check, 1), REG_IP))))))
+ {
+ /* Emit a push of the load's source, sized to match. */
+ switch (check_reg_range)
+ {
+ case 1:
+ emit_insn_before (gen_movqi (XEXP (set, 0),
+ XEXP (check, 1)),
+ rewind);
+ delete_insn (try_insn);
+ break;
+
+ case 2:
+ emit_insn_before (gen_movhi (XEXP (set, 0),
+ XEXP (check, 1)),
+ rewind);
+ delete_insn (try_insn);
+ break;
+
+ case 4:
+ emit_insn_before (gen_movsi (XEXP (set, 0),
+ XEXP (check, 1)),
+ rewind);
+ delete_insn (try_insn);
+ break;
+
+ case 8:
+ emit_insn_before (gen_movdi (XEXP (set, 0),
+ XEXP (check, 1)),
+ rewind);
+ delete_insn (try_insn);
+ break;
+ }
+
+ ip2k_adjust_stack_ref (&XEXP (check, 0), reg_range);
+ ip2k_adjust_stack_ref (&XEXP (check, 1), reg_range);
+ try_insn = prev_nonnote_insn (rewind);
+ /* XXX - should be a continue? */
+ break;
+ }
+
+ /* Stop if "check" writes SP itself or any register that
+ overlaps the value being pushed. */
+ if ((check_reg == REG_SPL)
+ || (check_reg == REG_SPH)
+ || (((regno <= check_reg)
+ && (regno + reg_range - 1) >= check_reg)
+ || ((regno <= (check_reg + check_reg_range - 1))
+ && ((regno + reg_range - 1)
+ >= (check_reg + check_reg_range - 1)))))
+ break;
+ }
+
+ /* Safe: hoist the push above "check" and rebase the stack
+ references within "check" to account for the moved SP
+ decrement. */
+ emit_insn_before (set, rewind);
+ delete_insn (try_insn);
+ ip2k_adjust_stack_ref (&XEXP (check, 0), reg_range);
+ ip2k_adjust_stack_ref (&XEXP (check, 1), reg_range);
+ try_insn = prev_nonnote_insn (rewind);
+ }
+ }
+ }
+}
+
+/* Assist the following function, mdr_try_propagate_clr(). On entry,
+ FIRST_INSN has just cleared (zeroed) the QImode register REGNO; scan
+ forwards simplifying or narrowing anything that can exploit that
+ known zero, until the zero is invalidated or tracking becomes
+ unsafe. */
+
+static void
+mdr_try_propagate_clr_sequence (first_insn, regno)
+ rtx first_insn;
+ unsigned int regno;
+{
+ rtx try_insn;
+
+ for (try_insn = next_nonnote_insn (first_insn); try_insn;
+ try_insn = next_nonnote_insn (try_insn))
+ {
+ rtx new_insn = NULL_RTX;
+ rtx set2;
+
+ /* Jumps are skipped; anything else that isn't a plain INSN
+ (labels, barriers, calls) ends the scan. */
+ if (GET_CODE (try_insn) == JUMP_INSN)
+ continue;
+
+ if (GET_CODE (try_insn) != INSN)
+ break;
+
+ set2 = ((GET_CODE (PATTERN (try_insn)) == SET)
+ ? PATTERN (try_insn) : NULL_RTX);
+ if (set2 == NULL_RTX)
+ continue;
+
+ /* ANDing anything with the zeroed register yields zero, so the
+ insn degenerates to a clear of its destination. */
+ if (GET_CODE (XEXP (set2, 1)) == AND
+ && ((GET_CODE (XEXP (XEXP (set2, 1), 0)) == REG
+ && REGNO (XEXP (XEXP (set2, 1), 0)) == regno)
+ || (GET_CODE (XEXP (XEXP (set2, 1), 1)) == REG
+ && REGNO (XEXP (XEXP (set2, 1), 1)) == regno)))
+ {
+ rtx remove_insn = try_insn;
+ try_insn = emit_insn_before (gen_rtx_SET (QImode, XEXP (set2, 0),
+ const0_rtx), try_insn);
+ delete_insn (remove_insn);
+ }
+ /* IOR with the zeroed register is the identity - reduce the insn
+ to a copy of the other operand. */
+ else if (GET_CODE (XEXP (set2, 1)) == IOR
+ && GET_CODE (XEXP (XEXP (set2, 1), 0)) == REG
+ && REGNO (XEXP (XEXP (set2, 1), 0)) == regno)
+ {
+ rtx remove_insn = try_insn;
+ try_insn = emit_insn_before (gen_rtx_SET (QImode, XEXP (set2, 0),
+ XEXP (XEXP (set2, 1), 1)),
+ try_insn);
+ delete_insn (remove_insn);
+ }
+ else if (GET_CODE (XEXP (set2, 1)) == IOR
+ && GET_CODE (XEXP (XEXP (set2, 1), 1)) == REG
+ && REGNO (XEXP (XEXP (set2, 1), 1)) == regno)
+ {
+ rtx remove_insn = try_insn;
+ try_insn = emit_insn_before (gen_rtx_SET (QImode, XEXP (set2, 0),
+ XEXP (XEXP (set2, 1), 0)),
+ try_insn);
+ delete_insn (remove_insn);
+ }
+ /* XOR with zero likewise copies the other operand. */
+ else if (GET_CODE (XEXP (set2, 1)) == XOR
+ && GET_CODE (XEXP (XEXP (set2, 1), 0)) == REG
+ && REGNO (XEXP (XEXP (set2, 1), 0)) == regno)
+ {
+ rtx remove_insn = try_insn;
+ try_insn = emit_insn_before (gen_rtx_SET (QImode, XEXP (set2, 0),
+ XEXP (XEXP (set2, 1), 1)),
+ try_insn);
+ delete_insn (remove_insn);
+ }
+ else if (GET_CODE (XEXP (set2, 1)) == XOR
+ && GET_CODE (XEXP (XEXP (set2, 1), 1)) == REG
+ && REGNO (XEXP (XEXP (set2, 1), 1)) == regno)
+ {
+ rtx remove_insn = try_insn;
+ try_insn = emit_insn_before (gen_rtx_SET (QImode, XEXP (set2, 0),
+ XEXP (XEXP (set2, 1), 0)),
+ try_insn);
+ delete_insn (remove_insn);
+ }
+
+ /* Watch register writes: they may overwrite the zero we're
+ tracking. */
+ if (GET_CODE (XEXP (set2, 0)) == REG)
+ {
+ int reg2_range = GET_MODE_SIZE (GET_MODE (XEXP (set2, 0)));
+ unsigned int regno2 = REGNO (XEXP (set2, 0));
+
+ /* Loading 1 or 0xff (-1) into the still-zero byte register is
+ cheaper as an increment/decrement. Either way the register
+ stops being zero, so the scan ends here. */
+ if (reg2_range == 1
+ && regno == regno2
+ && GET_CODE (XEXP (set2, 1)) == CONST_INT)
+ {
+ int iv = INTVAL (XEXP (set2, 1));
+ if (iv == 0xff)
+ iv = -1;
+ if (iv == 1 || iv == -1)
+ {
+ new_insn = gen_rtx_SET (QImode, XEXP (set2, 0),
+ gen_rtx_PLUS (QImode, XEXP (set2, 0),
+ GEN_INT (iv)));
+ new_insn = emit_insn_before (new_insn, try_insn);
+ delete_insn (try_insn);
+ try_insn = new_insn;
+ }
+ break;
+ }
+
+ /* Any other write overlapping the zeroed register invalidates
+ the zero. */
+ if ((regno >= regno2) && (regno <= regno2 + reg2_range - 1))
+ break;
+
+ /* Copying the zeroed register elsewhere is itself a clear. */
+ if (GET_CODE (XEXP (set2, 1)) == REG
+ && REGNO (XEXP (set2, 1)) == regno)
+ {
+ new_insn = emit_insn_before (gen_rtx_SET (QImode,
+ XEXP (set2, 0),
+ const0_rtx),
+ try_insn);
+ delete_insn (try_insn);
+ try_insn = new_insn;
+ }
+ }
+
+ /* CC0 setters: a test or compare of the HImode register whose high
+ byte (regno) is known zero can be narrowed to operate on the low
+ byte (regno + 1) alone. */
+ if (GET_CODE (XEXP (set2, 0)) == CC0)
+ {
+ if (GET_CODE (XEXP (set2, 1)) == REG
+ && GET_MODE_SIZE (GET_MODE (XEXP (set2, 1))) == 2
+ && REGNO (XEXP (set2, 1)) == regno)
+ {
+ new_insn = gen_rtx_SET (VOIDmode, gen_rtx (CC0, VOIDmode),
+ gen_rtx_REG(QImode, regno + 1));
+ new_insn = emit_insn_before (new_insn, try_insn);
+ }
+ else if (GET_CODE (XEXP (set2, 1)) == COMPARE
+ && GET_CODE (XEXP (XEXP (set2, 1), 0)) == REG
+ && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (set2, 1), 0))) == 2
+ && REGNO (XEXP (XEXP (set2, 1), 0)) == regno
+ && GET_CODE (XEXP (XEXP (set2, 1), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (set2, 1), 1)) >= 0
+ && INTVAL (XEXP (XEXP (set2, 1), 1)) < 256)
+ {
+ new_insn = gen_rtx_SET (VOIDmode, cc0_rtx,
+ gen_rtx_COMPARE(QImode,
+ gen_rtx_REG (QImode,
+ regno + 1),
+ XEXP (XEXP (set2, 1),
+ 1)));
+ new_insn = emit_insn_before (new_insn, try_insn);
+ }
+
+ /* If we have inserted a replacement for a CC0 setter operation
+ then we need to delete the old one. */
+ if (new_insn != NULL_RTX)
+ {
+ delete_insn (try_insn);
+ try_insn = new_insn;
+
+ /* Now as we know that we have just done an unsigned compare
+ (remember we were zero-extended by the clr!) we also know
+ that we don't need a signed jump insn. If we find that
+ our next isns is a signed jump then make it unsigned! */
+ /* NOTE(review): next_nonnote_insn may return NULL_RTX at the
+ end of the insn chain, which GET_CODE would dereference -
+ confirm a trailing barrier/note always exists here. */
+ if (GET_CODE (next_nonnote_insn (try_insn)) == JUMP_INSN)
+ {
+ rtx set3;
+
+ try_insn = next_nonnote_insn (try_insn);
+ set3 = ((GET_CODE (PATTERN (try_insn)) == SET)
+ ? PATTERN (try_insn) : NULL_RTX);
+ if (set3 == NULL_RTX)
+ continue;
+
+ /* If we discover that our jump target is only accessible
+ from here then we can continue our "clr" propagation to
+ it too! */
+ if (LABEL_NUSES (JUMP_LABEL (try_insn)) == 1)
+ mdr_try_propagate_clr_sequence (JUMP_LABEL (try_insn),
+ regno);
+
+ if (GET_CODE (XEXP (set3, 0)) == PC
+ && GET_CODE (XEXP (set3, 1)) == IF_THEN_ELSE
+ && (GET_CODE (XEXP (XEXP (set3, 1), 0)) == GT
+ || GET_CODE (XEXP (XEXP (set3, 1), 0)) == GE
+ || GET_CODE (XEXP (XEXP (set3, 1), 0)) == LT
+ || GET_CODE (XEXP (XEXP (set3, 1), 0)) == LE)
+ && GET_CODE (XEXP (XEXP (XEXP (set3, 1), 0), 0)) == CC0
+ && (GET_CODE (XEXP (XEXP (XEXP (set3, 1), 0), 1))
+ == CONST_INT)
+ && GET_CODE (XEXP (XEXP (set3, 1), 1)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (set3, 1), 2)) == PC)
+ {
+ enum rtx_code code;
+ rtx new_if;
+ rtx cmp;
+
+ /* Replace our old conditional jump with a new one that
+ does the unsigned form of what was previously a
+ signed comparison. */
+ code = GET_CODE (XEXP (XEXP (set3, 1), 0));
+ cmp = gen_rtx_fmt_ee ((code == GT
+ ? GTU
+ : (code == GE
+ ? GEU
+ : (code == LT ? LTU : LEU))),
+ VOIDmode,
+ XEXP (XEXP (XEXP (set3, 1), 0), 0),
+ XEXP (XEXP (XEXP (set3, 1), 0),
+ 1));
+ new_if
+ = gen_rtx_SET (GET_MODE (set3),
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE
+ (GET_MODE (XEXP (set3, 1)), cmp,
+ XEXP (XEXP (set3, 1), 1),
+ XEXP (XEXP (set3, 1), 2)));
+ new_insn = emit_jump_insn_before (new_if, try_insn);
+ LABEL_NUSES (JUMP_LABEL (try_insn))++;
+ delete_insn (try_insn);
+ try_insn = new_insn;
+ }
+ }
+ }
+ }
+ /* A 16-bit add where one operand's high byte (regno) is zero can
+ be done as a zero-extended add of the low byte. */
+ else if (GET_CODE (XEXP (set2, 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (set2, 1), 0)) == REG
+ && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (set2, 1), 0))) == 2
+ && REGNO (XEXP (XEXP (set2, 1), 0)) == regno
+ && (GET_CODE (XEXP (XEXP (set2, 1), 1)) == REG
+ || GET_CODE (XEXP (XEXP (set2, 1), 1)) == MEM
+ || GET_CODE (XEXP (XEXP (set2, 1), 1)) == CONST_INT
+ || GET_CODE (XEXP (XEXP (set2, 1), 1)) == CONST
+ || GET_CODE (XEXP (XEXP (set2, 1), 1)) == SYMBOL_REF))
+ {
+ rtx extend = gen_rtx_ZERO_EXTEND (HImode,
+ gen_rtx_REG (QImode, regno + 1));
+ new_insn = gen_rtx_SET (HImode, XEXP (set2, 0),
+ gen_rtx_PLUS (HImode, extend,
+ XEXP (XEXP (set2, 1), 1)));
+ new_insn = emit_insn_before (new_insn, try_insn);
+ delete_insn (try_insn);
+ try_insn = new_insn;
+ }
+ /* Same narrowing with the operands the other way around. */
+ else if (GET_CODE (XEXP (set2, 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (set2, 1), 1)) == REG
+ && GET_MODE_SIZE (GET_MODE (XEXP (XEXP (set2, 1), 1))) == 2
+ && REGNO (XEXP (XEXP (set2, 1), 1)) == regno
+ && (GET_CODE (XEXP (XEXP (set2, 1), 0)) == REG
+ || GET_CODE (XEXP (XEXP (set2, 1), 0)) == MEM
+ || GET_CODE (XEXP (XEXP (set2, 1), 0)) == CONST_INT
+ || GET_CODE (XEXP (XEXP (set2, 1), 0)) == CONST
+ || GET_CODE (XEXP (XEXP (set2, 1), 0)) == SYMBOL_REF))
+ {
+ rtx t_src = gen_rtx_PLUS (HImode,
+ gen_rtx_ZERO_EXTEND (HImode,
+ gen_rtx_REG (QImode,
+ regno
+ + 1)),
+ XEXP (XEXP (set2, 1), 0));
+ new_insn = emit_insn_before (gen_rtx_SET (HImode, XEXP (set2, 0),
+ t_src),
+ try_insn);
+ delete_insn (try_insn);
+ try_insn = new_insn;
+ }
+ }
+}
+
+/* One of the things that can quite often happen with an 8-bit CPU is that
+ we end up clearing the MSByte of a 16-bit value. Unfortunately, all too
+ often gcc doesn't have any way to realize that only half of the value is
+ useful and ends up doing more work than it should. We scan for such
+ occurrences here, track them and reduce compare operations to a smaller
+ size where possible.
+
+ Note that this is somewhat different to move propagation as we may
+ actually change some instruction patterns when we're doing this whereas
+ move propagation is just about doing a search and replace. */
+
+void
+mdr_try_propagate_clr (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+ rtx set;
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ continue;
+
+ /* Have we found a "clr" instruction? */
+ if (GET_CODE (XEXP (set, 0)) == REG
+ && GET_CODE (XEXP (set, 1)) == CONST_INT
+ && GET_MODE_SIZE (GET_MODE (XEXP (set, 0))) == 1
+ && INTVAL (XEXP (set, 1)) == 0)
+ {
+ mdr_try_propagate_clr_sequence (insn, REGNO (XEXP (set, 0)));
+ }
+ }
+}
+
+/* Look to see if the expression, x, does not make any memory references
+ via the specified register. This is very conservative and only returns
+ non-zero if we definitely don't have such a memory ref. */
+
+int
+ip2k_xexp_not_uses_reg_for_mem (x, regno)
+ rtx x;
+ unsigned int regno;
+{
+ if (regno & 1)
+ regno &= 0xfffffffe;
+
+ if (GET_RTX_CLASS (GET_CODE (x)) == 'b')
+ return (ip2k_xexp_not_uses_reg_for_mem (XEXP (x, 0), regno)
+ && ip2k_xexp_not_uses_reg_for_mem (XEXP (x, 1), regno)
+ && ip2k_xexp_not_uses_reg_for_mem (XEXP (x, 2), regno));
+
+ if (GET_RTX_CLASS (GET_CODE (x)) == '2'
+ || GET_RTX_CLASS (GET_CODE (x)) == 'c'
+ || GET_RTX_CLASS (GET_CODE (x)) == '<')
+ return (ip2k_xexp_not_uses_reg_for_mem (XEXP (x, 0), regno)
+ && ip2k_xexp_not_uses_reg_for_mem (XEXP (x, 1), regno));
+
+ if (GET_RTX_CLASS (GET_CODE (x)) == '1'
+ || GET_RTX_CLASS (GET_CODE (x)) == '3')
+ return ip2k_xexp_not_uses_reg_for_mem (XEXP (x, 0), regno);
+
+ switch (GET_CODE (x))
+ {
+ case REG:
+ return 1;
+
+ case MEM:
+ if ((GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && REGNO (XEXP (XEXP (x, 0), 0)) == regno)
+ || (GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) == regno))
+ return 0;
+ else
+ return 1;
+
+ case CONST:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CC0:
+ case PC:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* Assist the following function, mdr_try_propagate_move(). Scan forwards
+ from FIRST_INSN, substituting EQUIV for uses of ORIG for as long as the
+ equivalence established by FIRST_INSN still holds. */
+
+static void
+mdr_try_propagate_move_sequence (first_insn, orig, equiv)
+ rtx first_insn;
+ rtx orig;
+ rtx equiv;
+{
+ rtx try_insn;
+
+ for (try_insn = next_nonnote_insn (first_insn); try_insn;
+ try_insn = next_nonnote_insn (try_insn))
+ {
+ rtx set;
+ int range;
+ rtx new_equiv = NULL_RTX;
+
+ if (GET_CODE (try_insn) != JUMP_INSN && GET_CODE (try_insn) != INSN)
+ break;
+
+ set = single_set (try_insn);
+ if (set == NULL_RTX)
+ break;
+
+ /* Widest of the two operand sizes - used for overlap tests below. */
+ range = MAX (GET_MODE_SIZE (GET_MODE (equiv)),
+ GET_MODE_SIZE (GET_MODE (XEXP (set, 0))));
+
+ /* If EQUIV is WREG, stop at anything that might clobber W - unless
+ the insn is exactly a reload of ORIG into W. */
+ if (GET_CODE (equiv) == REG
+ && REGNO (equiv) == REG_W
+ && (recog_memoized (try_insn) < 0
+ || get_attr_clobberw (try_insn) != CLOBBERW_NO)
+ && (! (GET_CODE (XEXP (set, 0)) == REG
+ && REGNO (XEXP (set, 0)) == REG_W
+ && rtx_equal_p (XEXP (set, 1), orig))))
+ break;
+ /* Stop if the insn writes SP or a register that EQUIV or ORIG
+ depend on (a write of ORIG/EQUIV itself is handled after the
+ substitution below). */
+ else if (GET_CODE (XEXP (set, 0)) == REG
+ && (REGNO (XEXP (set, 0)) == REG_SP
+ || ! ip2k_xexp_not_uses_reg_p (equiv, REGNO (XEXP (set, 0)),
+ range)
+ || ! ip2k_xexp_not_uses_reg_p (orig, REGNO (XEXP (set, 0)),
+ range))
+ && ! rtx_equal_p (equiv, XEXP (set, 0))
+ && ! rtx_equal_p (orig, XEXP (set, 0)))
+ break;
+ /* If ORIG is half of IP or DP, give up once an insn makes a memory
+ reference through that pointer register. */
+ else if (GET_CODE (orig) == REG
+ && (REGNO (orig) == REG_IPL
+ || REGNO (orig) == REG_IPH
+ || REGNO (orig) == REG_DPL
+ || REGNO (orig) == REG_DPH)
+ && (! ip2k_xexp_not_uses_reg_for_mem (XEXP (set, 0),
+ REGNO (orig))
+ || ! ip2k_xexp_not_uses_reg_for_mem (XEXP (set, 1),
+ REGNO (orig))))
+ break;
+ /* A store while EQUIV is itself a memory reference: check for
+ possible aliasing between the two. */
+ else if (GET_CODE (XEXP (set, 0)) == MEM
+ && GET_CODE (equiv) == MEM)
+ {
+ if (! ip2k_xexp_not_uses_reg_p (equiv, REG_SP, 2))
+ {
+ if (! ip2k_xexp_not_uses_reg_p (XEXP (set, 0), REG_SP, 2))
+ {
+ /* We look for a special case of "push" operations screwing
+ our register equivalence when it's based on a stack slot.
+ We can track this one and replace the old equivalence
+ expression with a new one. */
+ if (GET_CODE (XEXP (XEXP (set, 0), 0)) == POST_DEC
+ && GET_CODE (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG
+ && REGNO (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG_SP
+ && GET_CODE (XEXP (equiv, 0)) == PLUS
+ && REGNO (XEXP (XEXP (equiv, 0), 0)) == REG_SP)
+ {
+ int md_size = GET_MODE_SIZE (GET_MODE (XEXP (set, 0)));
+ int new_sp_offs = INTVAL (XEXP (XEXP (equiv, 0), 1))
+ + md_size;
+
+ /* Don't allow an invalid stack pointer offset to be
+ created. */
+ if (new_sp_offs > (128 - 2 * md_size))
+ break;
+
+ new_equiv
+ = gen_rtx_MEM (GET_MODE (equiv),
+ gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (HImode ,
+ REG_SP),
+ GEN_INT (new_sp_offs)));
+ }
+ else if (! rtx_equal_p (equiv, XEXP (set, 0)))
+ {
+ /* Look at the SP offsets and look for any overlaps. */
+ int equiv_offs = GET_CODE (XEXP (equiv, 0)) == PLUS
+ ? INTVAL (XEXP (XEXP (equiv, 0), 1))
+ : 0;
+ int set_offs
+ = (GET_CODE (XEXP (XEXP (set, 0), 0)) == PLUS
+ ? INTVAL (XEXP (XEXP (XEXP (set, 0), 0), 1))
+ : 0);
+
+ if (abs (equiv_offs - set_offs) < range)
+ break;
+ }
+ }
+ }
+
+ /* A store while EQUIV is an IP-based memory reference may alias
+ it - be conservative and give up. */
+ if (! ip2k_xexp_not_uses_reg_p (equiv, REG_IP, 2))
+ break;
+
+ if (! ip2k_xexp_not_uses_reg_p (XEXP (set, 0), REG_DP, 2)
+ && ! ip2k_xexp_not_uses_reg_p (equiv, REG_DP, 2)
+ && ! rtx_equal_p (equiv, XEXP (set, 0)))
+ {
+ /* Look at the DP offsets and look for any overlaps. */
+ int equiv_offs = GET_CODE (XEXP (equiv, 0)) == PLUS
+ ? INTVAL (XEXP (XEXP (equiv, 0), 1))
+ : 0;
+ int set_offs = GET_CODE (XEXP (XEXP (set, 0), 0)) == PLUS
+ ? INTVAL (XEXP (XEXP (XEXP (set, 0), 0), 1))
+ : 0;
+
+ if (abs (equiv_offs - set_offs) < range)
+ break;
+ }
+ }
+
+ /* Attempt the substitution in this insn's source; the replacement
+ is validated and backed out if the insn would become invalid. */
+ validate_replace_rtx_subexp (orig, equiv, try_insn, &XEXP (set, 1));
+
+ /* If this insn rewrote EQUIV or ORIG themselves then the
+ equivalence is dead beyond this point. */
+ if (rtx_equal_p (equiv, XEXP (set, 0))
+ || rtx_equal_p (orig, XEXP (set, 0)))
+ break;
+
+ /* A push may have rebased a stack-slot EQUIV - switch to the
+ updated expression. */
+ if (new_equiv != NULL_RTX)
+ equiv = new_equiv;
+ }
+}
+
+/* Try propagating move instructions forwards. It may be that we can
+ replace a register use with an equivalent expression that already
+ holds the same value and thus allow one or more register loads to
+ be eliminated. */
+
+void
+mdr_try_propagate_move (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+ rtx set;
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ continue;
+
+ /* Have we found a simple move instruction? The destination must be
+ a register (NOTE(review): 0x80 and up presumably selects the
+ general register file - confirm against ip2k.h) or one of the
+ IP/DP pointer halves. The source must be something whose value we
+ can keep tracking: a non-SP register that doesn't overlap the
+ destination; a MEM that doesn't address through IP (unless
+ QImode) nor through DP when the destination is DP; a CONST_INT
+ (but not a QImode zero - presumably left for the clr propagation
+ pass); or a symbolic constant. */
+ if (GET_CODE (XEXP (set, 0)) == REG
+ && (REGNO (XEXP (set, 0)) >= 0x80
+ || REGNO (XEXP (set, 0)) == REG_DPL
+ || REGNO (XEXP (set, 0)) == REG_DPH
+ || REGNO (XEXP (set, 0)) == REG_IPL
+ || REGNO (XEXP (set, 0)) == REG_IPH)
+ && ((GET_CODE (XEXP (set, 1)) == REG
+ && REGNO (XEXP (set, 1)) != REG_SP
+ && ip2k_xexp_not_uses_reg_p (XEXP (set, 0),
+ REGNO (XEXP (set, 1)),
+ GET_MODE_SIZE (GET_MODE (XEXP (set,
+ 0)))))
+ || (GET_CODE (XEXP (set, 1)) == MEM
+ && (ip2k_xexp_not_uses_reg_p (XEXP (set, 1), REG_IP, 2)
+ || GET_MODE (XEXP (set, 1)) == QImode)
+ && ((REGNO (XEXP (set, 0)) != REG_DPH
+ && REGNO (XEXP (set, 0)) != REG_DPL)
+ || ip2k_xexp_not_uses_reg_p (XEXP (set, 1), REG_DP, 2)))
+ || (GET_CODE (XEXP (set, 1)) == CONST_INT
+ && (GET_MODE (XEXP (set, 0)) != QImode
+ || INTVAL (XEXP (set, 1)) != 0))
+ || GET_CODE (XEXP (set, 1)) == CONST_DOUBLE
+ || GET_CODE (XEXP (set, 1)) == CONST
+ || GET_CODE (XEXP (set, 1)) == SYMBOL_REF))
+ {
+ mdr_try_propagate_move_sequence (insn, XEXP (set, 0), XEXP (set, 1));
+ }
+ }
+}
+
+/* Try to remove redundant instructions: dummy CONST_INT patterns, and
+ logical ops whose mask makes them no-ops (AND with all ones, IOR/XOR
+ with zero) or constant (AND with zero). */
+
+void
+mdr_try_remove_redundant_insns (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ rtx set;
+ enum machine_mode mode;
+ int md_size;
+ HOST_WIDE_INT pattern;
+ int i;
+
+ if (GET_CODE (insn) != INSN)
+ continue;
+
+ if (GET_CODE (PATTERN (insn)) == CONST_INT)
+ {
+ /* We've found a dummy expression. */
+ rtx remove_insn = insn;
+ /* NOTE(review): prev_nonnote_insn can return NULL_RTX when the
+ deleted insn is the first real insn, and the loop update would
+ then be handed NULL - confirm callers always pass a chain with
+ a leading note. */
+ insn = prev_nonnote_insn (insn);
+ delete_insn (remove_insn);
+ continue;
+ }
+
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ continue;
+
+ mode = GET_MODE (XEXP (set, 0));
+ md_size = GET_MODE_SIZE (mode);
+ if ((md_size < 1) || (md_size > 4))
+ continue;
+
+ /* Build an all-ones mask as wide as the destination. */
+ pattern = 0;
+ for (i = 0; i < md_size; i++)
+ {
+ pattern <<= 8;
+ pattern |= 0xff;
+ }
+
+ if ((GET_CODE (XEXP (set, 1)) == AND
+ && GET_CODE (XEXP (XEXP (set, 1), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (set, 1), 1)) == pattern)
+ || ((GET_CODE (XEXP (set, 1)) == IOR
+ || GET_CODE (XEXP (set, 1)) == XOR)
+ && GET_CODE (XEXP (XEXP (set, 1), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (set, 1), 1)) == 0x00))
+ {
+ /* We've found an AND with all 1's, an XOR with all 0's or an
+ IOR with 0's. */
+ rtx remove_insn = insn;
+
+ /* Is it completely redundant or should it become a move insn? */
+ if (! rtx_equal_p (XEXP (set, 0), XEXP (XEXP (set, 1), 0)))
+ {
+ emit_insn_before (gen_rtx_SET (mode,
+ XEXP (set, 0),
+ XEXP (XEXP (set, 1), 0)),
+ insn);
+ }
+
+ insn = prev_nonnote_insn(insn);
+ delete_insn (remove_insn);
+ }
+ else if (GET_CODE (XEXP (set, 1)) == AND
+ && GET_CODE (XEXP (XEXP (set, 1), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (set, 1), 1)) == 0)
+ {
+ /* We've found an AND with all 0's - replace it with a plain
+ load of zero into the destination. */
+ rtx remove_insn = insn;
+ insn = emit_insn_before (gen_rtx_SET (mode,
+ XEXP (set, 0),
+ XEXP (XEXP (set, 1), 1)),
+ insn);
+ delete_insn (remove_insn);
+ }
+ }
+}
+
+/* Structure used to track jump targets during WREG elimination. */
+
+struct we_jump_targets
+{
+ int target; /* Is this a jump target? */
+ int reach_count; /* Number of ways we can reach this insn. */
+ int touch_count; /* Number of times we've touched this insn
+ during scanning. */
+ rtx w_equiv; /* WREG-equivalence at this point; NULL_RTX
+ if unknown or conflicting. */
+};
+
+/* One entry per insn, indexed by INSN_UID; allocated by
+ mdr_try_wreg_elim(). */
+struct we_jump_targets *ip2k_we_jump_targets;
+
+/* WREG equivalence tracking used within the WREG elimination pass
+ (mdr_try_wreg_elim). INSN is the insn to examine; *W_CURRENT is the
+ expression WREG is currently known to hold (NULL_RTX if unknown);
+ W_CURRENT_OK says whether that knowledge is trustworthy. When
+ MODIFYING is non-zero, redundant reloads of W are actually deleted.
+ Returns the new value for W_CURRENT_OK. */
+
+int
+track_w_reload (insn, w_current, w_current_ok, modifying)
+ rtx insn;
+ rtx *w_current;
+ int w_current_ok;
+ int modifying;
+{
+ rtx set;
+
+ /* Anything that isn't a plain INSN (labels, calls, jumps) makes us
+ forget what W held. */
+ if (GET_CODE (insn) != INSN)
+ {
+ *w_current = NULL_RTX;
+ return 1;
+ }
+
+ set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+ if (set == NULL_RTX)
+ {
+ *w_current = NULL_RTX;
+ return 1;
+ }
+
+ /* Look for W being modified. If it is, see if it's being changed
+ to what it already is! */
+ if (GET_CODE (XEXP (set, 0)) == REG
+ && REGNO (XEXP (set, 0)) == REG_W
+ && GET_MODE (XEXP (set, 0)) == QImode)
+ {
+ /* If this is an equivalence we can delete the new set operation. */
+ if (*w_current != NULL_RTX
+ && rtx_equal_p (XEXP (set, 1), *w_current))
+ {
+ if (modifying)
+ delete_insn (insn);
+ }
+ else
+ {
+ /* W now holds a new value - remember it. */
+ *w_current = XEXP (set, 1);
+ return 1;
+ }
+ }
+ else if (recog_memoized (insn) < 0
+ || get_attr_clobberw (insn) != CLOBBERW_NO)
+ {
+ /* If we clobber W then we've clobbered any equivalences ! */
+ *w_current = NULL_RTX;
+ return 1;
+ }
+ else if (! ip2k_xexp_not_uses_reg_p (XEXP (set, 0), REG_SP, 2)
+ && *w_current != NULL_RTX
+ && !ip2k_xexp_not_uses_reg_p (*w_current, REG_SP, 2))
+ {
+ /* We look for a special case of "push" operations screwing up the
+ setting of DP when it's based on the stack. We can track this one
+ and replace the old expression for DP with a new one. */
+ if (GET_CODE (XEXP (set, 0)) == MEM
+ && GET_CODE (XEXP (XEXP (set, 0), 0)) == POST_DEC
+ && GET_CODE (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG
+ && REGNO (XEXP (XEXP (XEXP (set, 0), 0), 0)) == REG_SP
+ && GET_CODE (*w_current) == MEM
+ && GET_CODE (XEXP (*w_current, 0)) == PLUS)
+ {
+ /* XXX - need to ensure that we can track this without going
+ out of range! */
+ rtx val = GEN_INT (INTVAL (XEXP (XEXP (*w_current, 0), 1))
+ + GET_MODE_SIZE (GET_MODE (XEXP (set, 0))));
+ *w_current
+ = gen_rtx_MEM (HImode, gen_rtx_PLUS (Pmode,
+ gen_rtx_REG(HImode, REG_SP),
+ val));
+ return 1;
+ }
+ /* NOTE(review): if the pattern above doesn't match we fall through
+ without invalidating *w_current, even though an SP-relative store
+ may have aliased it - confirm this can't leave a stale
+ equivalence. */
+ }
+ else if (GET_CODE (XEXP (set, 0)) == REG
+ && *w_current != NULL_RTX
+ && !ip2k_xexp_not_uses_reg_p (*w_current, REGNO (XEXP (set, 0)),
+ GET_MODE_SIZE (GET_MODE (XEXP (set
+ , 0)))))
+ {
+ /* If we've just clobbered all or part of a register reference that we
+ were sharing for W then we can't share it any more! */
+ *w_current = NULL_RTX;
+ }
+
+ return w_current_ok;
+}
+
+/* As part of the machine-dependent reorg we scan moves into w and track them
+ to see where any are redundant. */
+
+void
+mdr_try_wreg_elim (first_insn)
+ rtx first_insn;
+{
+ rtx insn;
+ struct we_jump_targets *wjt;
+ rtx w_current;
+ int incomplete_scan;
+ int last_incomplete_scan;
+
+ ip2k_we_jump_targets
+ = (struct we_jump_targets *) xcalloc (get_max_uid (),
+ sizeof (struct we_jump_targets));
+
+ /* First we scan to build up a list of all CODE_LABEL insns and we work out
+ how many different ways we can reach them. */
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ wjt = &ip2k_we_jump_targets[INSN_UID (insn)];
+ wjt->target = 1;
+ wjt->reach_count = LABEL_NUSES (insn);
+ wjt->touch_count = 0;
+ wjt->w_equiv = NULL_RTX;
+ if (! prev_nonnote_insn (insn)
+ || (prev_nonnote_insn (insn)
+ && GET_CODE (prev_nonnote_insn (insn)) != BARRIER))
+ wjt->reach_count++;
+ }
+ }
+
+ /* Next we scan all of the ways of reaching the code labels to see
+ what the WREG register is equivalent to as we reach them. If we find
+ that they're the same then we keep noting the matched value. We
+ iterate around this until we reach a convergence on WREG equivalences
+ at all code labels - we have to be very careful not to be too
+ optimistic! */
+ incomplete_scan = -1;
+ do
+ {
+ int w_current_ok = 0;
+ last_incomplete_scan = incomplete_scan;
+ w_current = NULL_RTX;
+
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ /* If we have a code label then we need to see if we already know
+ what the equivalence is at this point. If we do then we use it
+ immediately, but if we don't then we have a special case to track
+ when we hit a fallthrough-edge (label with no barrier preceding
+ it). Any other accesses to the label must be from jump insns
+ and so they're handled elsewhere. */
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ wjt = &ip2k_we_jump_targets[INSN_UID (insn)];
+
+ /* If we're fully characterized the use the equivalence. */
+ if (wjt->touch_count == wjt->reach_count)
+ {
+ w_current = wjt->w_equiv;
+ w_current_ok = 1;
+ continue;
+ }
+
+ /* If we have a known equivalence for WREG as we reach the
+ fallthrough-edge then track this into the code label. */
+ if (w_current_ok
+ && (! prev_nonnote_insn (insn)
+ || (prev_nonnote_insn (insn)
+ && GET_CODE (prev_nonnote_insn (insn)) != BARRIER)))
+ {
+ if (wjt->touch_count == 0)
+ wjt->w_equiv = w_current;
+
+ if (wjt->touch_count < wjt->reach_count)
+ {
+ wjt->touch_count++;
+ if (! rtx_equal_p (wjt->w_equiv, w_current))
+ {
+ /* When we definitely know that we can't form an
+ equivalence for WREG here we must clobber anything
+ that we'd started to track too. */
+ wjt->w_equiv = NULL_RTX;
+ w_current = NULL_RTX;
+ w_current_ok = 1;
+ }
+ }
+ }
+
+ /* If we've not completely characterized this code label then
+ be cautious and assume that we don't know what WREG is
+ equivalent to. */
+ if (wjt->touch_count < wjt->reach_count)
+ {
+ w_current = NULL_RTX;
+ w_current_ok = 0;
+ }
+
+ continue;
+ }
+
+ /* If we've hit a jump insn then we look for either an address
+ vector (jump table) or for jump label references. */
+ if (GET_CODE (insn) == JUMP_INSN)
+ {
+ /* Don't attempt to track here if we don't have a known
+ equivalence for WREG at this point. */
+ if (w_current_ok)
+ {
+ if (JUMP_LABEL (insn))
+ {
+ wjt
+ = &ip2k_we_jump_targets[INSN_UID (JUMP_LABEL (insn))];
+
+ if (wjt->touch_count == 0)
+ wjt->w_equiv = w_current;
+
+ if (wjt->touch_count < wjt->reach_count)
+ {
+ wjt->touch_count++;
+ if (! rtx_equal_p (wjt->w_equiv, w_current))
+ wjt->w_equiv = NULL_RTX;
+ }
+ }
+ }
+
+ continue;
+ }
+
+ /* Anything other than a code labal or jump arrives here. We try and
+ track WREG, but sometimes we might not be able to. */
+ w_current_ok = track_w_reload (insn, &w_current, w_current_ok, 0);
+ }
+
+ /* When we're looking to see if we've finished we count the number of
+ paths throught the code labels where we weren't able to definitively
+ track WREG. This number is used to see if we're converging on a
+ solution.
+ If this hits zero then we've fully converged, but if this stays the
+ same as last time then we probably can't make any further
+ progress. */
+ incomplete_scan = 0;
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ wjt = &ip2k_we_jump_targets[INSN_UID (insn)];
+ if (wjt->touch_count != wjt->reach_count)
+ {
+ incomplete_scan += (wjt->reach_count - wjt->touch_count);
+ wjt->w_equiv = NULL_RTX;
+ wjt->touch_count = 0;
+ }
+ }
+ }
+ }
+ while (incomplete_scan && incomplete_scan != last_incomplete_scan);
+
+ /* Finally we scan the whole function and run WREG elimination. When we hit
+ a CODE_LABEL we pick up any stored equivalence since we now know that
+ every path to this point entered with WREG holding the same thing! If
+ we subsequently have a reload that matches then we can eliminate it. */
+ w_current = NULL_RTX;
+ for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN)
+ continue;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ wjt = &ip2k_we_jump_targets[INSN_UID (insn)];
+ w_current = wjt->w_equiv;
+ continue;
+ }
+
+ track_w_reload (insn, &w_current, 1, 1);
+ }
+
+ free (ip2k_we_jump_targets);
+}
+
+/* We perform a lot of untangling of the RTL within the reorg pass since
+   the IP2k requires some really bizarre (and really undesirable) things
+   to happen in order to guarantee not aborting.  This pass causes several
+   earlier passes to be re-run as it progressively transforms things,
+   making the subsequent runs continue to win.
+
+   FIRST_INSN is the head of the function's insn chain.  The pass
+   alternates machine-specific clean-ups (the mdr_* helpers) with re-runs
+   of CSE, basic-block/life analysis and peephole2, while progressively
+   setting the ip2k_reorg_split_* flags so moves are split into narrower
+   pieces one mode at a time (DImode, then SImode, HImode, QImode).  */
+
+void
+machine_dependent_reorg (first_insn)
+     rtx first_insn;
+{
+  rtx insn, set;
+
+  CC_STATUS_INIT;
+
+  /* When not optimizing there is nothing to untangle; mark every
+     splitting/merging phase complete and return immediately.  */
+  if (optimize == 0)
+    {
+      ip2k_reorg_completed = 1;
+      ip2k_reorg_split_dimode = 1;
+      ip2k_reorg_split_simode = 1;
+      ip2k_reorg_split_himode = 1;
+      ip2k_reorg_split_qimode = 1;
+      ip2k_reorg_merge_qimode = 1;
+      return;
+    }
+#ifndef IP2K_MD_REORG_PASS
+  /* The full reorg pass is compiled out: mark everything done up front.  */
+  ip2k_reorg_completed = 1;
+  ip2k_reorg_split_dimode = 1;
+  ip2k_reorg_split_simode = 1;
+  ip2k_reorg_split_himode = 1;
+  ip2k_reorg_split_qimode = 1;
+  ip2k_reorg_merge_qimode = 1;
+#else
+  /* All optimizations below must be debugged and enabled one by one.
+     All of them commented now because of abort in GCC core.  */
+
+  ip2k_reorg_in_progress = 1;
+
+  /* Look for side effects of earlier optimizations - in particular look for
+     situations where we're saying "use" a register on one hand but immediately
+     tagging it as "REG_DEAD" at the same time!  Seems like a bug in core-gcc
+     somewhere really but this is what we have to live with!  */
+  for (insn = first_insn; insn; insn = NEXT_INSN (insn))
+    {
+      rtx body;
+
+      if (GET_CODE (insn) == CODE_LABEL
+	  || GET_CODE (insn) == NOTE
+	  || GET_CODE (insn) == BARRIER)
+	continue;
+
+      /* Only real insns carry patterns and register notes.  */
+      if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
+	continue;
+
+      /* Delete a (use (reg X)) whose register simultaneously carries a
+	 REG_DEAD note for that same register.  */
+      body = PATTERN (insn);
+      if (GET_CODE (body) == USE)
+	if (GET_CODE (XEXP (body, 0)) == REG)
+	  {
+	    int reg;
+
+	    reg = REGNO (XEXP (body, 0));
+	    if (find_regno_note (insn, REG_DEAD, reg))
+	      {
+		delete_insn (insn);
+	      }
+	  }
+    }
+
+  /* There's a good chance that since we last did CSE that we've rearranged
+     things in such a way that another go will win.  Do so now!  */
+  reload_cse_regs (first_insn);
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_REG_INFO | PROP_DEATH_NOTES);
+
+  /* Look for where absurd things are happening with DP.  */
+  mdr_try_dp_reload_elim (first_insn);
+
+  ip2k_reorg_in_progress = 0;
+  ip2k_reorg_completed = 1;
+
+  split_all_insns (0);
+
+  reload_cse_regs (first_insn);
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_REG_INFO | PROP_DEATH_NOTES);
+  if (flag_peephole2)
+    peephole2_optimize (NULL);
+
+  mdr_resequence_xy_yx (first_insn);
+  mdr_propagate_reg_equivs (first_insn);
+
+  /* Look for redundant set instructions.  These can occur when we split
+     instruction patterns and end up with the second half merging with
+     or being replaced by something that clobbers the first half.  */
+  for (insn = first_insn; insn; insn = next_nonnote_insn (insn))
+    {
+      if (GET_CODE (insn) == INSN)
+	{
+	  set = (GET_CODE (PATTERN (insn)) == SET) ? PATTERN (insn) : NULL_RTX;
+	  /* A QImode register set whose destination is marked REG_UNUSED
+	     has no visible effect and can be deleted.  */
+	  if ((set != NULL_RTX)
+	      && (GET_CODE (XEXP (set, 0)) == REG)
+	      && (GET_MODE (XEXP (set, 0)) == QImode)
+	      && (find_regno_note (insn, REG_UNUSED, REGNO (XEXP (set, 0)))))
+	    delete_insn (insn);
+	}
+    }
+
+  mdr_try_move_dp_reload (first_insn);
+  mdr_try_move_pushes (first_insn);
+
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+
+  mdr_try_propagate_move (first_insn);
+  mdr_resequence_xy_yx (first_insn);
+
+  /* Stage 1: allow DImode moves to be split into SImode pieces.  */
+  ip2k_reorg_split_dimode = 1;
+  split_all_insns (0);
+
+  mdr_try_remove_redundant_insns (first_insn);
+
+  mdr_try_propagate_move (first_insn);
+
+  reload_cse_regs (first_insn);
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+  if (flag_peephole2)
+    peephole2_optimize (NULL);
+
+  mdr_try_propagate_move (first_insn);
+
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+
+  /* Stage 2: allow SImode moves to be split into HImode pieces.  */
+  ip2k_reorg_split_simode = 1;
+  split_all_insns (0);
+
+  mdr_try_remove_redundant_insns (first_insn);
+
+  mdr_try_propagate_move (first_insn);
+
+  reload_cse_regs (first_insn);
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+  if (flag_peephole2)
+    peephole2_optimize (NULL);
+
+  mdr_try_propagate_move (first_insn);
+
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+
+  /* Stage 3: allow HImode splitting and QImode merging.  */
+  ip2k_reorg_split_himode = 1;
+  ip2k_reorg_merge_qimode = 1;
+  split_all_insns (0);
+
+  mdr_try_remove_redundant_insns (first_insn);
+  mdr_try_propagate_clr (first_insn);
+  mdr_try_propagate_move (first_insn);
+
+  mdr_try_dp_reload_elim (first_insn);
+  mdr_try_move_dp_reload (first_insn);
+
+  rebuild_jump_labels (first_insn);
+
+  /* Call to jump_optimize (...) was here, but now I removed it.  */
+
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+  if (flag_peephole2)
+    peephole2_optimize (NULL);
+
+  mdr_try_propagate_move (first_insn);
+
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+  mdr_try_remove_redundant_insns (first_insn);
+
+  mdr_try_propagate_clr (first_insn);
+  mdr_try_propagate_move (first_insn);
+
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+
+  /* Stage 4: finally allow QImode splits, then run WREG elimination.  */
+  ip2k_reorg_split_qimode = 1;
+  split_all_insns (0);
+
+  mdr_try_wreg_elim (first_insn);
+  mdr_try_propagate_move (first_insn);
+
+  find_basic_blocks (first_insn, max_reg_num (), 0);
+  life_analysis (first_insn, 0, PROP_FINAL);
+#endif
+}
+
+/* Returns a bit position if mask contains only a single set bit.
+   Returns -1 if there were zero or more than one set bits.  Only the
+   low 32 bits of MASK are examined.  */
+int
+find_one_set_bit_p (mask)
+     HOST_WIDE_INT mask;
+{
+  unsigned HOST_WIDE_INT n = mask;
+  int pos;
+
+  /* Scan from bit 31 downwards; when we meet the first set bit, every
+     bit below it must be clear or the mask isn't a power of two.  */
+  for (pos = 31; pos >= 0; pos--)
+    {
+      if (n & 0x80000000UL)
+	return (n & 0x7fffffffUL) ? -1 : pos;
+      n <<= 1;
+    }
+  return -1;
+}
+
+/* Returns a bit position if mask contains only a single clear bit.
+   Returns -1 if there were zero or more than one clear bits.  Only the
+   low 32 bits of MASK are examined.  */
+int
+find_one_clear_bit_p (mask)
+     HOST_WIDE_INT mask;
+{
+  unsigned HOST_WIDE_INT n = mask;
+  int pos;
+
+  /* Scan from bit 31 downwards, shifting ones in from the bottom so
+     already-examined positions read as set.  When we meet the first
+     clear bit, every bit below it must be set.  */
+  for (pos = 31; pos >= 0; pos--)
+    {
+      if ((n & 0x80000000UL) == 0UL)
+	return ((n & 0x7fffffffUL) != 0x7fffffffUL) ? -1 : pos;
+      n = (n << 1) | 1;
+    }
+  return -1;
+}
+
+\f
+/* Split a move into two smaller pieces.
+   NMODE indicates the new (reduced) mode and OMODE the original mode.
+   OPERANDS[0] is the original destination, OPERANDS[1] the original
+   source.  On return OPERANDS[2]/OPERANDS[4] are the new destinations
+   and OPERANDS[3]/OPERANDS[5] the new sources; the OPERANDS[2]/[3]
+   pair is the move that should be emitted first.  */
+
+void
+ip2k_split_words (nmode, omode, operands)
+     enum machine_mode nmode;
+     enum machine_mode omode;
+     rtx *operands;
+{
+  rtx dl, dh;			/* src/dest pieces.  */
+  rtx sl, sh;
+  int move_high_first = 0;	/* Assume no overlap.  */
+  int pushflag = 0;
+
+  switch (GET_CODE (operands[0]))	/* DEST */
+    {
+    case SUBREG:
+    case REG:
+      /* With a register destination, move the high half first whenever
+	 the source registers don't overlap the destination from below,
+	 so the first sub-move can't clobber the second's source.  */
+      if ((GET_CODE (operands[1]) == REG
+	   || GET_CODE (operands[1]) == SUBREG)
+	  && (true_regnum (operands[0]) <= true_regnum (operands[1])
+	      || (true_regnum (operands[1])
+	          + GET_MODE_SIZE (omode) - 1 < true_regnum (operands[0]))))
+	move_high_first = 1;
+
+      if (GET_CODE (operands[0]) == SUBREG)
+	{
+	  dl = simplify_gen_subreg (nmode, operands[0], omode,
+				    GET_MODE_SIZE (nmode));
+	  dh = simplify_gen_subreg (nmode, operands[0], omode, 0);
+	}
+      else if (GET_CODE (operands[0]) == REG && ! IS_PSEUDO_P (operands[0]))
+	{
+	  /* Hard register: the high half lives at the lower regno
+	     (big-endian register numbering).  */
+	  int r = REGNO (operands[0]);
+	  dh = gen_rtx_REG (nmode, r);
+	  dl = gen_rtx_REG (nmode, r + HARD_REGNO_NREGS (r, nmode));
+	}
+      else
+	{
+	  dh = gen_rtx_SUBREG (nmode, operands[0], 0);
+	  dl = gen_rtx_SUBREG (nmode, operands[0], GET_MODE_SIZE (nmode));
+	}
+      break;
+
+    case MEM:
+      switch (GET_CODE (XEXP (operands[0], 0)))
+	{
+	case POST_INC:
+	  abort ();
+	case POST_DEC:
+	  /* A stack push: both halves go through the same
+	     post-decrementing address.  */
+	  dl = dh = gen_rtx_MEM (nmode, XEXP (operands[0], 0));
+	  pushflag = 1;
+	  break;
+	default:
+	  dl = change_address (operands[0], nmode,
+			       plus_constant (XEXP (operands[0], 0),
+					      GET_MODE_SIZE (nmode)));
+	  dh = gen_rtx_MEM (nmode, XEXP (operands[0], 0));
+	}
+      break;
+    default:
+      abort ();
+    }
+
+  switch (GET_CODE (operands[1]))	/* SRC */
+    {
+    case REG:
+      if (! IS_PSEUDO_P (operands[1]))
+	{
+	  int r = REGNO (operands[1]);
+
+	  sh = gen_rtx_REG (nmode, r);
+	  sl = gen_rtx_REG (nmode, r + HARD_REGNO_NREGS (r, nmode));
+	}
+      else
+	{
+	  sh = gen_rtx_SUBREG (nmode, operands[1], 0);
+	  sl = gen_rtx_SUBREG (nmode, operands[1], GET_MODE_SIZE (nmode));
+	}
+      break;
+
+    case CONST_DOUBLE:
+      /* NOTE(review): a CONST_DOUBLE is never const0_rtx (that's a
+	 CONST_INT), so this test looks purely defensive -- confirm.  */
+      if (operands[1] == const0_rtx)
+	sh = sl = const0_rtx;
+      else
+	{
+	  if (GET_MODE (operands[0]) != DImode)
+	    {
+	      /* Float constant: split its 32-bit single-precision
+		 target image into two 16-bit halves.  */
+	      REAL_VALUE_TYPE rv;
+	      long value;
+
+	      REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+	      REAL_VALUE_TO_TARGET_SINGLE (rv, value);
+
+	      sh = gen_int_mode ((value >> 16) & 0xffff, nmode);
+	      sl = gen_int_mode (value & 0xffff, nmode);
+	    }
+	  else
+	    {
+	      sh = gen_int_mode (CONST_DOUBLE_HIGH (operands[1]), nmode);
+	      sl = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), nmode);
+	    }
+	}
+      break;
+
+    case CONST_INT:
+      if (operands[1] == const0_rtx)
+	sh = sl = const0_rtx;
+      else
+	{
+	  int val = INTVAL (operands[1]);
+	  int vl, vh;
+
+	  switch (nmode)
+	    {
+	    case QImode:
+	      vh = (val >> 8) & 0xff;
+	      vl = val & 0xff;
+	      break;
+
+	    case HImode:
+	      vh = (val >> 16) & 0xffff;
+	      vl = val & 0xffff;
+	      break;
+
+	    case SImode:
+	      if (val < 0)		/* sign extend  */
+		vh = -1;
+	      else
+		vh = 0;
+	      vl = val;			/* Give low 32 bits back.  */
+	      break;
+
+	    default:
+	      abort ();
+	    }
+
+	  sl = gen_int_mode (vl, nmode);
+	  sh = gen_int_mode (vh, nmode);
+	}
+      break;
+
+    case SUBREG:
+      sl = simplify_gen_subreg (nmode, operands[1], omode,
+				GET_MODE_SIZE (nmode));
+      sh = simplify_gen_subreg (nmode, operands[1], omode, 0);
+      break;
+
+    case MEM:
+      switch (GET_CODE (XEXP (operands[1], 0)))
+	{
+	case POST_DEC:
+	case POST_INC:
+	  abort ();
+	  break;
+
+	default:
+	  /* Worry about splitting stack pushes.  */
+	  if (pushflag && ip2k_address_uses_reg_p (operands[1], REG_SP))
+	    sl = sh = change_address (operands[1], nmode,
+				      plus_constant (XEXP (operands[1], 0),
+						     GET_MODE_SIZE (nmode)));
+	  else
+	    {
+	      sl = change_address (operands[1], nmode,
+				   plus_constant (XEXP (operands[1], 0),
+						  GET_MODE_SIZE (nmode)));
+	      sh = gen_rtx_MEM (nmode, XEXP (operands[1], 0));
+	    }
+	}
+      break;
+
+    default:
+      abort ();
+    }
+
+  /* Hand the halves back in the order they must be moved.  */
+  if (move_high_first)
+    {
+      operands[2] = dh;
+      operands[3] = sh;
+      operands[4] = dl;
+      operands[5] = sl;
+    }
+  else
+    {
+      operands[2] = dl;
+      operands[3] = sl;
+      operands[4] = dh;
+      operands[5] = sh;
+    }
+  return;
+}
+
+/* Get the low half of operand X in mode MODE (the narrower of the two
+   half modes).  X may be a register, SUBREG, CONST_INT, CONST_DOUBLE,
+   or a MEM without side-effect addressing; any other code aborts.
+   Fix: the CONST_INT case used to compute an unused high-half value
+   (dead store); it has been removed.  */
+rtx
+ip2k_get_low_half (x, mode)
+     rtx x;
+     enum machine_mode mode;
+{
+  switch (GET_CODE (x))
+    {
+    case REG:
+      if (! IS_PSEUDO_P (x))
+	{
+	  /* Hard register: the low half lives at the higher regno
+	     (big-endian register numbering).  */
+	  unsigned int r = REGNO (x);
+
+	  return gen_rtx_REG (mode, r + HARD_REGNO_NREGS (r, mode));
+	}
+      else
+	return gen_rtx_SUBREG (mode, x, GET_MODE_SIZE (mode));
+
+    case CONST_DOUBLE:
+      /* NOTE(review): a CONST_DOUBLE is never const0_rtx; this test
+	 looks purely defensive.  */
+      if (x == const0_rtx)
+	return const0_rtx;
+      else if (mode != SImode)
+	{
+	  /* Float constant: low 16 bits of the single-precision image.  */
+	  REAL_VALUE_TYPE rv;
+	  long value;
+
+	  REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+	  REAL_VALUE_TO_TARGET_SINGLE (rv, value);
+
+	  return gen_int_mode (value & 0xffff, mode);
+	}
+      else
+	return gen_int_mode (CONST_DOUBLE_LOW (x), mode);
+
+    case CONST_INT:
+      if (x == const0_rtx)
+	return const0_rtx;
+      else
+	{
+	  int val = INTVAL (x);
+	  int vl;
+
+	  switch (mode)
+	    {
+	    case QImode:
+	      vl = val & 0xff;
+	      break;
+
+	    case HImode:
+	      vl = val & 0xffff;
+	      break;
+
+	    case SImode:
+	      vl = val;		/* Give low 32 bits back.  */
+	      break;
+
+	    default:
+	      abort ();
+	    }
+
+	  return gen_int_mode (vl, mode);
+	}
+      break;
+
+    case SUBREG:
+      return simplify_gen_subreg (mode, x, GET_MODE (x), GET_MODE_SIZE (mode));
+
+    case MEM:
+      switch (GET_CODE (XEXP (x, 0)))
+	{
+	case POST_DEC:
+	case POST_INC:
+	  abort ();
+	  break;
+
+	default:
+	  /* Low half sits GET_MODE_SIZE (mode) bytes above the base.  */
+	  return change_address (x, mode,
+				 plus_constant (XEXP (x, 0),
+						GET_MODE_SIZE (mode)));
+	}
+      break;
+
+    default:
+      abort ();
+    }
+  return NULL_RTX;
+}
+
+/* Get the high half of operand X in mode MODE (the narrower of the two
+   half modes).  Mirror of ip2k_get_low_half.
+   Fix: the CONST_INT case used to compute an unused low-half value
+   (dead store); it has been removed.  */
+rtx
+ip2k_get_high_half (x, mode)
+     rtx x;
+     enum machine_mode mode;
+{
+  switch (GET_CODE (x))
+    {
+    case REG:
+      if (! IS_PSEUDO_P (x))
+	{
+	  /* Hard register: the high half is the lower-numbered register
+	     (big-endian register numbering).  */
+	  unsigned int r = REGNO (x);
+
+	  return gen_rtx_REG (mode, r);
+	}
+      else
+	return gen_rtx_SUBREG (mode, x, 0);
+
+    case CONST_DOUBLE:
+      /* NOTE(review): a CONST_DOUBLE is never const0_rtx; this test
+	 looks purely defensive.  */
+      if (x == const0_rtx)
+	return const0_rtx;
+      else if (mode != SImode)
+	{
+	  /* Float constant: high 16 bits of the single-precision image.  */
+	  REAL_VALUE_TYPE rv;
+	  long value;
+
+	  REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+	  REAL_VALUE_TO_TARGET_SINGLE (rv, value);
+
+	  return gen_int_mode ((value >> 16) & 0xffff, mode);
+	}
+      else
+	return gen_int_mode (CONST_DOUBLE_HIGH (x), mode);
+
+    case CONST_INT:
+      if (x == const0_rtx)
+	return const0_rtx;
+      else
+	{
+	  int val = INTVAL (x);
+	  int vh;
+
+	  switch (mode)
+	    {
+	    case QImode:
+	      vh = (val >> 8) & 0xff;
+	      break;
+
+	    case HImode:
+	      vh = (val >> 16) & 0xffff;
+	      break;
+
+	    case SImode:
+	      /* High word is the sign extension of the low 32 bits.  */
+	      vh = (val < 0) ? -1 : 0;
+	      break;
+
+	    default:
+	      abort ();
+	    }
+
+	  return gen_int_mode (vh, mode);
+	}
+      break;
+
+    case SUBREG:
+      return simplify_gen_subreg (mode, x, GET_MODE (x), 0);
+
+    case MEM:
+      switch (GET_CODE (XEXP (x, 0)))
+	{
+	case POST_DEC:
+	case POST_INC:
+	  abort ();
+	  break;
+
+	default:
+	  /* High half sits at the base address itself.  */
+	  return change_address (x, mode, plus_constant (XEXP (x, 0), 0));
+	}
+      break;
+
+    default:
+      abort ();
+    }
+  return NULL_RTX;
+}
+
+/* Does address X use register R.  Only valid for REG_SP, REG_DP, REG_IP
+   or REG_FP.  */
+
+int
+ip2k_address_uses_reg_p (x, r)
+     rtx x;
+     unsigned int r;
+{
+  if (GET_CODE (x) != MEM)
+    return 0;
+
+  /* Walk down the address expression until we reach a register or
+     something we can't decompose.  */
+  x = XEXP (x, 0);
+
+  for (;;)
+    {
+      switch (GET_CODE (x))
+	{
+	case POST_DEC:
+	case POST_INC:
+	case PRE_DEC:
+	case PRE_INC:
+	  /* Look through auto-modify wrappers.  */
+	  x = XEXP (x, 0);
+	  break;
+
+	case PLUS:
+	  /* Check the displacement side, then keep walking the base.  */
+	  if (ip2k_address_uses_reg_p (XEXP (x, 1), r))
+	    return 1;
+
+	  x = XEXP (x, 0);
+	  break;
+
+	case SUBREG:
+	  /* Ignore subwords.  */
+	  x = SUBREG_REG (x);
+	  break;
+
+	case REG:
+	  /* Have to consider that r might be LSB of a pointer reg.  */
+	  return (REGNO (x) == r || REGNO (x) == r - 1);
+
+	case MEM:
+	  /* We might be looking at a (mem:BLK (mem (...)))  */
+	  x = XEXP (x, 0);
+	  break;
+
+	default:
+	  return 0;
+	}
+    }
+}
+
+/* Does the queried XEXP not use a particular register?  If we're certain
+   that it doesn't then we return TRUE otherwise we assume FALSE.  */
+
+int
+ip2k_xexp_not_uses_reg_p (x, r, rsz)
+     rtx x;
+     unsigned int r;
+     int rsz;
+{
+  switch (GET_CODE (x))
+    {
+    case REG:
+      {
+	/* Disjoint iff the register's byte span [REGNO, REGNO + msz)
+	   does not intersect the queried span [r, r + rsz).  */
+	int msz = GET_MODE_SIZE (GET_MODE (x));
+
+	if (REGNO (x) + msz - 1 < r)
+	  return 1;
+	return REGNO (x) > (r + rsz - 1);
+      }
+
+    case MEM:
+      return ! ip2k_address_uses_reg_p (x, r);
+
+    case LABEL_REF:
+    case SYMBOL_REF:
+    case CONST:
+    case CONST_INT:
+    case CONST_DOUBLE:
+    case CC0:
+    case PC:
+      /* Constants and machine-status rtxen cannot reference a register.  */
+      return 1;
+
+    default:
+      /* Anything else: play safe and assume it might use the register.  */
+      return 0;
+    }
+}
+
+/* Does the queried XEXP not use a particular register?  If we're certain
+   that it doesn't then we return TRUE otherwise we assume FALSE.
+   Fix: ternary codes (class '3', e.g. if_then_else) previously had only
+   operand 0 checked, which could falsely claim certainty when operand 1
+   or 2 used the register; FALSE is always a safe answer, so all three
+   operands are now checked.  */
+
+int
+ip2k_composite_xexp_not_uses_reg_p (x, r, rsz)
+     rtx x;
+     unsigned int r;
+     int rsz;
+{
+  switch (GET_RTX_CLASS (GET_CODE (x)))
+    {
+    case 'b':			/* Bit-field codes: three operands.  */
+    case '3':			/* Ternary codes: three operands.  */
+      return (ip2k_composite_xexp_not_uses_reg_p (XEXP (x, 0), r, rsz)
+	      && ip2k_composite_xexp_not_uses_reg_p (XEXP (x, 1), r, rsz)
+	      && ip2k_composite_xexp_not_uses_reg_p (XEXP (x, 2), r, rsz));
+
+    case '2':			/* Binary, commutative and comparison
+				   codes: two operands.  */
+    case 'c':
+    case '<':
+      return (ip2k_composite_xexp_not_uses_reg_p (XEXP (x, 0), r, rsz)
+	      && ip2k_composite_xexp_not_uses_reg_p (XEXP (x, 1), r, rsz));
+
+    case '1':			/* Unary codes: one operand.  */
+      return ip2k_composite_xexp_not_uses_reg_p (XEXP (x, 0), r, rsz);
+
+    default:
+      /* A leaf: let the simple test decide.  */
+      return ip2k_xexp_not_uses_reg_p (x, r, rsz);
+    }
+}
+
+/* Does the queried XEXP not use CC0?  If we're certain that
+   it doesn't then we return TRUE otherwise we assume FALSE.
+   Fix: as with ip2k_composite_xexp_not_uses_reg_p, ternary codes
+   (class '3') previously had only operand 0 checked; all three
+   operands are now checked, which can only make the answer more
+   conservative (FALSE is always safe).  */
+
+int
+ip2k_composite_xexp_not_uses_cc0_p (x)
+     rtx x;
+{
+  switch (GET_RTX_CLASS (GET_CODE (x)))
+    {
+    case 'b':			/* Bit-field codes: three operands.  */
+    case '3':			/* Ternary codes: three operands.  */
+      return (ip2k_composite_xexp_not_uses_cc0_p (XEXP (x, 0))
+	      && ip2k_composite_xexp_not_uses_cc0_p (XEXP (x, 1))
+	      && ip2k_composite_xexp_not_uses_cc0_p (XEXP (x, 2)));
+
+    case '2':			/* Binary, commutative and comparison
+				   codes: two operands.  */
+    case 'c':
+    case '<':
+      return (ip2k_composite_xexp_not_uses_cc0_p (XEXP (x, 0))
+	      && ip2k_composite_xexp_not_uses_cc0_p (XEXP (x, 1)));
+
+    case '1':			/* Unary codes: one operand.  */
+      return ip2k_composite_xexp_not_uses_cc0_p (XEXP (x, 0));
+
+    default:
+      /* A leaf: only an actual CC0 rtx uses CC0.  */
+      return GET_CODE (x) != CC0;
+    }
+}
+
+/* Valid destination for a split move: any nonimmediate operand or a
+   stack push.  */
+int
+ip2k_split_dest_operand (x, mode)
+     rtx x;
+     enum machine_mode mode;
+{
+  if (nonimmediate_operand (x, mode))
+    return 1;
+
+  return push_operand (x, mode);
+}
+
+/* A register operand that is not one of the pointer registers
+   (IP, DP or SP).  */
+int
+ip2k_nonptr_operand (x, mode)
+     rtx x;
+     enum machine_mode mode;
+{
+  if (! register_operand (x, mode))
+    return 0;
+
+  return ! ip2k_ptr_operand (x, mode);
+}
+
+/* Is X a reference to IP or DP or SP?  */
+
+int
+ip2k_ptr_operand (x, mode)
+     rtx x;
+     enum machine_mode mode;
+
+{
+  rtx reg = (GET_CODE (x) == SUBREG) ? SUBREG_REG (x) : x;
+
+  if (! REG_P (reg))
+    return 0;
+
+  /* Pointer registers only qualify in HImode (or when no mode is
+     requested).  */
+  if (mode != HImode && mode != VOIDmode)
+    return 0;
+
+  return (REGNO (reg) == REG_IP
+	  || REGNO (reg) == REG_DP
+	  || REGNO (reg) == REG_SP);
+}
+
+/* Recognize the stack pointer register.  */
+int
+ip2k_sp_operand (x, mode)
+     rtx x;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+
+{
+  if (! REG_P (x))
+    return 0;
+
+  return REGNO (x) == REG_SP;
+}
+
+/* Recognize a single-byte memory reference through the IP register.  */
+int
+ip2k_ip_operand (x, mode)
+     rtx x;
+     enum machine_mode mode;
+
+{
+  rtx addr;
+
+  if (GET_CODE (x) != MEM)
+    return 0;
+
+  addr = XEXP (x, 0);
+
+  /* Accept (plus (reg) (const_int 0)) as a plain register address.  */
+  if (GET_CODE (addr) == PLUS && XEXP (addr, 1) == const0_rtx)
+    addr = XEXP (addr, 0);
+
+  if (! REG_P (addr))
+    return 0;
+
+  if (GET_MODE_SIZE (mode) > 1)
+    return 0;			/* Can't access offset bytes.  */
+
+  return REGNO (addr) == REG_IP;
+}
+
+/* Is X a memory address suitable for SP or DP relative addressing?
+   Accepts (mem (reg)) or (mem (plus (reg) (const_int))) where the hard
+   register is SP or DP (or IP for single-byte accesses with no offset)
+   and the offset is below 128.  */
+int
+ip2k_short_operand (x, mode)
+     rtx x;
+     enum machine_mode mode;
+{
+  int r;
+  unsigned int offs = 0;
+
+  if (! memory_operand (x, mode))
+    return 0;			/* Got to be a memory address.  */
+
+  x = XEXP (x, 0);
+  switch (GET_CODE (x))
+    {
+    default:
+      return 0;
+
+    case PLUS:
+      if (! REG_P (XEXP (x, 0))
+	  || GET_CODE (XEXP (x, 1)) != CONST_INT)
+	return 0;
+
+      /* OFFS is unsigned, so a negative INTVAL wraps to a huge value
+	 and is rejected by the test below -- presumably intentional,
+	 since negative displacements aren't encodable; confirm.  */
+      offs = INTVAL (XEXP (x, 1));
+
+      if (128 <= offs)
+	return 0;
+
+      x = XEXP (x, 0);
+
+      /* fall thru  */
+
+    case REG:
+      if (IS_PSEUDO_P (x))
+	return 0;		/* Optimistic - doesn't work.  */
+
+      r = REGNO (x);
+
+      /* For 'S' constraint, we presume that no IP adjustment
+	 simulation is performed - so only QI mode allows IP to be a
+	 short offset address.  All other IP references must be
+	 handled by 'R' constraints.  */
+      if (r == REG_IP && offs == 0 && GET_MODE_SIZE (mode) <= 1)
+	return 1;
+
+      return (r == REG_SP || r == REG_DP);
+    }
+}
+
+/* A register (possibly wrapped in a SUBREG) other than the stack
+   pointer.  */
+int
+ip2k_nonsp_reg_operand (x, mode)
+     rtx x;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  rtx reg = (GET_CODE (x) == SUBREG) ? SUBREG_REG (x) : x;
+
+  return REG_P (reg) && REGNO (reg) != REG_SP;
+}
+
+/* General operand: a short (SP/DP-relative) memory reference, a SUBREG
+   of a register, or any non-SP register.  */
+int
+ip2k_gen_operand (x, mode)
+     rtx x;
+     enum machine_mode mode;
+{
+  if (ip2k_short_operand (x, mode))
+    return 1;
+
+  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
+    return 1;
+
+  return ip2k_nonsp_reg_operand (x, mode);
+}
+
+/* Implement the 'S', 'R' and 'T' extra constraint letters.  */
+int
+ip2k_extra_constraint (x, c)
+     rtx x;
+     int c;
+{
+  if (c == 'S')			/* Allow offset in stack frame...  */
+    return ip2k_short_operand (x, GET_MODE (x));
+
+  if (c == 'R')			/* IP-relative memory reference.  */
+    return ip2k_ip_operand (x, GET_MODE (x));
+
+  if (c == 'T')			/* Constant int or .data address.  */
+    return CONSTANT_P (x) && is_regfile_address (x);
+
+  return 0;
+}
+
+/* Nonzero if OP is a unary rtx operator, optionally restricted to
+   mode MODE (VOIDmode matches any mode).  */
+int
+ip2k_unary_operator (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (mode != VOIDmode && GET_MODE (op) != mode)
+    return 0;
+
+  return GET_RTX_CLASS (GET_CODE (op)) == '1';
+}
+
+/* Nonzero if OP is a binary rtx operator (commutative or not),
+   optionally restricted to mode MODE (VOIDmode matches any mode).  */
+int
+ip2k_binary_operator (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  int rclass;
+
+  if (mode != VOIDmode && GET_MODE (op) != mode)
+    return 0;
+
+  rclass = GET_RTX_CLASS (GET_CODE (op));
+  return rclass == 'c' || rclass == '2';
+}
+
+/* We define an IP2k symbol ref to be either a direct reference or one
+   with a constant offset.  */
+int
+ip2k_symbol_ref_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) == SYMBOL_REF)
+    return 1;
+
+  /* (const (plus (symbol_ref) ...)) -- a symbol plus an offset.  */
+  return (GET_CODE (op) == CONST
+	  && GET_CODE (XEXP (op, 0)) == PLUS
+	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF);
+}
+
+/* Nonzero if OP is a comparison left unchanged by signed_condition,
+   i.e. a signed (or sign-insensitive) comparison.  */
+int
+ip2k_signed_comparison_operator (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (! comparison_operator (op, mode))
+    return 0;
+
+  return signed_condition (GET_CODE (op)) == GET_CODE (op);
+}
+
+/* Nonzero if OP is a comparison left unchanged by unsigned_condition,
+   i.e. an unsigned (or sign-insensitive) comparison.  */
+int
+ip2k_unsigned_comparison_operator (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (! comparison_operator (op, mode))
+    return 0;
+
+  return unsigned_condition (GET_CODE (op)) == GET_CODE (op);
+}
--- /dev/null
+/* Definitions of target machine for GNU compiler,
+ For Ubicom IP2022 Communications Controller
+
+ Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Red Hat, Inc and Ubicom, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+\f
+/* Set up System V.4 (aka ELF) defaults. */
+
+#include "elfos.h"
+#undef ASM_SPEC /* But we have a GAS assembler. */
+
+#define CPP_PREDEFINES \
+ "-DIP2K -D__INT_MAX__=SHRT_MAX -D_DOUBLE_IS_32BITS -D__BUFSIZ__=512 -D__FILENAME_MAX__=128"
+/* Define this to be a string constant containing `-D' options to
+ define the predefined macros that identify this machine and system.
+ These macros will be predefined unless the `-ansi' option is
+ specified.
+
+ In addition, a parallel set of macros are predefined, whose names
+ are made by appending `__' at the beginning and at the end. These
+ `__' macros are permitted by the ANSI standard, so they are
+ predefined regardless of whether `-ansi' is specified.
+
+ For example, on the Sun, one can use the following value:
+
+ "-Dmc68000 -Dsun -Dunix"
+
+ The result is to define the macros `__mc68000__', `__sun__' and
+ `__unix__' unconditionally, and the macros `mc68000', `sun' and
+ `unix' provided `-ansi' is not specified. */
+
+
+/* This declaration should be present. */
+extern int target_flags;
+
+/* `TARGET_...'
+ This series of macros is to allow compiler command arguments to
+ enable or disable the use of optional features of the target
+ machine. For example, one machine description serves both the
+ 68000 and the 68020; a command argument tells the compiler whether
+ it should use 68020-only instructions or not. This command
+ argument works by means of a macro `TARGET_68020' that tests a bit
+ in `target_flags'.
+
+ Define a macro `TARGET_FEATURENAME' for each such option. Its
+ definition should test a bit in `target_flags'; for example:
+
+ #define TARGET_68020 (target_flags & 1)
+
+ One place where these macros are used is in the
+ condition-expressions of instruction patterns. Note how
+ `TARGET_68020' appears frequently in the 68000 machine description
+ file, `m68k.md'. Another place they are used is in the
+ definitions of the other macros in the `MACHINE.h' file. */
+
+
+
+#define TARGET_SWITCHES {{"",0, NULL}}
+/* This macro defines names of command options to set and clear bits
+ in `target_flags'. Its definition is an initializer with a
+ subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ option name, and a number, which contains the bits to set in
+ `target_flags'. A negative number says to clear bits instead; the
+ negative of the number is which bits to clear. The actual option
+ name is made by appending `-m' to the specified name.
+
+ One of the subgroupings should have a null string. The number in
+ this grouping is the default value for `target_flags'. Any target
+ options act starting with that value.
+
+ Here is an example which defines `-m68000' and `-m68020' with
+ opposite meanings, and picks the latter as the default:
+
+ #define TARGET_SWITCHES \
+ { { "68020", 1}, \
+ { "68000", -1}, \
+ { "", 1}} */
+
+
+/* This macro is similar to `TARGET_SWITCHES' but defines names of
+ command options that have values. Its definition is an
+ initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ fixed part of the option name, and the address of a variable. The
+ variable, type `char *', is set to the variable part of the given
+ option if the fixed part matches. The actual option name is made
+ by appending `-m' to the specified name.
+
+ Here is an example which defines `-mshort-data-NUMBER'. If the
+ given option is `-mshort-data-512', the variable `m88k_short_data'
+ will be set to the string `"512"'.
+
+ extern char *m88k_short_data;
+ #define TARGET_OPTIONS \
+ { { "short-data-", &m88k_short_data } } */
+
+#define TARGET_VERSION fprintf (stderr, " (ip2k, GNU assembler syntax)")
+/* This macro is a C statement to print on `stderr' a string
+ describing the particular machine description choice. Every
+ machine description should define `TARGET_VERSION'. For example:
+
+ #ifdef MOTOROLA
+ #define TARGET_VERSION \
+ fprintf (stderr, " (68k, Motorola syntax)")
+ #else
+ #define TARGET_VERSION \
+ fprintf (stderr, " (68k, MIT syntax)")
+ #endif */
+
+/* Caller-saves is not a win for the IP2K. Pretty much anywhere that
+ a register is permitted allows SP-relative addresses too.
+
+ This machine doesn't have PIC addressing modes, so disable that also. */
+
+#define OVERRIDE_OPTIONS \
+ do { \
+ flag_caller_saves = 0; \
+ flag_pic = 0; \
+ } while (0)
+
+/* `OVERRIDE_OPTIONS'
+ Sometimes certain combinations of command options do not make
+ sense on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ Don't use this macro to turn on various extra optimizations for
+ `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
+
+/* Put each function in its own section so that PAGE-instruction
+ relaxation can do its best. */
+#define OPTIMIZATION_OPTIONS(LEVEL, SIZEFLAG) \
+ do { \
+ if ((LEVEL) || (SIZEFLAG)) \
+ flag_function_sections = 1; \
+ } while (0)
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit. */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int'; */
+#define BITS_PER_WORD 8
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (BITS_PER_WORD / BITS_PER_UNIT)
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 16
+
+/* Maximum size of a reasonable data type -- DImode or DFmode...  */
+#define MAX_FIXED_MODE_SIZE 64
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 8
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 16
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 8
+
+/* No data type wants to be aligned rounder than this. */
+
+#define BIGGEST_ALIGNMENT 8
+
+#define STRICT_ALIGNMENT 0
+
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* A C expression for the size in bits of the type `int' on the
+ target machine. If you don't define this, the default is one word. */
+#undef INT_TYPE_SIZE
+#define INT_TYPE_SIZE 16
+
+
+/* A C expression for the size in bits of the type `short' on the
+ target machine. If you don't define this, the default is half a
+ word. (If this would be less than one storage unit, it is rounded
+ up to one unit.) */
+#undef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE 16
+
+/* A C expression for the size in bits of the type `long' on the
+ target machine. If you don't define this, the default is one word. */
+#undef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE 32
+
+
+/* Maximum number for the size in bits of the type `long' on the
+ target machine. If this is undefined, the default is
+ `LONG_TYPE_SIZE'. Otherwise, it is the constant value that is the
+ largest value that `LONG_TYPE_SIZE' can have at run-time. This is
+ used in `cpp'. */
+#define MAX_LONG_TYPE_SIZE 32
+
+/* A C expression for the size in bits of the type `long long' on the
+ target machine. If you don't define this, the default is two
+ words. If you want to support GNU Ada on your machine, the value
+ of macro must be at least 64. */
+#undef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE 64
+
+#undef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE 8
+/* A C expression for the size in bits of the type `char' on the
+ target machine. If you don't define this, the default is one
+ quarter of a word. (If this would be less than one storage unit,
+ it is rounded up to one unit.) */
+
+#undef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE 32
+/* A C expression for the size in bits of the type `float' on the
+ target machine. If you don't define this, the default is one word. */
+
+#undef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE 32
+/* A C expression for the size in bits of the type `double' on the
+ target machine. If you don't define this, the default is two
+ words. (Note that on this target `double' is 32 bits, i.e. the
+ same size as `float'.) */
+
+
+/* A C expression for the size in bits of the type `long double' on
+ the target machine. If you don't define this, the default is two
+ words. (Here 32 bits as well, matching `float' and `double'.) */
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 32
+
+#define DEFAULT_SIGNED_CHAR 1
+/* An expression whose value is 1 or 0, according to whether the type
+ `char' should be signed or unsigned by default. The user can
+ always override this default with the options `-fsigned-char' and
+ `-funsigned-char'. */
+
+/* #define DEFAULT_SHORT_ENUMS 1
+ This was the default for the IP2k, but gcc has a bug (as of 17th May
+ 2001) in the way that library calls to the memory checker functions
+ are issued, which screws things up if an enum is not equivalent to
+ an int. */
+/* `DEFAULT_SHORT_ENUMS'
+ A C expression to determine whether to give an `enum' type only as
+ many bytes as it takes to represent the range of possible values
+ of that type. A nonzero value means to do that; a zero value
+ means all `enum' types should be allocated like `int'.
+
+ If you don't define the macro, the default is 0. */
+
+#define SIZE_TYPE "unsigned int"
+/* A C expression for a string describing the name of the data type
+ to use for size values. The typedef name `size_t' is defined
+ using the contents of the string.
+
+ The string can contain more than one keyword. If so, separate
+ them with spaces, and write first any length keyword, then
+ `unsigned' if appropriate, and finally `int'. The string must
+ exactly match one of the data type names defined in the function
+ `init_decl_processing' in the file `c-decl.c'. You may not omit
+ `int' or change the order--that would cause the compiler to crash
+ on startup.
+
+ If you don't define this macro, the default is `"long unsigned
+ int"'. */
+
+#define PTRDIFF_TYPE "int"
+/* A C expression for a string describing the name of the data type
+ to use for the result of subtracting two pointers. The typedef
+ name `ptrdiff_t' is defined using the contents of the string. See
+ `SIZE_TYPE' above for more information.
+
+ If you don't define this macro, the default is `"long int"'. */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+/* A C expression for the size in bits of the data type for wide
+ characters. This is used in `cpp', which cannot make use of
+ `WCHAR_TYPE'. (16 bits, consistent with INT_TYPE_SIZE above.) */
+
+/* Port-local convenience macro: the width of one hard register, in
+ storage units. */
+#define HARD_REG_SIZE (UNITS_PER_WORD)
+/* Standard register usage.
+
+ for the IP2K, we are going to have a LOT of registers, but only some of them
+ are named. */
+
+#define FIRST_PSEUDO_REGISTER (0x104) /* Skip over physical regs, VFP, AP. */
+
+/* Number of hardware registers known to the compiler. They receive
+ numbers 0 through `FIRST_PSEUDO_REGISTER-1'; thus, the first
+ pseudo register's number really is assigned the number
+ `FIRST_PSEUDO_REGISTER'. */
+
+/* The 16-bit pointer registers are register pairs: the high byte
+ lives at the base number and the low byte immediately after it. */
+#define REG_IP 0x4
+#define REG_IPH REG_IP
+#define REG_IPL 0x5
+
+#define REG_SP 0x6
+#define REG_SPH REG_SP
+#define REG_SPL 0x7
+
+#define REG_PCH 0x8
+#define REG_PCL 0x9
+
+#define REG_W 0xa
+#define REG_STATUS 0xb
+
+#define REG_DP 0xc
+#define REG_DPH REG_DP
+#define REG_DPL 0xd
+
+#define REG_MULH 0xf /* NOTE(review): 0x0e is left unnamed here -- confirm against the IP2k register map. */
+
+#define REG_CALLH 0x7e /* Call-stack readout. */
+#define REG_CALLL 0x7f
+
+
+#define REG_RESULT 0x80 /* Result register (up to 8 bytes). */
+#define REG_FP 0xfd /* 2 bytes for FRAME chain */
+
+#define REG_ZERO 0xff /* Initialized to zero by runtime. */
+
+#define REG_VFP 0x100 /* Virtual frame pointer. */
+#define REG_AP 0x102 /* Virtual arg pointer. */
+
+/* Status register bits. */
+#define Z_FLAG 0x2
+#define DC_FLAG 0x1
+#define C_FLAG 0x0
+
+/* 1 = fixed (unavailable to the allocator). Per the table below only
+ IP (0x04/0x05), DP (0x0c/0x0d) and the block 0x80..0x9f are
+ allocatable; everything else, including the virtual regs
+ 0x100..0x103, is fixed. */
+#define FIXED_REGISTERS {\
+1,1,1,1,0,0,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/* r0.. r31*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/* r32.. r63*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/* r64.. r95*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/* r96..r127*/\
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,/*r128..r159*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/*r160..r191*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/*r192..r223*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/*r224..r255*/\
+1,1,1,1}
+
+/* An initializer that says which registers are used for fixed
+ purposes all throughout the compiled code and are therefore not
+ available for general allocation. These would include the stack
+ pointer, the frame pointer (except on machines where that can be
+ used as a general register when no frame pointer is needed), the
+ program counter on machines where that is considered one of the
+ addressable registers, and any other numbered register with a
+ standard use.
+
+ This information is expressed as a sequence of numbers, separated
+ by commas and surrounded by braces. The Nth number is 1 if
+ register N is fixed, 0 otherwise.
+
+ The table initialized from this macro, and the table initialized by
+ the following one, may be overridden at run time either
+ automatically, by the actions of the macro
+ `CONDITIONAL_REGISTER_USAGE', or by the user with the command
+ options `-ffixed-REG', `-fcall-used-REG' and `-fcall-saved-REG'. */
+
+/* All 1s: every register is clobbered by a function call; there are
+ no call-saved hard registers on this target. */
+#define CALL_USED_REGISTERS { \
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/* r0.. r31*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/* r32.. r63*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/* r64.. r95*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/* r96..r127*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/*r128..r159*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/*r160..r191*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/*r192..r223*/\
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,/*r224..r255*/\
+1,1,1,1}
+
+/* Like `FIXED_REGISTERS' but has 1 for each register that is
+ clobbered (in general) by function calls as well as for fixed
+ registers. This macro therefore identifies the registers that are
+ not available for general allocation of values that must live
+ across function calls.
+
+ If a register has 0 in `CALL_USED_REGISTERS', the compiler
+ automatically saves it on function entry and restores it on
+ function exit, if the register is used within the function. */
+
+#define NON_SAVING_SETJMP 0
+/* If this macro is defined and has a nonzero value, it means that
+ `setjmp' and related functions fail to save the registers, or that
+ `longjmp' fails to restore them. To compensate, the compiler
+ avoids putting variables in registers in functions that use
+ `setjmp'. Zero here: setjmp/longjmp are assumed to behave normally. */
+
+/* Allocation order: prefer the general registers from 0x88 upward,
+ then the result block 0x80..0x87, then the rest of high memory;
+ the special-purpose low registers (SP, PC, W, STATUS, DP, IP, ...)
+ and the virtual regs 0x100..0x103 come last. */
+#define REG_ALLOC_ORDER { \
+ 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f, \
+ 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97, \
+ 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f, \
+ 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87, \
+ 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7, \
+ 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf, \
+ 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7, \
+ 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf, \
+ 0xc0,0xc1,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7, \
+ 0xc8,0xc9,0xca,0xcb,0xcc,0xcd,0xce,0xcf, \
+ 0xd0,0xd1,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7, \
+ 0xd8,0xd9,0xda,0xdb,0xdc,0xdd,0xde,0xdf, \
+ 0xe0,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7, \
+ 0xe8,0xe9,0xea,0xeb,0xec,0xed,0xee,0xef, \
+ 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7, \
+ 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff, \
+ 0x00,0x01,0x02,0x03,0x0c,0x0d,0x06,0x07, \
+ 0x08,0x09,0x0a,0x0b,0x04,0x05,0x0e,0x0f, \
+ 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17, \
+ 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f, \
+ 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27, \
+ 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f, \
+ 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37, \
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f, \
+ 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47, \
+ 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f, \
+ 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57, \
+ 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f, \
+ 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67, \
+ 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f, \
+ 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77, \
+ 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f, \
+ 0x100,0x101,0x102,0x103}
+
+/* If defined, an initializer for a vector of integers, containing the
+ numbers of hard registers in the order in which GNU CC should
+ prefer to use them (from most preferred to least).
+
+ If this macro is not defined, registers are used lowest numbered
+ first (all else being equal).
+
+ One use of this macro is on machines where the highest numbered
+ registers must always be saved and the save-multiple-registers
+ instruction supports only sequences of consecutive registers. On
+ such machines, define `REG_ALLOC_ORDER' to be an initializer that
+ lists the highest numbered allocatable register first. */
+
+/* Let the port compute the local allocation order at run time
+ (ip2k_init_local_alloc is presumably defined in ip2k.c). */
+#define ORDER_REGS_FOR_LOCAL_ALLOC ip2k_init_local_alloc (reg_alloc_order)
+/* A C statement (sans semicolon) to choose the order in which to
+ allocate hard registers for pseudo-registers local to a basic
+ block.
+
+ Store the desired register order in the array `reg_alloc_order'.
+ Element 0 should be the register to allocate first; element 1, the
+ next register; and so on.
+
+ The macro body should not assume anything about the contents of
+ `reg_alloc_order' before execution of the macro.
+
+ On most machines, it is not necessary to define this macro. */
+
+/* Are we allowed to rename registers? For some reason, regrename was
+ changing DP to IP (when it appeared in addresses like (plus:HI
+ (reg: DP) (const_int 37))) - and that's bad because IP doesn't
+ permit offsets! So: never rename away from DPH; IPH may only
+ become DPH; and IPL/DPL are never valid rename targets. */
+
+#define HARD_REGNO_RENAME_OK(REG, NREG) \
+ (((REG) == REG_DPH) ? 0 \
+ : ((REG) == REG_IPH) ? ((NREG) == REG_DPH) \
+ : (((NREG) == REG_IPL) || ((NREG) == REG_DPL)) ? 0 : 1)
+
+/* Consecutive hard regs needed for a value of MODE: the size of MODE
+ rounded up to a whole number of UNITS_PER_WORD-sized registers. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* A C expression for the number of consecutive hard registers,
+ starting at register number REGNO, required to hold a value of mode
+ MODE.
+
+ On a machine where all registers are exactly one word, a suitable
+ definition of this macro is
+
+ #define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)) */
+
+/* Any hard register may hold a value of any machine mode. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+/* A C expression that is nonzero if it is permissible to store a
+ value of mode MODE in hard register number REGNO (or in several
+ registers starting with that one). For a machine where all
+ registers are equivalent, a suitable definition is
+
+ #define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+
+ It is not necessary for this macro to check for the numbers of
+ fixed registers, because the allocation mechanism considers them
+ to be always occupied.
+
+ On some machines, double-precision values must be kept in even/odd
+ register pairs. The way to implement that is to define this macro
+ to reject odd register numbers for such modes.
+
+ The minimum requirement for a mode to be OK in a register is that
+ the `movMODE' instruction pattern support moves between the
+ register and any other hard register for which the mode is OK; and
+ that moving a value into the register and back out not alter it.
+
+ Since the same instruction used to move `SImode' will work for all
+ narrower integer modes, it is not necessary on any machine for
+ `HARD_REGNO_MODE_OK' to distinguish between these modes, provided
+ you define patterns `movhi', etc., to take advantage of this. This
+ is useful because of the interaction between `HARD_REGNO_MODE_OK'
+ and `MODES_TIEABLE_P'; it is very desirable for all integer modes
+ to be tieable.
+
+ Many machines have special registers for floating point arithmetic.
+ Often people assume that floating point machine modes are allowed
+ only in floating point registers. This is not true. Any
+ registers that can hold integers can safely *hold* a floating
+ point machine mode, whether or not floating arithmetic can be done
+ on it in those registers. Integer move instructions can be used
+ to move the values.
+
+ On some machines, though, the converse is true: fixed-point machine
+ modes may not go in floating registers. This is true if the
+ floating registers normalize any value stored in them, because
+ storing a non-floating value there would garble it. In this case,
+ `HARD_REGNO_MODE_OK' should reject fixed-point machine modes in
+ floating registers. But if the floating registers do not
+ automatically normalize, if you can store any bit pattern in one
+ and retrieve it unchanged without a trap, then any machine mode
+ may go in a floating register, so you can define this macro to say
+ so.
+
+ The primary significance of special floating registers is rather
+ that they are the registers acceptable in floating point arithmetic
+ instructions. However, this is of no concern to
+ `HARD_REGNO_MODE_OK'. You handle it by writing the proper
+ constraints for those instructions.
+
+ On some machines, the floating registers are especially slow to
+ access, so that it is better to store a value in a stack frame
+ than in such a register if floating point arithmetic is not being
+ done. As long as the floating registers are not in class
+ `GENERAL_REGS', they will not be used unless some pattern's
+ constraint asks for one. */
+
+/* Only QImode and HImode are tied to each other; see the note below
+ for the rejected more general definition. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (((MODE1) == QImode && (MODE2) == HImode) \
+ || ((MODE2) == QImode && (MODE1) == HImode))
+/* We originally had this as follows - this isn't a win on the IP2k
+ though as registers just get in our way!
+
+ #define MODES_TIEABLE_P(MODE1, MODE2) \
+ (((MODE1) > HImode && (MODE2) == HImode)
+ || ((MODE1) == HImode && (MODE2) > HImode)) */
+
+/* A C expression that is nonzero if it is desirable to choose
+ register allocation so as to avoid move instructions between a
+ value of mode MODE1 and a value of mode MODE2.
+
+ If `HARD_REGNO_MODE_OK (R, MODE1)' and `HARD_REGNO_MODE_OK (R,
+ MODE2)' are ever different for any R, then `MODES_TIEABLE_P (MODE1,
+ MODE2)' must be zero. */
+
+enum reg_class {
+ NO_REGS,
+ DPH_REGS, /* High byte of DP. */
+ DPL_REGS, /* Low byte of DP. */
+ DP_REGS, /* Both halves of DP. */
+ SP_REGS, /* Both halves of SP. */
+ IPH_REGS, /* High byte of IP. */
+ IPL_REGS, /* Low byte of IP. */
+ IP_REGS, /* Both halves of IP. */
+ DP_SP_REGS, /* DP and SP together. */
+ PTR_REGS, /* All pointer regs: IP, SP, DP. */
+ NONPTR_REGS, /* Everything except the pointer regs. */
+ NONSP_REGS, /* Everything except SP. */
+ GENERAL_REGS,
+ ALL_REGS = GENERAL_REGS,
+ LIM_REG_CLASSES
+};
+
+/* An enumeral type that must be defined with all the register class
+ names as enumeral values. `NO_REGS' must be first. `ALL_REGS'
+ must be the last register class, followed by one more enumeral
+ value, `LIM_REG_CLASSES', which is not a register class but rather
+ tells how many classes there are.
+
+ Each register class has a number, which is the value of casting
+ the class name to type `int'. The number serves as an index in
+ many of the tables described below. */
+
+
+/* Number of distinct register classes (see enum reg_class above). */
+#define N_REG_CLASSES (int)LIM_REG_CLASSES
+/* The number of distinct register classes, defined as follows:
+
+ #define N_REG_CLASSES (int) LIM_REG_CLASSES */
+
+/* Must be kept in the same order as enum reg_class. */
+#define REG_CLASS_NAMES { \
+ "NO_REGS", \
+ "DPH_REGS", \
+ "DPL_REGS", \
+ "DP_REGS", \
+ "SP_REGS", \
+ "IPH_REGS", \
+ "IPL_REGS", \
+ "IP_REGS", \
+ "DP_SP_REGS", \
+ "PTR_REGS", \
+ "NONPTR_REGS", \
+ "NONSP_REGS", \
+ "GENERAL_REGS" \
+ }
+/* An initializer containing the names of the register classes as C
+ string constants. These names are used in writing some of the
+ debugging dumps. */
+
+
+/* Bit R of word R/32 is set iff hard reg R is in the class. Nine
+ 32-bit words cover the 0x104 registers; the final word's low four
+ bits are the virtual regs 0x100..0x103 (VFP, AP and their pairs). */
+#define REG_CLASS_CONTENTS { \
+{0x00000000, 0, 0, 0, 0, 0, 0, 0, 0}, /* NO_REGS */ \
+{0x00001000, 0, 0, 0, 0, 0, 0, 0, 0}, /* DPH_REGS */ \
+{0x00002000, 0, 0, 0, 0, 0, 0, 0, 0}, /* DPL_REGS */ \
+{0x00003000, 0, 0, 0, 0, 0, 0, 0, 0}, /* DP_REGS */ \
+{0x000000c0, 0, 0, 0, 0, 0, 0, 0, 0}, /* SP_REGS */ \
+{0x00000010, 0, 0, 0, 0, 0, 0, 0, 0}, /* IPH_REGS */ \
+{0x00000020, 0, 0, 0, 0, 0, 0, 0, 0}, /* IPL_REGS */ \
+{0x00000030, 0, 0, 0, 0, 0, 0, 0, 0}, /* IP_REGS */ \
+{0x000030c0, 0, 0, 0, 0, 0, 0, 0, 0}, /* DP_SP_REGS */ \
+{0x000030f0, 0, 0, 0, 0, 0, 0, 0, 0}, /* PTR_REGS */ \
+{0xffffcf0f,-1,-1,-1,-1,-1,-1,-1, 0}, /* NONPTR_REGS */ \
+{0xffffff3f,-1,-1,-1,-1,-1,-1,-1, 0}, /* NONSP_REGS */ \
+{0xffffffff,-1,-1,-1,-1,-1,-1,-1,15} /* GENERAL_REGS */ \
+}
+
+/* An initializer containing the contents of the register classes, as
+ integers which are bit masks. The Nth integer specifies the
+ contents of class N. The way the integer MASK is interpreted is
+ that register R is in the class if `MASK & (1 << R)' is 1.
+
+ When the machine has more than 32 registers, an integer does not
+ suffice. Then the integers are replaced by sub-initializers,
+ braced groupings containing several integers. Each
+ sub-initializer must be suitable as an initializer for the type
+ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
+
+/* Smallest class containing hard reg R. Both SP halves map to
+ SP_REGS (there are no separate SPH/SPL classes); every other
+ register falls into NONPTR_REGS. */
+#define REGNO_REG_CLASS(R) \
+ ( (R) == REG_IPH ? IPH_REGS \
+ : (R) == REG_IPL ? IPL_REGS \
+ : (R) == REG_DPH ? DPH_REGS \
+ : (R) == REG_DPL ? DPL_REGS \
+ : (R) == REG_SPH ? SP_REGS \
+ : (R) == REG_SPL ? SP_REGS \
+ : NONPTR_REGS)
+
+/* A C expression whose value is a register class containing hard
+ register REGNO. In general there is more than one such class;
+ choose a class which is "minimal", meaning that no smaller class
+ also contains the register. */
+
+/* NOTE(review): for VOIDmode this yields DP_SP_REGS, whereas the
+ comment below says it should match BASE_REG_CLASS (PTR_REGS) --
+ confirm this asymmetry is intended (IP is excluded for any mode
+ wider than a byte; cf. REGNO_MODE_OK_FOR_BASE_P). */
+#define MODE_BASE_REG_CLASS(MODE) ((MODE) == QImode ? PTR_REGS : DP_SP_REGS)
+/* This is a variation of the BASE_REG_CLASS macro which allows
+ the selection of a base register in a mode dependent manner.
+ If MODE is VOIDmode then it should return the same value as
+ BASE_REG_CLASS. */
+
+#define BASE_REG_CLASS PTR_REGS
+/* A macro whose definition is the name of the class to which a valid
+ base register must belong. A base register is one used in an
+ address which is the register value plus a displacement. */
+
+#define INDEX_REG_CLASS NO_REGS
+/* A macro whose definition is the name of the class to which a valid
+ index register must belong. An index register is one used in an
+ address where its value is either multiplied by a scale factor or
+ added to another register (as well as added to a displacement). */
+
+
+/* Constraint letter -> register class mapping for the ip2k port
+ (these letters are documented in md.texi by the same patch). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ( (C) == 'j' ? IPH_REGS \
+ : (C) == 'k' ? IPL_REGS \
+ : (C) == 'f' ? IP_REGS \
+ : (C) == 'y' ? DPH_REGS \
+ : (C) == 'z' ? DPL_REGS \
+ : (C) == 'b' ? DP_REGS \
+ : (C) == 'u' ? NONSP_REGS \
+ : (C) == 'q' ? SP_REGS \
+ : (C) == 'c' ? DP_SP_REGS \
+ : (C) == 'a' ? PTR_REGS \
+ : (C) == 'd' ? NONPTR_REGS \
+ : NO_REGS)
+
+/* A C expression which defines the machine-dependent operand
+ constraint letters for register classes. If CHAR is such a
+ letter, the value should be the register class corresponding to
+ it. Otherwise, the value should be `NO_REGS'. The register
+ letter `r', corresponding to class `GENERAL_REGS', will not be
+ passed to this macro; you do not need to handle it. */
+
+
+/* Only the three pointer registers may serve as address bases.
+ NOTE(review): this checks hard-register numbers only and never
+ consults reg_renumber, so an unallocated pseudo is always rejected
+ -- confirm reload handles that as intended. */
+#define REGNO_OK_FOR_BASE_P(R) \
+ ((R) == REG_DP || (R) == REG_IP || (R) == REG_SP)
+/* A C expression which is nonzero if register number R is suitable
+ for use as a base register in operand addresses. It may be either
+ a suitable hard register or a pseudo register that has been
+ allocated such a hard register. */
+
+/* Mode-aware variant: IP may only base accesses of at most one byte
+ (IP addressing permits no offsets -- see HARD_REGNO_RENAME_OK). */
+#define REGNO_MODE_OK_FOR_BASE_P(R,M) \
+ ((R) == REG_DP || (R) == REG_SP \
+ || ((R) == REG_IP && GET_MODE_SIZE (M) <= 1))
+/* A C expression that is just like `REGNO_OK_FOR_BASE_P', except that
+ that expression may examine the mode of the memory reference in
+ MODE. You should define this macro if the mode of the memory
+ reference affects whether a register may be used as a base
+ register. If you define this macro, the compiler will use it
+ instead of `REGNO_OK_FOR_BASE_P'. */
+
+/* No register ever qualifies as an index (INDEX_REG_CLASS is NO_REGS). */
+#define REGNO_OK_FOR_INDEX_P(NUM) 0
+/* A C expression which is nonzero if register number NUM is suitable
+ for use as an index register in operand addresses. It may be
+ either a suitable hard register or a pseudo register that has been
+ allocated such a hard register.
+
+ The difference between an index register and a base register is
+ that the index register may be scaled. If an address involves the
+ sum of two registers, neither one of them scaled, then either one
+ may be labeled the "base" and the other the "index"; but whichever
+ labeling is used must fit the machine's constraints of which
+ registers may serve in each capacity. The compiler will try both
+ labelings, looking for one that is valid, and will reload one or
+ both registers only if neither labeling works. */
+
+/* No additional restriction on reload classes. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+/* A C expression that places additional restrictions on the register
+ class to use when it is necessary to copy value X into a register
+ in class CLASS. The value is a register class; perhaps CLASS, or
+ perhaps another, smaller class. On many machines, the following
+ definition is safe:
+
+ #define PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS)
+
+ Sometimes returning a more restrictive class makes better code.
+ For example, on the 68000, when X is an integer constant that is
+ in range for a `moveq' instruction, the value of this macro is
+ always `DATA_REGS' as long as CLASS includes the data registers.
+ Requiring a data register guarantees that a `moveq' will be used.
+
+ If X is a `const_double', by returning `NO_REGS' you can force X
+ into a memory constant. This is useful on certain machines where
+ immediate floating values cannot be loaded into certain kinds of
+ registers. */
+
+/* `PREFERRED_OUTPUT_RELOAD_CLASS (X, CLASS)'
+ Like `PREFERRED_RELOAD_CLASS', but for output reloads instead of
+ input reloads. If you don't define this macro, the default is to
+ use CLASS, unchanged. */
+
+/* `LIMIT_RELOAD_CLASS (MODE, CLASS)'
+ A C expression that places additional restrictions on the register
+ class to use when it is necessary to be able to hold a value of
+ mode MODE in a reload register for which class CLASS would
+ ordinarily be used.
+
+ Unlike `PREFERRED_RELOAD_CLASS', this macro should be used when
+ there are certain modes that simply can't go in certain reload
+ classes.
+
+ The value is a register class; perhaps CLASS, or perhaps another,
+ smaller class.
+
+ Don't define this macro unless the target machine has limitations
+ which require the macro to do something nontrivial. */
+
+/* SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X)
+ `SECONDARY_RELOAD_CLASS (CLASS, MODE, X)'
+ `SECONDARY_OUTPUT_RELOAD_CLASS (CLASS, MODE, X)'
+ Many machines have some registers that cannot be copied directly
+ to or from memory or even from other types of registers. An
+ example is the `MQ' register, which on most machines, can only be
+ copied to or from general registers, but not memory. Some
+ machines allow copying all registers to and from memory, but
+ require a scratch register for stores to some memory locations
+ (e.g., those with symbolic address on the RT, and those with
+ certain symbolic address on the Sparc when compiling PIC). In
+ some cases, both an intermediate and a scratch register are
+ required.
+
+ You should define these macros to indicate to the reload phase
+ that it may need to allocate at least one register for a reload in
+ addition to the register to contain the data. Specifically, if
+ copying X to a register CLASS in MODE requires an intermediate
+ register, you should define `SECONDARY_INPUT_RELOAD_CLASS' to
+ return the largest register class all of whose registers can be
+ used as intermediate registers or scratch registers.
+
+ If copying a register CLASS in MODE to X requires an intermediate
+ or scratch register, `SECONDARY_OUTPUT_RELOAD_CLASS' should be
+ defined to return the largest register class required. If the
+ requirements for input and output reloads are the same, the macro
+ `SECONDARY_RELOAD_CLASS' should be used instead of defining both
+ macros identically.
+
+ The values returned by these macros are often `GENERAL_REGS'.
+ Return `NO_REGS' if no spare register is needed; i.e., if X can be
+ directly copied to or from a register of CLASS in MODE without
+ requiring a scratch register. Do not define this macro if it
+ would always return `NO_REGS'.
+
+ If a scratch register is required (either with or without an
+ intermediate register), you should define patterns for
+ `reload_inM' or `reload_outM', as required (*note Standard
+ Names::). These patterns, which will normally be implemented with
+ a `define_expand', should be similar to the `movM' patterns,
+ except that operand 2 is the scratch register.
+
+ Define constraints for the reload register and scratch register
+ that contain a single register class. If the original reload
+ register (whose class is CLASS) can meet the constraint given in
+ the pattern, the value returned by these macros is used for the
+ class of the scratch register. Otherwise, two additional reload
+ registers are required. Their classes are obtained from the
+ constraints in the insn pattern.
+
+ X might be a pseudo-register or a `subreg' of a pseudo-register,
+ which could either be in a hard register or in memory. Use
+ `true_regnum' to find out; it will return -1 if the pseudo is in
+ memory and the hard register number if it is in a register.
+
+ These macros should not be used in the case where a particular
+ class of registers can only be copied to memory and not to another
+ class of registers. In that case, secondary reload registers are
+ not needed and would not be helpful. Instead, a stack location
+ must be used to perform the copy and the `movM' pattern should use
+ memory as a intermediate storage. This case often occurs between
+ floating-point and general registers. */
+
+/* `SECONDARY_MEMORY_NEEDED (CLASS1, CLASS2, M)'
+ Certain machines have the property that some registers cannot be
+ copied to some other registers without using memory. Define this
+ macro on those machines to be a C expression that is non-zero if
+ objects of mode M in registers of CLASS1 can only be copied to
+ registers of class CLASS2 by storing a register of CLASS1 into
+ memory and loading that memory location into a register of CLASS2.
+
+ Do not define this macro if its value would always be zero.
+
+ `SECONDARY_MEMORY_NEEDED_RTX (MODE)'
+ Normally when `SECONDARY_MEMORY_NEEDED' is defined, the compiler
+ allocates a stack slot for a memory location needed for register
+ copies. If this macro is defined, the compiler instead uses the
+ memory location defined by this macro.
+
+ Do not define this macro if you do not define
+ `SECONDARY_MEMORY_NEEDED'. */
+
+/* The pointer classes (IP/DP/SP and their halves) hold very few
+ registers, so let rtl-mentioned registers be used as spills. */
+#define SMALL_REGISTER_CLASSES 1
+/* Normally the compiler avoids choosing registers that have been
+ explicitly mentioned in the rtl as spill registers (these
+ registers are normally those used to pass parameters and return
+ values). However, some machines have so few registers of certain
+ classes that there would not be enough registers to use as spill
+ registers if this were done.
+
+ Define `SMALL_REGISTER_CLASSES' to be an expression with a non-zero
+ value on these machines. When this macro has a non-zero value, the
+ compiler allows registers explicitly used in the rtl to be used as
+ spill registers but avoids extending the lifetime of these
+ registers.
+
+ It is always safe to define this macro with a non-zero value, but
+ if you unnecessarily define it, you will reduce the amount of
+ optimizations that can be performed in some cases. If you do not
+ define this macro with a non-zero value when it is required, the
+ compiler will run out of spill registers and print a fatal error
+ message. For most machines, you should not define this macro at
+ all. */
+
+/* Delegated to class_likely_spilled_p () -- presumably defined in
+ ip2k.c; TODO confirm. */
+#define CLASS_LIKELY_SPILLED_P(CLASS) class_likely_spilled_p(CLASS)
+/* A C expression whose value is nonzero if pseudos that have been
+ assigned to registers of class CLASS would likely be spilled
+ because registers of CLASS are needed for spill registers.
+
+ The default value of this macro returns 1 if CLASS has exactly one
+ register and zero otherwise. On most machines, this default
+ should be used. Only define this macro to some other expression
+ if pseudo allocated by `local-alloc.c' end up in memory because
+ their hard registers were needed for spill registers. If this
+ macro returns nonzero for those classes, those pseudos will only
+ be allocated by `global.c', which knows how to reallocate the
+ pseudo to another register. If there would not be another
+ register available for reallocation, you should not change the
+ definition of this macro since the only effect of such a
+ definition would be to slow down register allocation. */
+
+/* One register per byte of MODE. NOTE(review): this matches
+ HARD_REGNO_NREGS only when UNITS_PER_WORD is 1 -- confirm. */
+#define CLASS_MAX_NREGS(CLASS, MODE) GET_MODE_SIZE (MODE)
+/* A C expression for the maximum number of consecutive registers of
+ class CLASS needed to hold a value of mode MODE.
+
+ This is closely related to the macro `HARD_REGNO_NREGS'. In fact,
+ the value of the macro `CLASS_MAX_NREGS (CLASS, MODE)' should be
+ the maximum value of `HARD_REGNO_NREGS (REGNO, MODE)' for all
+ REGNO values in the class CLASS.
+
+ This macro helps control the handling of multiple-word values in
+ the reload pass. */
+
+/* Integer constant constraints:
+ 'I' -255..-1, 'J' 0..7, 'K' 0..127, 'L' 1..127,
+ 'M' exactly -1, 'N' exactly 1, 'O' exactly 0, 'P' 0..255. */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? (VALUE) >= -255 && (VALUE) <= -1 : \
+ (C) == 'J' ? (VALUE) >= 0 && (VALUE) <= 7 : \
+ (C) == 'K' ? (VALUE) >= 0 && (VALUE) <= 127 : \
+ (C) == 'L' ? (VALUE) > 0 && (VALUE) < 128: \
+ (C) == 'M' ? (VALUE) == -1: \
+ (C) == 'N' ? (VALUE) == 1: \
+ (C) == 'O' ? (VALUE) == 0: \
+ (C) == 'P' ? (VALUE) >= 0 && (VALUE) <= 255: \
+ 0)
+
+/* A C expression that defines the machine-dependent operand
+ constraint letters (`I', `J', `K', ... `P') that specify
+ particular ranges of integer values. If C is one of those
+ letters, the expression should check that VALUE, an integer, is in
+ the appropriate range and return 1 if so, 0 otherwise. If C is
+ not one of those letters, the value should be 0 regardless of
+ VALUE. */
+
+/* No `const_double' constraint letters ('G'/'H') are defined. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) 0
+
+/* `CONST_DOUBLE_OK_FOR_LETTER_P (VALUE, C)'
+ A C expression that defines the machine-dependent operand
+ constraint letters that specify particular ranges of
+ `const_double' values (`G' or `H').
+
+ If C is one of those letters, the expression should check that
+ VALUE, an RTX of code `const_double', is in the appropriate range
+ and return 1 if so, 0 otherwise. If C is not one of those
+ letters, the value should be 0 regardless of VALUE.
+
+ `const_double' is used for all floating-point constants and for
+ `DImode' fixed-point constants. A given letter can accept either
+ or both kinds of values. It can use `GET_MODE' to distinguish
+ between these kinds. */
+
+/* Extra constraints are handled by ip2k_extra_constraint ()
+ (presumably declared in ip2k-protos.h, defined in ip2k.c). */
+#define EXTRA_CONSTRAINT(X, C) ip2k_extra_constraint (X, C)
+
+/* A C expression that defines the optional machine-dependent
+ constraint letters (`Q', `R', `S', `T', `U') that can
+ be used to segregate specific types of operands, usually memory
+ references, for the target machine. Normally this macro will not
+ be defined. If it is required for a particular target machine, it
+ should return 1 if VALUE corresponds to the operand type
+ represented by the constraint letter C. If C is not defined as an
+ extra constraint, the value returned should be 0 regardless of
+ VALUE.
+
+ For example, on the ROMP, load instructions cannot have their
+ output in r0 if the memory reference contains a symbolic address.
+ Constraint letter `Q' is defined as representing a memory address
+ that does *not* contain a symbolic address. An alternative is
+ specified with a `Q' constraint on the input and `r' on the
+ output. The next alternative specifies `m' on the input and a
+ register class that does not include r0 on the output. */
+
+/* This is an undocumented variable which describes
+ how GCC will pop data. */
+#define STACK_POP_CODE PRE_INC
+
+/* Pushes post-decrement SP; the matching pop above pre-increments. */
+#define STACK_PUSH_CODE POST_DEC
+/* This macro defines the operation used when something is pushed on
+ the stack. In RTL, a push operation will be `(set (mem
+ (STACK_PUSH_CODE (reg sp))) ...)'
+
+ The choices are `PRE_DEC', `POST_DEC', `PRE_INC', and `POST_INC'.
+ Which of these is correct depends on the stack direction and on
+ whether the stack pointer points to the last item on the stack or
+ whether it points to the space for the next item on the stack.
+
+ The default is `PRE_DEC' when `STACK_GROWS_DOWNWARD' is defined,
+ which is almost always right, and `PRE_INC' otherwise, which is
+ often wrong. */
+
+
+#define STACK_CHECK_BUILTIN 1
+/* Prologue code will do stack checking as necessary. */
+
+#define STARTING_FRAME_OFFSET (0)
+/* Offset from the frame pointer to the first local variable slot to
+ be allocated.
+
+ If `FRAME_GROWS_DOWNWARD', find the next slot's offset by
+ subtracting the first slot's length from `STARTING_FRAME_OFFSET'.
+ Otherwise, it is found by adding the length of the first slot to
+ the value `STARTING_FRAME_OFFSET'. */
+
+#define FRAME_GROWS_DOWNWARD 1
+#define STACK_GROWS_DOWNWARD 1
+
+/* On IP2K arg pointer is virtual and resolves to either SP or FP
+ after we've resolved what registers are saved (fp chain, return
+ pc, etc.). */
+
+#define FIRST_PARM_OFFSET(FUNDECL) 0
+/* Offset from the argument pointer register to the first argument's
+ address. On some machines it may depend on the data type of the
+ function.
+
+ If `ARGS_GROW_DOWNWARD', this is the offset to the location above
+ the first argument's address. */
+
+/* `STACK_DYNAMIC_OFFSET (FUNDECL)'
+ Offset from the stack pointer register to an item dynamically
+ allocated on the stack, e.g., by `alloca'.
+
+ The default value for this macro is `STACK_POINTER_OFFSET' plus the
+ length of the outgoing arguments. The default is correct for most
+ machines. See `function.c' for details. */
+
+#define STACK_POINTER_OFFSET 1
+/* IP2K stack is post-decremented, so 0(sp) is address of open space
+ and 1(sp) is the offset to the location above the first location at which
+ outgoing arguments are placed. */
+
+#define STACK_BOUNDARY 8
+/* Define this macro if there is a guaranteed alignment for the stack
+ pointer on this machine. The definition is a C expression for the
+ desired alignment (measured in bits). This value is used as a
+ default if PREFERRED_STACK_BOUNDARY is not defined. */
+
+#define STACK_POINTER_REGNUM REG_SP
+/* The register number of the stack pointer register, which must also
+ be a fixed register according to `FIXED_REGISTERS'. On most
+ machines, the hardware determines which register this is. */
+
+#define FRAME_POINTER_REGNUM REG_VFP
+/* The register number of the frame pointer register, which is used to
+ access automatic variables in the stack frame. On some machines,
+ the hardware determines which register this is. On other
+ machines, you can choose any register you wish for this purpose. */
+
+#define HARD_FRAME_POINTER_REGNUM REG_FP
+
+#define ARG_POINTER_REGNUM REG_AP
+/* The register number of the arg pointer register, which is used to
+ access the function's argument list. On some machines, this is
+ the same as the frame pointer register. On some machines, the
+ hardware determines which register this is. On other machines,
+ you can choose any register you wish for this purpose. If this is
+ not the same register as the frame pointer register, then you must
+ mark it as a fixed register according to `FIXED_REGISTERS', or
+ arrange to be able to eliminate it (*note Elimination::.). */
+
+/* We don't really want to support nested functions. But we'll crash
+ in various testsuite tests if we don't at least define the register
+ to contain the static chain. The return value register is about as
+ bad a place as any for this. */
+
+#define STATIC_CHAIN_REGNUM REG_RESULT
+/* Register numbers used for passing a function's static chain
+ pointer. If register windows are used, the register number as
+ seen by the called function is `STATIC_CHAIN_INCOMING_REGNUM',
+ while the register number as seen by the calling function is
+ `STATIC_CHAIN_REGNUM'. If these registers are the same,
+ `STATIC_CHAIN_INCOMING_REGNUM' need not be defined.
+
+ The static chain register need not be a fixed register.
+
+ If the static chain is passed in memory, these macros should not be
+ defined; instead, the next two macros should be defined. */
+
+#define FRAME_POINTER_REQUIRED (!flag_omit_frame_pointer)
+/* A C expression which is nonzero if a function must have and use a
+ frame pointer. This expression is evaluated in the reload pass.
+ If its value is nonzero the function will have a frame pointer.
+
+ The expression can in principle examine the current function and
+ decide according to the facts, but on most machines the constant 0
+ or the constant 1 suffices. Use 0 when the machine allows code to
+ be generated with no frame pointer, and doing so saves some time
+ or space. Use 1 when there is no possible advantage to avoiding a
+ frame pointer.
+
+ In certain cases, the compiler does not know how to produce valid
+ code without a frame pointer. The compiler recognizes those cases
+ and automatically gives the function a frame pointer regardless of
+ what `FRAME_POINTER_REQUIRED' says. You don't need to worry about
+ them.
+
+ In a function that does not require a frame pointer, the frame
+ pointer register can be allocated for ordinary usage, unless you
+ mark it as a fixed register. See `FIXED_REGISTERS' for more
+ information. */
+
+#define ELIMINABLE_REGS { \
+ {ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {HARD_FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+}
+/* If defined, this macro specifies a table of register pairs used to
+ eliminate unneeded registers that point into the stack frame. If
+ it is not defined, the only elimination attempted by the compiler
+ is to replace references to the frame pointer with references to
+ the stack pointer.
+
+ The definition of this macro is a list of structure
+ initializations, each of which specifies an original and
+ replacement register.
+
+ On some machines, the position of the argument pointer is not
+ known until the compilation is completed. In such a case, a
+ separate hard register must be used for the argument pointer.
+ This register can be eliminated by replacing it with either the
+ frame pointer or the argument pointer, depending on whether or not
+ the frame pointer has been eliminated.
+
+ In this case, you might specify:
+ #define ELIMINABLE_REGS \
+ {{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+ Note that the elimination of the argument pointer with the stack
+ pointer is specified first since that is the preferred elimination. */
+
+
+#define CAN_ELIMINATE(FROM, TO) \
+ ((FROM) == HARD_FRAME_POINTER_REGNUM \
+ ? (flag_omit_frame_pointer && !frame_pointer_needed) : 1)
+/* Don't eliminate FP unless we are explicitly asked to.  */
+
+/* A C expression that returns non-zero if the compiler is allowed to
+ try to replace register number FROM-REG with register number
+ TO-REG. This macro need only be defined if `ELIMINABLE_REGS' is
+ defined, and will usually be the constant 1, since most of the
+ cases preventing register elimination are things that the compiler
+ already knows about. */
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ ((OFFSET) = ip2k_init_elim_offset ((FROM), (TO)))
+
+/* This macro is similar to `INITIAL_FRAME_POINTER_OFFSET'. It
+ specifies the initial difference between the specified pair of
+ registers. This macro must be defined if `ELIMINABLE_REGS' is
+ defined. */
+
+#define RETURN_ADDR_RTX(COUNT, X) \
+ (((COUNT) == 0) ? gen_rtx_REG (HImode, REG_CALLH) : NULL_RTX)
+/* A C expression whose value is RTL representing the value of the
+ return address for the frame COUNT steps up from the current
+ frame, after the prologue. FRAMEADDR is the frame pointer of the
+ COUNT frame, or the frame pointer of the COUNT - 1 frame if
+ `RETURN_ADDR_IN_PREVIOUS_FRAME' is defined.
+
+ The value of the expression must always be the correct address when
+ COUNT is zero, but may be `NULL_RTX' if there is no way to
+ determine the return address of other frames. */
+
+#define PUSH_ROUNDING(NPUSHED) (NPUSHED)
+/* A C expression that is the number of bytes actually pushed onto the
+ stack when an instruction attempts to push NPUSHED bytes.
+
+ If the target machine does not have a push instruction, do not
+ define this macro. That directs GNU CC to use an alternate
+ strategy: to allocate the entire argument block and then store the
+ arguments into it.
+
+ On some machines, the definition
+
+ #define PUSH_ROUNDING(BYTES) (BYTES)
+
+ will suffice. But on other machines, instructions that appear to
+ push one byte actually push two bytes in an attempt to maintain
+ alignment. Then the definition should be
+
+ #define PUSH_ROUNDING(BYTES) (((BYTES) + 1) & ~1) */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) \
+ ip2k_return_pops_args ((FUNDECL), (FUNTYPE), (SIZE))
+/* A C expression that should indicate the number of bytes of its own
+ arguments that a function pops on returning, or 0 if the function
+ pops no arguments and the caller must therefore pop them all after
+ the function returns.
+
+ FUNDECL is a C variable whose value is a tree node that describes
+ the function in question. Normally it is a node of type
+ `FUNCTION_DECL' that describes the declaration of the function.
+ From this you can obtain the DECL_MACHINE_ATTRIBUTES of the
+ function.
+
+ FUNTYPE is a C variable whose value is a tree node that describes
+ the function in question. Normally it is a node of type
+ `FUNCTION_TYPE' that describes the data type of the function.
+ From this it is possible to obtain the data types of the value and
+ arguments (if known).
+
+ When a call to a library function is being considered, FUNDECL
+ will contain an identifier node for the library function. Thus, if
+ you need to distinguish among various library functions, you can
+ do so by their names. Note that "library function" in this
+ context means a function used to perform arithmetic, whose name is
+ known specially in the compiler and was not mentioned in the C
+ code being compiled.
+
+ STACK-SIZE is the number of bytes of arguments passed on the
+ stack. If a variable number of bytes is passed, it is zero, and
+ argument popping will always be the responsibility of the calling
+ function.
+
+ On the Vax, all functions always pop their arguments, so the
+ definition of this macro is STACK-SIZE. On the 68000, using the
+ standard calling convention, no functions pop their arguments, so
+ the value of the macro is always 0 in this case. But an
+ alternative calling convention is available in which functions
+ that take a fixed number of arguments pop them but other functions
+ (such as `printf') pop nothing (the caller pops all). When this
+ convention is in use, FUNTYPE is examined to determine whether a
+ function takes a fixed number of arguments. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) 0
+/* A C expression that controls whether a function argument is passed
+ in a register, and which register.
+
+ The arguments are CUM, which summarizes all the previous
+ arguments; MODE, the machine mode of the argument; TYPE, the data
+ type of the argument as a tree node or 0 if that is not known
+ (which happens for C support library functions); and NAMED, which
+ is 1 for an ordinary argument and 0 for nameless arguments that
+ correspond to `...' in the called function's prototype.
+
+ The value of the expression is usually either a `reg' RTX for the
+ hard register in which to pass the argument, or zero to pass the
+ argument on the stack.
+
+ For machines like the Vax and 68000, where normally all arguments
+ are pushed, zero suffices as a definition.
+
+ The value of the expression can also be a `parallel' RTX. This is
+ used when an argument is passed in multiple locations. The mode
+ of the of the `parallel' should be the mode of the entire
+ argument. The `parallel' holds any number of `expr_list' pairs;
+ each one describes where part of the argument is passed. In each
+ `expr_list', the first operand can be either a `reg' RTX for the
+ hard register in which to pass this part of the argument, or zero
+ to pass the argument on the stack. If this operand is a `reg',
+ then the mode indicates how large this part of the argument is.
+ The second operand of the `expr_list' is a `const_int' which gives
+ the offset in bytes into the entire argument where this part
+ starts.
+
+ The usual way to make the ANSI library `stdarg.h' work on a machine
+ where some arguments are usually passed in registers, is to cause
+ nameless arguments to be passed on the stack instead. This is done
+ by making `FUNCTION_ARG' return 0 whenever NAMED is 0.
+
+ You may use the macro `MUST_PASS_IN_STACK (MODE, TYPE)' in the
+ definition of this macro to determine if this argument is of a
+ type that must be passed in the stack. If `REG_PARM_STACK_SPACE'
+ is not defined and `FUNCTION_ARG' returns non-zero for such an
+ argument, the compiler will abort. If `REG_PARM_STACK_SPACE' is
+ defined, the argument will be computed in the stack and then
+ loaded into a register. */
+
+#define CUMULATIVE_ARGS int
+
+/* A C type for declaring a variable that is used as the first
+ argument of `FUNCTION_ARG' and other related values. For some
+ target machines, the type `int' suffices and can hold the number
+ of bytes of argument so far.
+
+ There is no need to record in `CUMULATIVE_ARGS' anything about the
+ arguments that have been passed on the stack. The compiler has
+ other variables to keep track of that. For target machines on
+ which all arguments are passed on the stack, there is no need to
+ store anything in `CUMULATIVE_ARGS'; however, the data structure
+ must exist and should not be empty, so use `int'. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = 0)
+
+/* A C statement (sans semicolon) for initializing the variable CUM
+ for the state at the beginning of the argument list. The variable
+ has type `CUMULATIVE_ARGS'. The value of FNTYPE is the tree node
+ for the data type of the function which will receive the args, or 0
+ if the args are to a compiler support library function. The value
+ of INDIRECT is nonzero when processing an indirect call, for
+ example a call through a function pointer. The value of INDIRECT
+ is zero for a call to an explicitly named function, a library
+ function call, or when `INIT_CUMULATIVE_ARGS' is used to find
+ arguments for the function being compiled.
+
+ When processing a call to a compiler support library function,
+ LIBNAME identifies which one. It is a `symbol_ref' rtx which
+ contains the name of the function, as a string. LIBNAME is 0 when
+ an ordinary C function call is being processed. Thus, each time
+ this macro is called, either LIBNAME or FNTYPE is nonzero, but
+ never both of them at once. */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED)
+
+/* All arguments are passed on stack - do nothing here. */
+
+/* A C statement (sans semicolon) to update the summarizer variable
+ CUM to advance past an argument in the argument list. The values
+ MODE, TYPE and NAMED describe that argument. Once this is done,
+ the variable CUM is suitable for analyzing the *following*
+ argument with `FUNCTION_ARG', etc.
+
+ This macro need not do anything if the argument in question was
+ passed on the stack. The compiler knows how to track the amount
+ of stack space used for arguments without any special help. */
+
+#define FUNCTION_ARG_REGNO_P(R) 0
+/* A C expression that is nonzero if REGNO is the number of a hard
+ register in which function arguments are sometimes passed. This
+ does *not* include implicit arguments such as the static chain and
+ the structure-value address. On many machines, no registers can be
+ used for this purpose since all function arguments are pushed on
+ the stack. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ ((TYPE_MODE (VALTYPE) == QImode) \
+ ? gen_rtx_REG (TYPE_MODE (VALTYPE), REG_RESULT + 1) \
+ : gen_rtx_REG (TYPE_MODE (VALTYPE), REG_RESULT))
+
+/* Because functions returning 'char' actually widen to 'int', we have to
+ use $81 as the return location if we think we only have a 'char'. */
+
+/* A C expression to create an RTX representing the place where a
+ function returns a value of data type VALTYPE. VALTYPE is a tree
+ node representing a data type. Write `TYPE_MODE (VALTYPE)' to get
+ the machine mode used to represent that type. On many machines,
+ only the mode is relevant. (Actually, on most machines, scalar
+ values are returned in the same place regardless of mode).
+
+ The value of the expression is usually a `reg' RTX for the hard
+ register where the return value is stored. The value can also be a
+ `parallel' RTX, if the return value is in multiple places. See
+ `FUNCTION_ARG' for an explanation of the `parallel' form.
+
+ If `PROMOTE_FUNCTION_RETURN' is defined, you must apply the same
+ promotion rules specified in `PROMOTE_MODE' if VALTYPE is a scalar
+ type.
+
+ If the precise function being called is known, FUNC is a tree node
+ (`FUNCTION_DECL') for it; otherwise, FUNC is a null pointer. This
+ makes it possible to use a different value-returning convention
+ for specific functions when all their calls are known.
+
+ `FUNCTION_VALUE' is not used for return values with aggregate data
+ types, because these are returned in another way. See
+ `STRUCT_VALUE_REGNUM' and related macros, below. */
+
+#define LIBCALL_VALUE(MODE) gen_rtx_REG ((MODE), REG_RESULT)
+/* A C expression to create an RTX representing the place where a
+ library function returns a value of mode MODE. If the precise
+ function being called is known, FUNC is a tree node
+ (`FUNCTION_DECL') for it; otherwise, FUNC is a null pointer. This
+ makes it possible to use a different value-returning convention
+ for specific functions when all their calls are known.
+
+ Note that "library function" in this context means a compiler
+ support routine, used to perform arithmetic, whose name is known
+ specially by the compiler and was not mentioned in the C code being
+ compiled.
+
+ The definition of `LIBCALL_VALUE' need not be concerned with aggregate
+ data types, because none of the library functions returns such
+ types. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == REG_RESULT)
+/* A C expression that is nonzero if REGNO is the number of a hard
+ register in which the values of called function may come back.
+
+ A register whose use for returning values is limited to serving as
+ the second of a pair (for a value of type `double', say) need not
+ be recognized by this macro. So for most machines, this definition
+ suffices:
+
+ #define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+
+ If the machine has register windows, so that the caller and the
+ called function use different registers for the return value, this
+ macro should recognize only the caller's register numbers. */
+
+#define RETURN_IN_MEMORY(TYPE) \
+ ((TYPE_MODE (TYPE) == BLKmode) ? int_size_in_bytes (TYPE) > 8 : 0)
+/* A C expression which can inhibit the returning of certain function
+ values in registers, based on the type of value. A nonzero value
+ says to return the function value in memory, just as large
+ structures are always returned. Here TYPE will be a C expression
+ of type `tree', representing the data type of the value.
+
+ Note that values of mode `BLKmode' must be explicitly handled by
+ this macro. Also, the option `-fpcc-struct-return' takes effect
+ regardless of this macro. On most systems, it is possible to
+ leave the macro undefined; this causes a default definition to be
+ used, whose value is the constant 1 for `BLKmode' values, and 0
+ otherwise.
+
+ Do not use this macro to indicate that structures and unions
+ should always be returned in memory. You should instead use
+ `DEFAULT_PCC_STRUCT_RETURN' to indicate this. */
+
+/* Indicate that large structures are passed by reference. */
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM,MODE,TYPE,NAMED) 0
+
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+/* Define this macro to be 1 if all structure and union return values
+ must be in memory. Since this results in slower code, this should
+ be defined only if needed for compatibility with other compilers
+ or with an ABI. If you define this macro to be 0, then the
+ conventions used for structure and union return values are decided
+ by the `RETURN_IN_MEMORY' macro.
+
+ If not defined, this defaults to the value 1. */
+
+#define STRUCT_VALUE 0
+/* If the structure value address is not passed in a register, define
+ `STRUCT_VALUE' as an expression returning an RTX for the place
+ where the address is passed. If it returns 0, the address is
+ passed as an "invisible" first argument. */
+
+#define STRUCT_VALUE_INCOMING 0
+/* If the incoming location is not a register, then you should define
+ `STRUCT_VALUE_INCOMING' as an expression for an RTX for where the
+ called function should find the value. If it should find the
+ value on the stack, define this to create a `mem' which refers to
+ the frame pointer. A definition of 0 means that the address is
+ passed as an "invisible" first argument. */
+
+#define EPILOGUE_USES(REGNO) 0
+/* Define this macro as a C expression that is nonzero for registers
+ that are used by the epilogue or the `return' pattern.  The stack
+ and frame pointer registers are already assumed to be used as
+ needed. */
+
+#define SETUP_INCOMING_VARARGS(ARGS_SO_FAR,MODE,TYPE, \
+ PRETEND_ARGS_SIZE,SECOND_TIME) \
+ ((PRETEND_ARGS_SIZE) = (0))
+
+
+/* Hmmm. We don't actually like constants as addresses - they always need
+ to be loaded to a register, except for function calls which take an
+ address by immediate value. But changing this to zero had negative
+ effects, causing the compiler to get very confused.... */
+
+#define CONSTANT_ADDRESS_P(X) CONSTANT_P (X)
+
+/* A C expression that is 1 if the RTX X is a constant which is a
+ valid address. On most machines, this can be defined as
+ `CONSTANT_P (X)', but a few machines are more restrictive in which
+ constant addresses are supported.
+
+ `CONSTANT_P' accepts integer-values expressions whose values are
+ not explicitly known, such as `symbol_ref', `label_ref', and
+ `high' expressions and `const' arithmetic expressions, in addition
+ to `const_int' and `const_double' expressions. */
+
+#define MAX_REGS_PER_ADDRESS 1
+/* A number, the maximum number of registers that can appear in a
+ valid memory address. Note that it is up to you to specify a
+ value equal to the maximum number that `GO_IF_LEGITIMATE_ADDRESS'
+ would ever accept. */
+
+#ifdef REG_OK_STRICT
+# define GO_IF_LEGITIMATE_ADDRESS(MODE, OPERAND, ADDR) \
+{ \
+ if (legitimate_address_p ((MODE), (OPERAND), 1)) \
+ goto ADDR; \
+}
+#else
+# define GO_IF_LEGITIMATE_ADDRESS(MODE, OPERAND, ADDR) \
+{ \
+ if (legitimate_address_p ((MODE), (OPERAND), 0)) \
+ goto ADDR; \
+}
+#endif
+/* A C compound statement with a conditional `goto LABEL;' executed
+ if X (an RTX) is a legitimate memory address on the target machine
+ for a memory operand of mode MODE.
+
+ It usually pays to define several simpler macros to serve as
+ subroutines for this one. Otherwise it may be too complicated to
+ understand.
+
+ This macro must exist in two variants: a strict variant and a
+ non-strict one. The strict variant is used in the reload pass. It
+ must be defined so that any pseudo-register that has not been
+ allocated a hard register is considered a memory reference. In
+ contexts where some kind of register is required, a pseudo-register
+ with no hard register must be rejected.
+
+ The non-strict variant is used in other passes. It must be
+ defined to accept all pseudo-registers in every context where some
+ kind of register is required.
+
+ Compiler source files that want to use the strict variant of this
+ macro define the macro `REG_OK_STRICT'. You should use an `#ifdef
+ REG_OK_STRICT' conditional to define the strict variant in that
+ case and the non-strict variant otherwise.
+
+ Subroutines to check for acceptable registers for various purposes
+ (one for base registers, one for index registers, and so on) are
+ typically among the subroutines used to define
+ `GO_IF_LEGITIMATE_ADDRESS'. Then only these subroutine macros
+ need have two variants; the higher levels of macros may be the
+ same whether strict or not.
+
+ Normally, constant addresses which are the sum of a `symbol_ref'
+ and an integer are stored inside a `const' RTX to mark them as
+ constant. Therefore, there is no need to recognize such sums
+ specifically as legitimate addresses. Normally you would simply
+ recognize any `const' as legitimate.
+
+ Usually `PRINT_OPERAND_ADDRESS' is not prepared to handle constant
+ sums that are not marked with `const'. It assumes that a naked
+ `plus' indicates indexing. If so, then you *must* reject such
+ naked constant sums as illegitimate addresses, so that none of
+ them will be given to `PRINT_OPERAND_ADDRESS'.
+
+ On some machines, whether a symbolic address is legitimate depends
+ on the section that the address refers to. On these machines,
+ define the macro `ENCODE_SECTION_INFO' to store the information
+ into the `symbol_ref', and then check for it here. When you see a
+ `const', you will have to look inside it to find the `symbol_ref'
+ in order to determine the section. *Note Assembler Format::.
+
+ The best way to modify the name string is by adding text to the
+ beginning, with suitable punctuation to prevent any ambiguity.
+ Allocate the new name in `saveable_obstack'. You will have to
+ modify `ASM_OUTPUT_LABELREF' to remove and decode the added text
+ and output the name accordingly, and define `STRIP_NAME_ENCODING'
+ to access the original name string.
+
+ You can check the information stored here into the `symbol_ref' in
+ the definitions of the macros `GO_IF_LEGITIMATE_ADDRESS' and
+ `PRINT_OPERAND_ADDRESS'. */
+
+/* A C expression that is nonzero if X (assumed to be a `reg' RTX) is
+ valid for use as a base register. For hard registers, it should
+ always accept those which the hardware permits and reject the
+ others. Whether the macro accepts or rejects pseudo registers
+ must be controlled by `REG_OK_STRICT' as described above. This
+ usually requires two variant definitions, of which `REG_OK_STRICT'
+ controls the one actually used. */
+
+#define REG_OK_FOR_BASE_STRICT_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#define REG_OK_FOR_BASE_NOSTRICT_P(X) \
+ (REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (REGNO (X) == REG_FP) \
+ || (REGNO (X) == REG_VFP) \
+ || (REGNO (X) == REG_AP) \
+ || REG_OK_FOR_BASE_STRICT_P(X))
+
+#ifdef REG_OK_STRICT
+# define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_STRICT_P (X)
+#else
+# define REG_OK_FOR_BASE_P(X) REG_OK_FOR_BASE_NOSTRICT_P (X)
+#endif
+
+#define REG_OK_FOR_INDEX_P(X) 0
+/* A C expression that is nonzero if X (assumed to be a `reg' RTX) is
+ valid for use as an index register.
+
+ The difference between an index register and a base register is
+ that the index register may be scaled. If an address involves the
+ sum of two registers, neither one of them scaled, then either one
+ may be labeled the "base" and the other the "index"; but whichever
+ labeling is used must fit the machine's constraints of which
+ registers may serve in each capacity. The compiler will try both
+ labelings, looking for one that is valid, and will reload one or
+ both registers only if neither labeling works. */
+
+
+/* A C compound statement that attempts to replace X with a valid
+ memory address for an operand of mode MODE. WIN will be a C
+ statement label elsewhere in the code; the macro definition may use
+
+ GO_IF_LEGITIMATE_ADDRESS (MODE, X, WIN);
+
+ to avoid further processing if the address has become legitimate.
+
+ X will always be the result of a call to `break_out_memory_refs',
+ and OLDX will be the operand that was given to that function to
+ produce X.
+
+ The code generated by this macro should not alter the substructure
+ of X. If it transforms X into a more legitimate form, it should
+ assign X (which will always be a C variable) a new value.
+
+ It is not necessary for this macro to come up with a legitimate
+ address. The compiler has standard ways of doing so in all cases.
+ In fact, it is safe for this macro to do nothing. But often a
+ machine-dependent strategy can generate better code. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+do { rtx orig_x = (X); \
+ (X) = legitimize_address ((X), (OLDX), (MODE), 0); \
+ if ((X) != orig_x && memory_address_p ((MODE), (X))) \
+ goto WIN; \
+} while (0)
+
+/* Is X a legitimate register to reload, or is it a pseudo stack-temp
+ that is problematic for push_reload() ? */
+
+#define LRA_REG(X) \
+ (! (reg_equiv_memory_loc[REGNO (X)] \
+ && (reg_equiv_address[REGNO (X)] \
+ || num_not_at_initial_offset)))
+
+/* Given a register X that failed the LRA_REG test, replace X
+ by its memory equivalent, find the reloads needed for THAT memory
+ location and substitute that back for the higher-level reload
+ that we're conducting... */
+
+/* WARNING: we reference 'ind_levels' and 'insn' which are local variables
+ in find_reloads_address (), where the LEGITIMIZE_RELOAD_ADDRESS macro
+ expands. */
+
+#define FRA_REG(X,MODE,OPNUM,TYPE) \
+do { \
+ rtx tem = make_memloc ((X), REGNO (X)); \
+ \
+ if (! strict_memory_address_p (GET_MODE (tem), XEXP (tem, 0))) \
+ { \
+ /* Note that we're doing address in address - cf. ADDR_TYPE */ \
+ find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0), \
+ &XEXP (tem, 0), (OPNUM), \
+ ADDR_TYPE (TYPE), ind_levels, insn); \
+ } \
+ (X) = tem; \
+} while (0)
+
+
+/* For the IP2K, we want to be clever about picking IP vs DP for a
+ base pointer since IP only directly supports a zero displacement.
+ (Note that we have modified all the HI patterns to correctly handle
+ IP references by manipulating iph:ipl as we fetch the pieces). */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && REG_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int disp = INTVAL (XEXP (X, 1)); \
+ int fit = (disp >= 0 && disp <= (127 - 2 * GET_MODE_SIZE (MODE))); \
+ rtx reg = XEXP (X, 0); \
+ if (!fit) \
+ { \
+ push_reload ((X), NULL_RTX, &(X), \
+ NULL, MODE_BASE_REG_CLASS (MODE), GET_MODE (X), \
+ VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+ if (reg_equiv_memory_loc[REGNO (reg)] \
+ && (reg_equiv_address[REGNO (reg)] || num_not_at_initial_offset)) \
+ { \
+ rtx mem = make_memloc (reg, REGNO (reg)); \
+ if (! strict_memory_address_p (GET_MODE (mem), XEXP (mem, 0))) \
+ { \
+ /* Note that we're doing address in address - cf. ADDR_TYPE */\
+ find_reloads_address (GET_MODE (mem), &mem, XEXP (mem, 0), \
+ &XEXP (mem, 0), (OPNUM), \
+ ADDR_TYPE (TYPE), (IND), insn); \
+ } \
+ push_reload (mem, NULL, &XEXP (X, 0), NULL, \
+ GENERAL_REGS, Pmode, VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ push_reload (X, NULL, &X, NULL, \
+ MODE_BASE_REG_CLASS (MODE), GET_MODE (X), VOIDmode, \
+ 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+ } \
+}
+/* A C compound statement that attempts to replace X, which is an
+ address that needs reloading, with a valid memory address for an
+ operand of mode MODE. WIN will be a C statement label elsewhere
+ in the code. It is not necessary to define this macro, but it
+ might be useful for performance reasons.
+
+ For example, on the i386, it is sometimes possible to use a single
+ reload register instead of two by reloading a sum of two pseudo
+ registers into a register. On the other hand, for number of RISC
+ processors offsets are limited so that often an intermediate
+ address needs to be generated in order to address a stack slot.
+ By defining LEGITIMIZE_RELOAD_ADDRESS appropriately, the
+ intermediate addresses generated for some adjacent stack slots can
+ be made identical, and thus be shared.
+
+ *Note*: This macro should be used with caution. It is necessary
+ to know something of how reload works in order to effectively use
+ this, and it is quite easy to produce macros that build in too
+ much knowledge of reload internals.
+
+ *Note*: This macro must be able to reload an address created by a
+ previous invocation of this macro. If it fails to handle such
+ addresses then the compiler may generate incorrect code or abort.
+
+ The macro definition should use `push_reload' to indicate parts
+ that need reloading; OPNUM, TYPE and IND_LEVELS are usually
+ suitable to be passed unaltered to `push_reload'.
+
+ The code generated by this macro must not alter the substructure of
+ X. If it transforms X into a more legitimate form, it should
+ assign X (which will always be a C variable) a new value. This
+ also applies to parts that you change indirectly by calling
+ `push_reload'.
+
+ The macro definition may use `strict_memory_address_p' to test if
+ the address has become legitimate.
+
+ If you want to change only a part of X, one standard way of doing
+ this is to use `copy_rtx'. Note, however, that it unshares only a
+ single level of rtl. Thus, if the part to be changed is not at the
+ top level, you'll need to replace the top level first. It is not
+ necessary for this macro to come up with a legitimate address;
+ but often a machine-dependent strategy can generate better code. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+ do { \
+ if (ip2k_mode_dependent_address (ADDR)) goto LABEL; \
+ } while (0)
+
+/* A C statement or compound statement with a conditional `goto
+ LABEL;' executed if memory address X (an RTX) can have different
+ meanings depending on the machine mode of the memory reference it
+ is used for or if the address is valid for some modes but not
+ others.
+
+ Autoincrement and autodecrement addresses typically have
+ mode-dependent effects because the amount of the increment or
+ decrement is the size of the operand being addressed. Some
+ machines have other mode-dependent addresses. Many RISC machines
+ have no mode-dependent addresses.
+
+ You may assume that ADDR is a valid address for the machine. */
+
+#define LEGITIMATE_CONSTANT_P(X) 1
+/* A C expression that is nonzero if X is a legitimate constant for
+ an immediate operand on the target machine. You can assume that X
+ satisfies `CONSTANT_P', so you need not check this. In fact, `1'
+ is a suitable definition for this macro on machines where anything
+ `CONSTANT_P' is valid. */
+
+#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
+ case CONST_INT: \
+ return 0; \
+ case CONST: \
+ return 8; \
+ case LABEL_REF: \
+ return 0; \
+ case SYMBOL_REF: \
+ return 8; \
+ case CONST_DOUBLE: \
+ return 0;
+
+/* A part of a C `switch' statement that describes the relative costs
+ of constant RTL expressions. It must contain `case' labels for
+ expression codes `const_int', `const', `symbol_ref', `label_ref'
+ and `const_double'. Each case must ultimately reach a `return'
+ statement to return the relative cost of the use of that kind of
+ constant value in an expression. The cost may depend on the
+ precise value of the constant, which is available for examination
+ in X, and the rtx code of the expression in which it is contained,
+ found in OUTER_CODE.
+
+ CODE is the expression code--redundant, since it can be obtained
+ with `GET_CODE (X)'. */
+
+#define DEFAULT_RTX_COSTS(X, CODE, OUTER_CODE) \
+ return default_rtx_costs ((X), (CODE), (OUTER_CODE))
+
+/* Like `CONST_COSTS' but applies to nonconstant RTL expressions.
+ This can be used, for example, to indicate how costly a multiply
+ instruction is. In writing this macro, you can use the construct
+ `COSTS_N_INSNS (N)' to specify a cost equal to N fast
+ instructions. OUTER_CODE is the code of the expression in which X
+ is contained.
+
+ This macro is optional; do not define it if the default cost
+ assumptions are adequate for the target machine. */
+
+#define ADDRESS_COST(ADDRESS) ip2k_address_cost (ADDRESS)
+
+/* An expression giving the cost of an addressing mode that contains
+ ADDRESS. If not defined, the cost is computed from the ADDRESS
+ expression and the `CONST_COSTS' values.
+
+ For most CISC machines, the default cost is a good approximation
+ of the true cost of the addressing mode. However, on RISC
+ machines, all instructions normally have the same length and
+ execution time. Hence all addresses will have equal costs.
+
+ In cases where more than one form of an address is known, the form
+ with the lowest cost will be used. If multiple forms have the
+ same, lowest, cost, the one that is the most complex will be used.
+
+ For example, suppose an address that is equal to the sum of a
+ register and a constant is used twice in the same basic block.
+ When this macro is not defined, the address will be computed in a
+ register and memory references will be indirect through that
+ register. On machines where the cost of the addressing mode
+ containing the sum is no higher than that of a simple indirect
+ reference, this will produce an additional instruction and
+ possibly require an additional register. Proper specification of
+ this macro eliminates this overhead for such machines.
+
+ Similar use of this macro is made in strength reduction of loops.
+
+ ADDRESS need not be valid as an address. In such a case, the cost
+ is not relevant and can be any value; invalid addresses need not be
+ assigned a different cost.
+
+ On machines where an address involving more than one register is as
+ cheap as an address computation involving only one register,
+ defining `ADDRESS_COST' to reflect this can cause two registers to
+ be live over a region of code where only one would have been if
+ `ADDRESS_COST' were not defined in that manner. This effect should
+ be considered in the definition of this macro. Equivalent costs
+ should probably only be given to addresses with different numbers
+ of registers on machines with lots of registers.
+
+ This macro will normally either not be defined or be defined as a
+ constant. */
+
+#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) 7
+/* A C expression for the cost of moving data from a register in class
+ FROM to one in class TO. The classes are expressed using the
+ enumeration values such as `GENERAL_REGS'. A value of 2 is the
+ default; other values are interpreted relative to that.
+
+ It is not required that the cost always equal 2 when FROM is the
+ same as TO; on some machines it is expensive to move between
+ registers if they are not general registers.
+
+ If reload sees an insn consisting of a single `set' between two
+ hard registers, and if `REGISTER_MOVE_COST' applied to their
+ classes returns a value of 2, reload does not check to ensure that
+ the constraints of the insn are met. Setting a cost of other than
+ 2 will allow reload to verify that the constraints are met. You
+ should do this if the `movM' pattern's constraints do not allow
+ such copying. */
+
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 6
+/* A C expression for the cost of moving data of mode M between a
+ register and memory. A value of 4 is the default; this cost is
+ relative to those in `REGISTER_MOVE_COST'.
+
+ If moving between registers and memory is more expensive than
+ between two registers, you should define this macro to express the
+ relative cost. */
+
+#define SLOW_BYTE_ACCESS 0
+/* Define this macro as a C expression which is nonzero if accessing
+ less than a word of memory (i.e. a `char' or a `short') is no
+ faster than accessing a word of memory, i.e., if such accesses
+ require more than one instruction or if there is no difference in
+ cost between byte and (aligned) word loads.
+
+ When this macro is not defined, the compiler will access a field by
+ finding the smallest containing object; when it is defined, a
+ fullword load will be used if alignment permits. Unless byte
+ accesses are faster than word accesses, using word accesses is
+ preferable since it may eliminate subsequent memory access if
+ subsequent accesses occur to other fields in the same word of the
+ structure, but to different bytes.
+
+ `SLOW_ZERO_EXTEND'
+ Define this macro if zero-extension (of a `char' or `short' to an
+ `int') can be done faster if the destination is a register that is
+ known to be zero.
+
+ If you define this macro, you must have instruction patterns that
+ recognize RTL structures like this:
+
+ (set (strict_low_part (subreg:QI (reg:SI ...) 0)) ...)
+
+ and likewise for `HImode'.
+
+ `SLOW_UNALIGNED_ACCESS'
+ Define this macro to be the value 1 if unaligned accesses have a
+ cost many times greater than aligned accesses, for example if they
+ are emulated in a trap handler.
+
+ When this macro is non-zero, the compiler will act as if
+ `STRICT_ALIGNMENT' were non-zero when generating code for block
+ moves. This can cause significantly more instructions to be
+ produced. Therefore, do not set this macro non-zero if unaligned
+ accesses only add a cycle or two to the time for a memory access.
+
+ If the value of this macro is always zero, it need not be defined.
+
+ `DONT_REDUCE_ADDR'
+ Define this macro to inhibit strength reduction of memory
+ addresses. (On some machines, such strength reduction seems to do
+ harm rather than good.)
+
+ `MOVE_RATIO'
+ The number of scalar move insns which should be generated instead
+ of a string move insn or a library call. Increasing the value
+ will always make code faster, but eventually incurs high cost in
+ increased code size.
+
+ If you don't define this, a reasonable default is used. */
+
+#define NO_FUNCTION_CSE
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register. */
+
+#define NO_RECURSIVE_FUNCTION_CSE
+/* Define this macro if it is as good or better for a function to call
+ itself with an explicit address than to call an address kept in a
+ register.
+
+ `ADJUST_COST (INSN, LINK, DEP_INSN, COST)'
+ A C statement (sans semicolon) to update the integer variable COST
+ based on the relationship between INSN that is dependent on
+ DEP_INSN through the dependence LINK. The default is to make no
+ adjustment to COST. This can be used for example to specify to
+ the scheduler that an output- or anti-dependence does not incur
+ the same cost as a data-dependence.
+
+ `ADJUST_PRIORITY (INSN)'
+ A C statement (sans semicolon) to update the integer scheduling
+ priority `INSN_PRIORITY(INSN)'. Reduce the priority to execute
+ the INSN earlier, increase the priority to execute INSN later.
+ Do not define this macro if you do not need to adjust the
+ scheduling priorities of insns. */
+
+#define TEXT_SECTION_ASM_OP ".text"
+/* A C expression whose value is a string containing the assembler
+ operation that should precede instructions and read-only data.
+ Normally `".text"' is right. */
+
+#define DATA_SECTION_ASM_OP ".data"
+/* A C expression whose value is a string containing the assembler
+ operation to identify the following data as writable initialized
+ data. Normally `".data"' is right. */
+
+
+#undef SELECT_SECTION /* Hide default. */
+#define SELECT_SECTION(EXP,RELOC) data_section ()
+/* `SELECT_SECTION (EXP, RELOC)'
+ A C statement or statements to switch to the appropriate section
+ for output of EXP. You can assume that EXP is either a `VAR_DECL'
+ node or a constant of some sort. RELOC indicates whether the
+ initial value of EXP requires link-time relocations. Select the
+ section by calling `text_section' or one of the alternatives for
+ other sections.
+
+ Do not define this macro if you put all read-only variables and
+ constants in the read-only data section (usually the text section). */
+
+/* `SELECT_RTX_SECTION (MODE, RTX)'
+ A C statement or statements to switch to the appropriate section
+ for output of RTX in mode MODE. You can assume that RTX is some
+ kind of constant in RTL. The argument MODE is redundant except in
+ the case of a `const_int' rtx. Select the section by calling
+ `text_section' or one of the alternatives for other sections.
+
+ Do not define this macro if you put all constants in the read-only
+ data section. */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used.
+
+ This macro is irrelevant if there is no separate readonly data
+ section. */
+
+#define ASM_COMMENT_START " ; "
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will
+ end at the end of the line. */
+
+#define ASM_APP_ON "/* #APP */\n"
+/* A C string constant for text to be output before each `asm'
+ statement or group of consecutive ones. Normally this is
+ `"#APP"', which is a comment that has no effect on most assemblers
+ but tells the GNU assembler that it must check the lines that
+ follow for all valid assembler constructs. */
+
+#define ASM_APP_OFF "/* #NOAPP */\n"
+/* A C string constant for text to be output after each `asm'
+ statement or group of consecutive ones. Normally this is
+ `"#NO_APP"', which tells the GNU assembler to resume making the
+ time-saving assumptions that are valid for ordinary compiler
+ output. */
+
+
+#define OBJC_PROLOGUE {}
+/* A C statement to output any assembler statements which are
+ required to precede any Objective C object definitions or message
+ sending. The statement is executed only when compiling an
+ Objective C program. */
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+ fprintf ((STREAM), ".double %.20e\n", (VALUE))
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+ asm_output_float ((STREAM), (VALUE))
+
+/* `ASM_OUTPUT_LONG_DOUBLE (STREAM, VALUE)'
+ `ASM_OUTPUT_THREE_QUARTER_FLOAT (STREAM, VALUE)'
+ `ASM_OUTPUT_SHORT_FLOAT (STREAM, VALUE)'
+ `ASM_OUTPUT_BYTE_FLOAT (STREAM, VALUE)'
+ A C statement to output to the stdio stream STREAM an assembler
+ instruction to assemble a floating-point constant of `TFmode',
+ `DFmode', `SFmode', `TQFmode', `HFmode', or `QFmode',
+ respectively, whose value is VALUE. VALUE will be a C expression
+ of type `REAL_VALUE_TYPE'. Macros such as
+ `REAL_VALUE_TO_TARGET_DOUBLE' are useful for writing these
+ definitions. */
+
+#define ASM_OUTPUT_INT(FILE, VALUE) \
+ ( fprintf ((FILE), "\t.long "), \
+ output_addr_const ((FILE), (VALUE)), \
+ fputs ("\n", (FILE)))
+
+ /* Likewise for `short' and `char' constants. */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+ asm_output_short ((FILE), (VALUE))
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+ asm_output_char ((FILE), (VALUE))
+
+/* `ASM_OUTPUT_QUADRUPLE_INT (STREAM, EXP)'
+ A C statement to output to the stdio stream STREAM an assembler
+ instruction to assemble an integer of 16, 8, 4, 2 or 1 bytes,
+ respectively, whose value is VALUE. The argument EXP will be an
+ RTL expression which represents a constant value. Use
+ `output_addr_const (STREAM, EXP)' to output this value as an
+ assembler expression.
+
+ For sizes larger than `UNITS_PER_WORD', if the action of a macro
+ would be identical to repeatedly calling the macro corresponding to
+ a size of `UNITS_PER_WORD', once for each word, you need not define
+ the macro. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ asm_output_byte ((FILE), (VALUE))
+/* A C statement to output to the stdio stream STREAM an assembler
+ instruction to assemble a single byte containing the number VALUE. */
+
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) \
+ ((C) == '\n' || ((C) == '$'))
+/* Define this macro as a C expression which is nonzero if C is used
+ as a logical line separator by the assembler.
+
+ If you do not define this macro, the default is that only the
+ character `;' is treated as a logical line separator. */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+/* These macros are defined as C string constants, describing the
+ syntax in the assembler for grouping arithmetic expressions. The
+ following definitions are correct for most assemblers:
+
+ #define ASM_OPEN_PAREN "("
+ #define ASM_CLOSE_PAREN ")"
+
+ These macros are provided by `real.h' for writing the definitions of
+ `ASM_OUTPUT_DOUBLE' and the like: */
+
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+do { \
+ fputs ("\t.comm ", (STREAM)); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ",%d\n", (SIZE)); \
+} while (0)
+/* A C statement (sans semicolon) to output to the stdio stream
+ STREAM the assembler definition of a common-label named NAME whose
+ size is SIZE bytes. The variable ROUNDED is the size rounded up
+ to whatever alignment the caller wants.
+
+ Use the expression `assemble_name (STREAM, NAME)' to output the
+ name itself; before and after that, output the additional
+ assembler syntax for defining the name, and a newline.
+
+ This macro controls how the assembler definitions of uninitialized
+ common global variables are output. */
+
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+do { \
+ fputs ("\t.lcomm ", (STREAM)); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ",%d\n", (SIZE)); \
+} while (0)
+/* A C statement (sans semicolon) to output to the stdio stream
+ STREAM the assembler definition of a local-common-label named NAME
+ whose size is SIZE bytes. The variable ROUNDED is the size
+ rounded up to whatever alignment the caller wants.
+
+ Use the expression `assemble_name (STREAM, NAME)' to output the
+ name itself; before and after that, output the additional
+ assembler syntax for defining the name, and a newline.
+
+ This macro controls how the assembler definitions of uninitialized
+ static variables are output. */
+
+#define ASM_OUTPUT_LABEL(STREAM, NAME) \
+do { \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ":\n"); \
+} while (0)
+/* A C statement (sans semicolon) to output to the stdio stream
+ STREAM the assembler definition of a label named NAME. Use the
+ expression `assemble_name (STREAM, NAME)' to output the name
+ itself; before and after that, output the additional assembler
+ syntax for defining the name, and a newline. */
+
+#undef WEAK_ASM_OP
+#define WEAK_ASM_OP ".weak"
+
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf ((FILE), "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name ((FILE), (FNAME)); \
+ fprintf ((FILE), ","); \
+ assemble_name ((FILE), label); \
+ fprintf ((FILE), "-"); \
+ assemble_name ((FILE), (FNAME)); \
+ putc ('\n', (FILE)); \
+ } \
+ } while (0)
+/* A C statement (sans semicolon) to output to the stdio stream
+ STREAM any text necessary for declaring the size of a function
+ which is being defined. The argument NAME is the name of the
+ function. The argument DECL is the `FUNCTION_DECL' tree node
+ representing the function.
+
+ If this macro is not defined, then the function size is not
+ defined. */
+
+
+
+#define ESCAPES \
+"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
+/* A table of byte codes used by the ASM_OUTPUT_ASCII and
+ ASM_OUTPUT_LIMITED_STRING macros. Each byte in the table
+ corresponds to a particular byte value [0..255]. For any
+ given byte value, if the value in the corresponding table
+ position is zero, the given character can be output directly.
+ If the table value is 1, the byte must be output as a \ooo
+ octal escape. If the table's value is anything else, then the
+ byte value should be output as a \ followed by the value
+ in the table. Note that we can use standard UN*X escape
+ sequences for many control characters, but we don't use
+ \a to represent BEL because some svr4 assemblers (e.g. on
+ the i386) don't know about that. Also, we don't use \v
+ since some versions of gas, such as 2.2 did not accept it. */
+
+#define ASM_GLOBALIZE_LABEL(STREAM, NAME) \
+do { \
+ fprintf ((STREAM), ".global\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), "\n"); \
+} while (0)
+/* A C statement (sans semicolon) to output to the stdio stream
+ STREAM some commands that will make the label NAME global; that
+ is, available for reference from other files. Use the expression
+ `assemble_name (STREAM, NAME)' to output the name itself; before
+ and after that, output the additional assembler syntax for making
+ that name global, and a newline. */
+
+#undef ASM_FORMAT_PRIVATE_NAME
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+/* A C expression to assign to OUTVAR (which is a variable of type
+ `char *') a newly allocated string made from the string NAME and
+ the number NUMBER, with some suitable punctuation added. Use
+ `alloca' to get space for the string.
+
+ The string will be used as an argument to `ASM_OUTPUT_LABELREF' to
+ produce an assembler label for an internal static variable whose
+ name is NAME. Therefore, the string must be such as to result in
+ valid assembler code. The argument NUMBER is different each time
+ this macro is executed; it prevents conflicts between
+ similarly-named internal static variables in different scopes.
+
+ Ideally this string should not be a valid C identifier, to prevent
+ any conflict with the user's own symbols. Most assemblers allow
+ periods or percent signs in assembler symbols; putting at least
+ one of these between the name and the number will suffice. */
+
+#define REGISTER_NAMES { \
+ "$00","$01","$02","$03","iph","ipl","sph","spl", \
+ "pch","pcl","wreg","status","dph","dpl","$0e","mulh", \
+ "$10","$11","$12","$13","$14","$15","$16","$17", \
+ "$18","$19","$1a","$1b","$1c","$1d","$1e","$1f", \
+ "$20","$21","$22","$23","$24","$25","$26","$27", \
+ "$28","$29","$2a","$2b","$2c","$2d","$2e","$2f", \
+ "$30","$31","$32","$33","$34","$35","$36","$37", \
+ "$38","$39","$3a","$3b","$3c","$3d","$3e","$3f", \
+ "$40","$41","$42","$43","$44","$45","$46","$47", \
+ "$48","$49","$4a","$4b","$4c","$4d","$4e","$4f", \
+ "$50","$51","$52","$53","$54","$55","$56","$57", \
+ "$58","$59","$5a","$5b","$5c","$5d","$5e","$5f", \
+ "$60","$61","$62","$63","$64","$65","$66","$67", \
+ "$68","$69","$6a","$6b","$6c","$6d","$6e","$6f", \
+ "$70","$71","$72","$73","$74","$75","$76","$77", \
+ "$78","$79","$7a","$7b","$7c","$7d","callh","calll", \
+ "$80","$81","$82","$83","$84","$85","$86","$87", \
+ "$88","$89","$8a","$8b","$8c","$8d","$8e","$8f", \
+ "$90","$91","$92","$93","$94","$95","$96","$97", \
+ "$98","$99","$9a","$9b","$9c","$9d","$9e","$9f", \
+ "$a0","$a1","$a2","$a3","$a4","$a5","$a6","$a7", \
+ "$a8","$a9","$aa","$ab","$ac","$ad","$ae","$af", \
+ "$b0","$b1","$b2","$b3","$b4","$b5","$b6","$b7", \
+ "$b8","$b9","$ba","$bb","$bc","$bd","$be","$bf", \
+ "$c0","$c1","$c2","$c3","$c4","$c5","$c6","$c7", \
+ "$c8","$c9","$ca","$cb","$cc","$cd","$ce","$cf", \
+ "$d0","$d1","$d2","$d3","$d4","$d5","$d6","$d7", \
+ "$d8","$d9","$da","$db","$dc","$dd","$de","$df", \
+ "$e0","$e1","$e2","$e3","$e4","$e5","$e6","$e7", \
+ "$e8","$e9","$ea","$eb","$ec","$ed","$ee","$ef", \
+ "$f0","$f1","$f2","$f3","$f4","$f5","$f6","$f7", \
+ "$f8","$f9","$fa","$fb","$fc","$fd","$fe","$ff", \
+ "vfph","vfpl","vaph","vapl"}
+
+/* A C initializer containing the assembler's names for the machine
+ registers, each one as a C string constant. This is what
+ translates register numbers in the compiler into assembler
+ language. */
+
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ print_operand ((STREAM), (X), (CODE))
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand X. X is an RTL
+ expression.
+
+ CODE is a value that can be used to specify one of several ways of
+ printing the operand. It is used when identical operands must be
+ printed differently depending on the context. CODE comes from the
+ `%' specification that was used to request printing of the
+ operand. If the specification was just `%DIGIT' then CODE is 0;
+ if the specification was `%LTR DIGIT' then CODE is the ASCII code
+ for LTR.
+
+ If X is a register, this macro should print the register's name.
+ The names can be found in an array `reg_names' whose type is `char
+ *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
+
+ When the machine description has a specification `%PUNCT' (a `%'
+ followed by a punctuation character), this macro is called with a
+ null pointer for X and the punctuation character for CODE. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '<' || (CODE) == '>')
+
+/* A C expression which evaluates to true if CODE is a valid
+ punctuation character for use in the `PRINT_OPERAND' macro. If
+ `PRINT_OPERAND_PUNCT_VALID_P' is not defined, it means that no
+ punctuation characters (except for the standard one, `%') are used
+ in this way. */
+
+#define PRINT_OPERAND_ADDRESS(STREAM, X) print_operand_address(STREAM, X)
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand that is a memory
+ reference whose address is X. X is an RTL expression.
+
+ On some machines, the syntax for a symbolic address depends on the
+ section that the address refers to. On these machines, define the
+ macro `ENCODE_SECTION_INFO' to store the information into the
+ `symbol_ref', and then check for it here. *Note Assembler
+ Format::. */
+
+/* Since register names don't have a prefix, we must preface all
+ user identifiers with the '_' to prevent confusion. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+#define LOCAL_LABEL_PREFIX ".L"
+/* `LOCAL_LABEL_PREFIX'
+ `REGISTER_PREFIX'
+ `IMMEDIATE_PREFIX'
+ If defined, C string expressions to be used for the `%R', `%L',
+ `%U', and `%I' options of `asm_fprintf' (see `final.c'). These
+ are useful when a single `md' file must support multiple assembler
+ formats. In that case, the various `tm.h' files can define these
+ macros differently. */
+
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ asm_fprintf ((STREAM), "\tpage\t%L%d\n\tjmp\t%L%d\n", (VALUE), (VALUE))
+
+/* elfos.h presumes that we will want switch/case dispatch tables aligned.
+ This is not so for the ip2k. */
+#undef ASM_OUTPUT_CASE_LABEL
+
+#undef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ asm_fprintf ((STREAM), "\tpage\t%L%d\n\tjmp\t%L%d\n", (VALUE), (VALUE))
+
+/* This macro should be provided on machines where the addresses in a
+ dispatch table are absolute.
+
+ The definition should be a C statement to output to the stdio
+ stream STREAM an assembler pseudo-instruction to generate a
+ reference to a label. VALUE is the number of an internal label
+ whose definition is output using `ASM_OUTPUT_INTERNAL_LABEL'. For
+ example,
+
+ fprintf ((STREAM), "\t.word L%d\n", (VALUE)) */
+
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ fprintf ((STREAM), "\t.align %d\n", (POWER))
+/* A C statement to output to the stdio stream STREAM an assembler
+ command to advance the location counter to a multiple of 2 to the
+ POWER bytes. POWER will be a C expression of type `int'. */
+
+/* Since instructions are 16 bit word addresses, we should lie and claim that
+ the dispatch vectors are in QImode. Otherwise the offset into the jump
+ table will be scaled by the MODE_SIZE. */
+
+#define CASE_VECTOR_MODE QImode
+/* An alias for a machine mode name. This is the machine mode that
+ elements of a jump-table should have. */
+
+
+/* `CASE_VALUES_THRESHOLD'
+ Define this to be the smallest number of different values for
+ which it is best to use a jump-table instead of a tree of
+ conditional branches. The default is four for machines with a
+ `casesi' instruction and five otherwise. This is best for most
+ machines. */
+
+#undef WORD_REGISTER_OPERATIONS
+/* Define this macro if operations between registers with integral
+ mode smaller than a word are always performed on the entire
+ register. Most RISC machines have this property and most CISC
+ machines do not. */
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+/* An alias for a tree code that is the easiest kind of division to
+ compile code for in the general case. It may be `TRUNC_DIV_EXPR',
+ `FLOOR_DIV_EXPR', `CEIL_DIV_EXPR' or `ROUND_DIV_EXPR'. These four
+ division operators differ in how they round the result to an
+ integer. `EASY_DIV_EXPR' is used when it is permissible to use
+ any of those kinds of division and the choice should be made on
+ the basis of efficiency. */
+
+#define MOVE_MAX 1
+/* The maximum number of bytes that a single instruction can move
+ quickly between memory and registers or between two memory
+ locations. */
+
+#define MOVE_RATIO 3
+/* MOVE_RATIO is the number of move instructions that is better than a
+ block move. Make this small on the IP2k, since the code size grows very
+ large with each move. */
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+/* A C expression which is nonzero if on this machine it is safe to
+ "convert" an integer of INPREC bits to one of OUTPREC bits (where
+ OUTPREC is smaller than INPREC) by merely operating on it as if it
+ had only OUTPREC bits.
+
+ On many machines, this expression can be 1.
+
+ When `TRULY_NOOP_TRUNCATION' returns 1 for a pair of sizes for
+ modes for which `MODES_TIEABLE_P' is 0, suboptimal code can result.
+ If this is the case, making `TRULY_NOOP_TRUNCATION' return 0 in
+ such cases may improve things. */
+
+#define Pmode HImode
+/* An alias for the machine mode for pointers. On most machines,
+ define this to be the integer mode corresponding to the width of a
+ hardware pointer; `SImode' on 32-bit machine or `DImode' on 64-bit
+ machines. On some machines you must define this to be one of the
+ partial integer modes, such as `PSImode'.
+
+ The width of `Pmode' must be at least as large as the value of
+ `POINTER_SIZE'. If it is not equal, you must define the macro
+ `POINTERS_EXTEND_UNSIGNED' to specify how pointers are extended to
+ `Pmode'. */
+
+#define FUNCTION_MODE HImode
+/* An alias for the machine mode used for memory references to
+ functions being called, in `call' RTL expressions. On most
+ machines this should be `QImode'. */
+
+#define INTEGRATE_THRESHOLD(DECL) \
+ (1 + (3 * list_length (DECL_ARGUMENTS (DECL)) / 2))
+/* A C expression for the maximum number of instructions above which
+ the function DECL should not be inlined. DECL is a
+ `FUNCTION_DECL' node.
+
+ The default definition of this macro is 64 plus 8 times the number
+ of arguments that the function accepts. Some people think a larger
+ threshold should be used on RISC machines. */
+
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+ valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+/* If defined, a C expression whose value is nonzero if IDENTIFIER
+ with arguments ARGS is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL.
+ (Used, for example, for the IP2k `naked' function attribute; see
+ doc/extend.texi.) */
+
+#define VALID_MACHINE_TYPE_ATTRIBUTE(TYPE, ATTRIBUTES, IDENTIFIER, ARGS) \
+ valid_machine_type_attribute(TYPE, ATTRIBUTES, IDENTIFIER, ARGS)
+/* If defined, a C expression whose value is nonzero if IDENTIFIER
+ with arguments ARGS is a valid machine specific attribute for TYPE.
+ The attributes in ATTRIBUTES have previously been assigned to TYPE. */
+
+#define DOLLARS_IN_IDENTIFIERS 0
+/* Define this macro to control use of the character `$' in identifier
+ names. 0 means `$' is not allowed by default; 1 means it is
+ allowed. 1 is the default; there is no need to define this macro
+ in that case. This macro controls the compiler proper; it does
+ not affect the preprocessor. */
+
+#define MACHINE_DEPENDENT_REORG(INSN) machine_dependent_reorg (INSN)
+/* In rare cases, correct code generation requires extra machine
+ dependent processing between the second jump optimization pass and
+ delayed branch scheduling. On those machines, define this macro
+ as a C statement to act on the code starting at INSN. */
+
+/* State flags shared with ip2k.c; they sequence the phases of the
+ machine-dependent reorg pass and gate many insn conditions and
+ peephole2 patterns in ip2k.md. */
+
+extern int ip2k_reorg_in_progress;
+/* Flag if we're in the middle of IP2k-specific reorganization. */
+
+extern int ip2k_reorg_completed;
+/* Flag if we've completed our IP2k-specific reorganization. If we have
+ then we allow quite a few more tricks than before. */
+
+extern int ip2k_reorg_split_dimode;
+extern int ip2k_reorg_split_simode;
+extern int ip2k_reorg_split_qimode;
+extern int ip2k_reorg_split_himode;
+/* Flags for various split operations that we run in sequence. */
+
+extern int ip2k_reorg_merge_qimode;
+/* Flag to indicate that it's safe to merge QImode operands. */
+
+#define GIV_SORT_CRITERION(X, Y) \
+ do { \
+ if (GET_CODE ((X)->add_val) == CONST_INT \
+ && GET_CODE ((Y)->add_val) == CONST_INT) \
+ return INTVAL ((X)->add_val) - INTVAL ((Y)->add_val); \
+ } while (0)
+
+/* Sort givs by increasing constant addend so the smallest offsets come
+ first; when either addend is not a CONST_INT no ordering decision is
+ made here and control falls through to the caller's default.
+
+ In some cases, the strength reduction optimization pass can
+ produce better code if this is defined. This macro controls the
+ order that induction variables are combined. This macro is
+ particularly useful if the target has limited addressing modes.
+ For instance, the SH target has only positive offsets in
+ addresses. Thus sorting to put the smallest address first allows
+ the most combinations to be found. */
+
+#define TRAMPOLINE_TEMPLATE(FILE) abort ()
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function.
+
+ NOTE(review): TRAMPOLINE_TEMPLATE aborts, so trampolines appear to
+ be unsupported on this port. Also, TRAMPOLINE_SIZE (4) is smaller
+ than the highest offset written below (an HImode store at offset 6
+ implies at least 8 bytes) -- confirm before relying on nested
+ functions. */
+
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx_MEM (HImode, plus_constant ((TRAMP), 2)), \
+ CXT); \
+ emit_move_insn (gen_rtx_MEM (HImode, plus_constant ((TRAMP), 6)), \
+ FNADDR); \
+}
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's.
+ We do no condition-code tracking at all on this port. */
+
+#define NOTICE_UPDATE_CC(EXP, INSN) (0)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. This emits only an assembly
+ comment, so profiling is effectively a no-op here. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf ((FILE), "/* profiler %d */", (LABELNO))
+
+#define TARGET_MEM_FUNCTIONS
+/* Define this macro if GNU CC should generate calls to the System V
+ (and ANSI C) library functions `memcpy' and `memset' rather than
+ the BSD functions `bcopy' and `bzero'. */
+
+
+#undef ENDFILE_SPEC
+#undef LINK_SPEC
+#undef STARTFILE_SPEC
+
+/* Another C string constant used much like `LINK_SPEC'. The
+ difference between the two is that `ENDFILE_SPEC' is used at the
+ very end of the command given to the linker.
+
+ Do not define this macro if it does not need to do anything. */
+
+/* Helpers for building assembler templates: AS1/AS2 produce
+ "op\targ" and "op\targ,arg" strings by stringizing their arguments
+ under ISO C; the pre-ISO fallback relies on the old K&R behaviour
+ of substituting macro arguments inside string literals. */
+#if defined(__STDC__) || defined(ALMOST_STDC)
+#define AS2(a,b,c) #a "\t" #b "," #c
+#define AS1(a,b) #a "\t" #b
+#else
+#define AS1(a,b) "a b"
+#define AS2(a,b,c) "a b,c"
+#endif
+#define OUT_AS1(a,b) output_asm_insn (AS1 (a,b), operands)
+#define OUT_AS2(a,b,c) output_asm_insn (AS2 (a,b,c), operands)
+#define CR_TAB "\n\t"
+
+/* Define this macro as a C statement that declares additional library
+ routines or renames existing ones. `init_optabs' calls this macro
+ after initializing all the normal library routines.
+
+ We point the multiply and compare optabs at helper routines
+ supplied by this port's libgcc. */
+
+#define INIT_TARGET_OPTABS \
+{ \
+ smul_optab->handlers[(int) SImode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, "_mulsi3"); \
+ \
+ smul_optab->handlers[(int) DImode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, "_muldi3"); \
+ \
+ cmp_optab->handlers[(int) HImode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, "_cmphi2"); \
+ \
+ cmp_optab->handlers[(int) SImode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, "_cmpsi2"); \
+}
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+/* Predicate/RTX-code pairs used by genrecog. Note the trailing comma
+ after the last entry: this macro is expanded inside an initializer
+ list. */
+#define PREDICATE_CODES \
+ {"ip2k_ip_operand", {MEM}}, \
+ {"ip2k_short_operand", {MEM}}, \
+ {"ip2k_gen_operand", {MEM, REG, SUBREG}}, \
+ {"ip2k_nonptr_operand", {REG, SUBREG}}, \
+ {"ip2k_ptr_operand", {REG, SUBREG}}, \
+ {"ip2k_split_dest_operand", {REG, SUBREG, MEM}}, \
+ {"ip2k_sp_operand", {REG}}, \
+ {"ip2k_nonsp_reg_operand", {REG, SUBREG}}, \
+ {"ip2k_symbol_ref_operand", {SYMBOL_REF}}, \
+ {"ip2k_binary_operator", {PLUS, MINUS, MULT, DIV, \
+ UDIV, MOD, UMOD, AND, IOR, \
+ XOR, COMPARE, ASHIFT, \
+ ASHIFTRT, LSHIFTRT}}, \
+ {"ip2k_unary_operator", {NEG, NOT, SIGN_EXTEND, \
+ ZERO_EXTEND}}, \
+ {"ip2k_unsigned_comparison_operator", {LTU, GTU, NE, \
+ EQ, LEU, GEU}},\
+ {"ip2k_signed_comparison_operator", {LT, GT, LE, GE}},
+
+#define DWARF2_DEBUGGING_INFO 1
+
+#define DWARF2_ASM_LINE_DEBUG_INFO 1
+
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Miscellaneous macros to describe machine specifics. */
+
+#define STORE_FLAG_VALUE 1
+
+#define IS_PSEUDO_P(R) (REGNO (R) >= FIRST_PSEUDO_REGISTER)
+
+/* Default calculations would cause DWARF address sizes to be 2 bytes,
+ but the Harvard architecture of the IP2k and the word-addressed 64k
+ of instruction memory causes us to want a 32-bit "address" field. */
+#undef DWARF2_ADDR_SIZE
+#define DWARF2_ADDR_SIZE 4
+
--- /dev/null
+;; -*- Mode: Scheme -*-
+;; GCC machine description for Ubicom IP2022 Communications Controller.
+;; Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+;; Contributed by Red Hat, Inc and Ubicom, Inc.
+;;
+;; This file is part of GNU CC.
+;;
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA. */
+
+;; Default all instruction lengths to two bytes (one 16-bit instruction).
+;;
+(define_attr "length" "" (const_int 2))
+
+;; Define if we can "skip" an insn or not.
+(define_attr "skip" "no,yes" (const_string "no"))
+
+;; Define whether an insn clobbers WREG or not.
+(define_attr "clobberw" "no,yes" (const_string "yes"))
+
+;; Performance Issues:
+;;
+;; With the IP2k only having one really useful pointer register we have to
+;; make most of our instruction patterns only match one offsettable address
+;; before addressing becomes strict whereas afterwards of course we can use
+;; any register details that have become fixed. As we've already committed
+;; any reloads at this point of course we're a little late so we have to use
+;; a number of peephole2 optimizations to remerge viable patterns. We can
+;; do a bit more tidying up in the machine-dependent reorg pass to try and
+;; make things better still. None of this is ideal, but it's *much* better
+;; than nothing.
+
+;; Constraints:
+;;
+;; I - -255..-1 - all other literal values have to be loaded
+;; J - 0..7 - valid bit number in a register
+;; K - 0..127 - valid offset for addressing mode
+;; L - 1..127 - positive count suitable for shift.
+;; M - -1 as a literal value
+;; N - +1 as a literal value
+;; O - zero
+;; P - 0..255
+;;
+;; a - DP or IP registers (general address)
+;; f - IP register
+;; j - IPL register
+;; k - IPH register
+;; b - DP register
+;; y - DPH register
+;; z - DPL register
+;; q - SP register
+;; c - DP or SP registers (offsettable address)
+;; d - non-pointer registers (not SP, DP, IP)
+;; u - non-SP registers (everything except SP)
+;;
+;; R - Indirect thru IP - Avoid this except for QI mode, since we
+;; can't access extra bytes.
+;; S - Short (stack/dp address). Pointer with 0..127 displacement
+;; Note that 0(SP) has undefined contents due to post-decrement push
+;; T - data-section immediate value. A CONST_INT or SYMBOL_REF into .data
+
+;; Special assembly-language format effectors:
+;;
+;; ABCD -
+;; Reference up to 4 big-endian registers - %A0 is Rn+0, while %D0 is Rn+3
+;; STUVWXYZ -
+;; Reference up to 8 big-endian registers - %S0 is Rn+0, while %Z0 is Rn+7
+;;
+;; H - High part of 16 bit address or literal %hi8data(v) or %hi8insn(v)
+;; L - Low part of 16 bit address or literal %lo8data(v) or %lo8insn(v)
+;; b - print a literal value with no punctuation (typically bit selector)
+;; e - print 1 << v ('exponent')
+;; n - print negative number
+;; x - print 16 bit hex number
+;; < - interior stack push; adjust any stack-relative operands accordingly
+;; > - interior stack pop; clear adjustment.
+\f
+;;
+;; Basic operations to move data in and out of fr's. Also extended to
+;; cover the loading of w with immediates
+;;
+
+;; Load WREG (hard register 10) from a general operand. Only valid
+;; once QImode splitting is underway; skippable because it is a
+;; single instruction.
+(define_insn "*movqi_w_gen"
+ [(set (reg:QI 10)
+ (match_operand:QI 0 "general_operand" "rSi"))]
+ "(ip2k_reorg_split_qimode)"
+ "mov\\tw,%0"
+ [(set_attr "skip" "yes")])
+
+;; Store WREG to a register or short-addressable memory operand,
+;; leaving WREG intact.
+(define_insn "*movqi_fr_w"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=rS")
+ (reg:QI 10))]
+ "(ip2k_reorg_split_qimode)"
+ "mov\\t%0,w"
+ [(set_attr "skip" "yes")
+ (set_attr "clobberw" "no")])
+
+
+;; Handle the cases where we get back to back redundant mov patterns issued.
+;; This of course sounds somewhat absurd but is actually reasonably common
+;; because we aren't able to match certain patterns before registers are
+;; chosen. This is particularly true of memory to memory operations where
+;; we can't provide patterns that will guarantee to match every time because
+;; this would require reloads in the middle of instructions. If we
+;; discover a case that doesn't need a reload of course then this combiner
+;; operation will tidy up for us.
+;;
+;; Warning! Whilst it would be nice to match operand 0 as a general operand
+;; we mustn't do so because this doesn't work with the REG_DEAD check.
+;;
+;; Collapse "tmp = src; dst = tmp" into "dst = src" when tmp dies and
+;; dst neither is SP nor mentions tmp.
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "ip2k_gen_operand" ""))
+ (set (match_operand 2 "ip2k_split_dest_operand" "")
+ (match_dup 0))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ! (REG_P (operands[2]) && REGNO (operands[2]) == REG_SP)
+ && (REG_P (operands[2])
+ || ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0])))))"
+ [(set (match_dup 2)
+ (match_dup 1))]
+ "")
+
+;; The same simplification for an immediate source.
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "immediate_operand" ""))
+ (set (match_operand 2 "ip2k_gen_operand" "")
+ (match_dup 0))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ! (REG_P (operands[2]) && REGNO (operands[2]) == REG_SP)
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_dup 1))]
+ "")
+
+;;
+;; Move 8-bit integers.
+;;
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "" "")
+ (match_operand:QI 1 "" ""))]
+ ""
+ "")
+
+;; Push a byte; a single instruction, so it is skippable and leaves
+;; WREG alone.
+(define_insn "*pushqi"
+ [(set (match_operand:QI 0 "push_operand" "=<")
+ (match_operand:QI 1 "general_operand" "g"))]
+ ""
+ "push\\t%1"
+ [(set_attr "skip" "yes")
+ (set_attr "clobberw" "no")])
+
+;; IP isn't offsettable but we can fake this behaviour here and win if we would
+;; otherwise use DP and require a reload from IP. This instruction is only
+;; matched by peephole2 operations.
+;;
+;; The emitted code temporarily adds the offset into IPL, stores via
+;; (IP), then undoes the addition unless IP is dead at this insn.
+;; NOTE(review): this assumes the IPL addition never carries into IPH
+;; for offsets below 0x100 -- confirm the addressing guarantees this.
+(define_insn "*movqi_to_ip_plus_offs"
+ [(set (mem:QI (plus:HI (reg:HI 4)
+ (match_operand 0 "const_int_operand" "P,P")))
+ (match_operand:QI 1 "general_operand" "O,g"))]
+ "reload_completed && (INTVAL (operands[0]) < 0x100)"
+ "*{
+ if (INTVAL (operands[0]) == 1)
+ OUT_AS1 (inc, ipl);
+ else
+ {
+ OUT_AS2 (mov, w, %0);
+ OUT_AS2 (add, ipl, w);
+ }
+
+ switch (which_alternative)
+ {
+ case 0:
+ OUT_AS1 (clr, (IP));
+ break;
+
+ case 1:
+ OUT_AS1 (push, %1%<);
+ OUT_AS1 (pop, (IP)%>);
+ break;
+ }
+
+ if (!find_regno_note (insn, REG_DEAD, REG_IP))
+ {
+ if (INTVAL (operands[0]) == 1)
+ OUT_AS1 (dec, ipl);
+ else
+ OUT_AS2 (sub, ipl, w);
+ }
+ return \"\";
+ }")
+
+;; IP isn't offsettable but we can fake this behaviour here and win if we would
+;; otherwise use DP and require a reload from IP. This instruction is only
+;; matched by peephole2 operations.
+;;
+;; Load side of the faked IP+offset addressing: bump IPL, push/pop the
+;; byte at (IP) into the destination, and restore IPL unless IP is
+;; dead or the destination itself overwrote IP.
+(define_insn "*movqi_from_ip_plus_offs"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=g")
+ (mem:QI (plus:HI (reg:HI 4)
+ (match_operand 1 "const_int_operand" "P"))))]
+ "reload_completed && (INTVAL (operands[1]) < 0x100)"
+ "*{
+ if (INTVAL (operands[1]) == 1)
+ OUT_AS1 (inc, ipl);
+ else
+ {
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (add, ipl, w);
+ }
+ OUT_AS1 (push, (IP)%<);
+ OUT_AS1 (pop, %0%>);
+ if (!find_regno_note (insn, REG_DEAD, REG_IP)
+ && ip2k_xexp_not_uses_reg_p (operands[0], REG_IP, 2))
+ {
+ if (INTVAL (operands[1]) == 1)
+ OUT_AS1 (dec, ipl);
+ else
+ OUT_AS2 (sub, ipl, w);
+ }
+ return \"\";
+ }")
+
+;; General QImode move. Alternative 0 clears the destination in one
+;; insn; every other alternative is split into a load of WREG followed
+;; by a store from WREG once QImode splitting begins.
+(define_insn_and_split "*movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=roR,roR,r, rS,roR")
+ (match_operand:QI 1 "general_operand" " O, ri,o,rioR,rSi"))]
+ ""
+ "@
+ clr\\t%0
+ #
+ #
+ #
+ #"
+ "(ip2k_reorg_split_qimode
+ && (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) != 0))"
+ [(set (reg:QI 10) (match_dup 1))
+ (set (match_dup 0) (reg:QI 10))]
+ ""
+ [(set_attr "skip" "yes,no,no,no,no")
+ (set_attr "clobberw" "no,yes,yes,yes,yes")])
+
+;; If DP was only a copy of IP and dies afterwards, do the offset load
+;; directly via IP instead.
+(define_peephole2
+ [(set (reg:HI 12)
+ (reg:HI 4))
+ (set (match_operand:QI 0 "nonimmediate_operand" "")
+ (mem:QI (plus:HI (reg:HI 12)
+ (match_operand 1 "const_int_operand" ""))))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && peep2_regno_dead_p (2, REG_DP)
+ && ip2k_xexp_not_uses_reg_p (operands[0], REG_DP, 2)
+ && (INTVAL (operands[1]) < 0x100))"
+ [(set (match_dup 0)
+ (mem:QI (plus:HI (reg:HI 4)
+ (match_dup 1))))]
+ "")
+
+;; Likewise for a store through a DP that is merely a copy of IP.
+(define_peephole2
+ [(set (reg:HI 12)
+ (reg:HI 4))
+ (set (mem:QI (plus:HI (reg:HI 12)
+ (match_operand 0 "const_int_operand" "")))
+ (match_operand:QI 1 "general_operand" ""))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && peep2_regno_dead_p (2, REG_DP)
+ && ip2k_xexp_not_uses_reg_p (operands[0], REG_DP, 2)
+ && (INTVAL (operands[0]) < 0x100))"
+ [(set (mem:QI (plus:HI (reg:HI 4)
+ (match_dup 0)))
+ (match_dup 1))]
+ "")
+
+;; Drop an intermediate register when loading via IP+offset and the
+;; register dies immediately.
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand" "")
+ (mem:QI (plus:HI (reg:HI 4)
+ (match_operand 1 "const_int_operand" ""))))
+ (set (match_operand:QI 2 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && peep2_reg_dead_p (2, operands[0]))"
+ [(set (match_dup 2)
+ (mem:QI (plus:HI (reg:HI 4)
+ (match_dup 1))))]
+ "")
+
+;; We sometimes want to copy a value twice, usually when we copy a value into
+;; both a structure slot and into a temporary register. We can win here
+;; because gcc doesn't know about ways of reusing w while we're copying.
+;;
+(define_insn_and_split "*movqi_twice"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=g")
+ (match_operand:QI 1 "general_operand" "g"))
+ (set (match_operand:QI 2 "nonimmediate_operand" "=g")
+ (match_dup 1))]
+ "ip2k_reorg_merge_qimode"
+ "mov\\tw,%1\;mov\\t%0,w\;mov\\t%2,w"
+ "(ip2k_reorg_split_qimode)"
+ [(set (reg:QI 10) (match_dup 1))
+ (set (match_dup 0) (reg:QI 10))
+ (set (match_dup 2) (reg:QI 10))]
+ "")
+
+;; Don't try to match until we've removed redundant reloads. Often this
+;; simplification will remove the need to do two moves!
+;;
+;; NOTE(review): rewriting "op0 = op1; op2 = op0" as a parallel double
+;; store of op1 assumes the first set cannot modify op1 (no overlap of
+;; operands[0] and operands[1]) -- confirm the merge-phase operands
+;; make that impossible.
+(define_peephole2
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_operand" ""))
+ (set (match_operand:QI 2 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "(ip2k_reorg_merge_qimode
+ && (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) != 0))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 2)
+ (match_dup 1))])]
+ "")
+
+;; Don't try to match until we've removed redundant reloads. Often this
+;; simplification will remove the need to do two moves!
+;;
+(define_peephole2
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand:QI 1 "general_operand" ""))
+ (set (match_operand:QI 2 "nonimmediate_operand" "")
+ (match_dup 1))]
+ "(ip2k_reorg_merge_qimode
+ && (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) != 0))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 2)
+ (match_dup 1))])]
+ "")
+
+;;
+;; Move 16-bit integers.
+;;
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "" "")
+ (match_operand:HI 1 "" ""))]
+ ""
+ "")
+
+;; Push the HImode value addressed by IP: the low byte at (IP+1) is
+;; pushed first, then IPL is restored and the high byte at (IP) is
+;; pushed.
+(define_insn "*pushhi_ip"
+ [(set (match_operand:HI 0 "push_operand" "=<")
+ (mem:HI (reg:HI 4)))]
+ "reload_completed"
+ "inc\\tipl\;push\\t(IP)\;dec\\tipl\;push\\t(IP)"
+ [(set_attr "clobberw" "no")])
+
+;; Store an HImode value through IP, high byte at (IP), low byte at
+;; (IP+1); IPL is restored afterwards unless IP is dead.
+(define_insn "*movhi_to_ip"
+ [(set (mem:HI (reg:HI 4))
+ (match_operand:HI 0 "general_operand" "O,roi"))]
+ "reload_completed"
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ OUT_AS1 (clr, (IP));
+ OUT_AS1 (inc, ipl);
+ OUT_AS1 (clr, (IP));
+ if (!find_regno_note (insn, REG_DEAD, REG_IP))
+ OUT_AS1 (dec, ipl);
+ return \"\";
+
+ case 1:
+ OUT_AS2 (mov, w, %H0);
+ OUT_AS2 (mov, (IP), w);
+ OUT_AS2 (mov, w, %L0);
+ OUT_AS1 (inc, ipl);
+ OUT_AS2 (mov, (IP), w);
+ if (!find_regno_note (insn, REG_DEAD, REG_IP))
+ OUT_AS1 (dec, ipl);
+ return \"\";
+ }
+ }")
+
+(define_insn "*movhi_from_ip"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=f,bqdo")
+ (mem:HI (reg:HI 4)))]
+ "reload_completed"
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ OUT_AS1 (push, (IP));
+ OUT_AS1 (inc, ipl);
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, ipl, w);
+ OUT_AS1 (pop, iph);
+ return \"\";
+
+ case 1:
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, %H0, w);
+ OUT_AS1 (inc, ipl);
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, %L0, w);
+ if (!find_regno_note (insn, REG_DEAD, REG_IP))
+ OUT_AS1 (dec, ipl);
+ return \"\";
+ }
+ }")
+
+(define_insn "*movhi_from_ip_plus_offs"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=f,bqdo")
+ (mem:HI (plus:HI (reg:HI 4)
+ (match_operand 1 "const_int_operand" "P, P"))))]
+ "reload_completed && (INTVAL (operands[1]) < 0x100)"
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (add, ipl, w);
+ OUT_AS1 (push, (IP));
+ OUT_AS1 (inc, ipl);
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, ipl, w);
+ OUT_AS1 (pop, iph);
+ return \"\";
+
+ case 1:
+ if (INTVAL (operands[1]) == 1)
+ OUT_AS1 (inc, ipl);
+ else
+ {
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (add, ipl, w);
+ }
+ OUT_AS1 (push, (IP)%<);
+ OUT_AS1 (pop, %H0%>);
+ OUT_AS1 (inc, ipl);
+ OUT_AS1 (push, (IP)%<);
+ OUT_AS1 (pop, %L0%>);
+ if (!find_regno_note (insn, REG_DEAD, REG_IP))
+ {
+ OUT_AS1 (dec, ipl);
+ if (INTVAL (operands[1]) == 1)
+ OUT_AS1 (dec, ipl);
+ else
+ OUT_AS2 (sub, ipl, w);
+ }
+ return \"\";
+ }
+ }")
+
+(define_insn_and_split "*movhi"
+ [(set
+ (match_operand:HI 0 "ip2k_split_dest_operand" "=<,<,uo,b, uS,uo,uo, q,u")
+ (match_operand:HI 1 "general_operand" "ron,i, n,T,uoi,uS,ui,ui,q"))]
+ ""
+ "@
+ push\\t%L1%<\;push\\t%H1%>
+ push\\t%L1%<\;push\\t%H1%>
+ mov\\tw,%H1\;mov\\t%H0,w\;mov\\tw,%L1\;mov\\t%L0,w
+ loadl\\t%x1\;loadh\\t%x1
+ mov\\tw,%H1\;push\\t%L1%<\;pop\\t%L0%>\;mov\\t%H0,w
+ mov\\tw,%H1\;push\\t%L1%<\;pop\\t%L0%>\;mov\\t%H0,w
+ mov\\tw,%H1\;push\\t%L1%<\;pop\\t%L0%>\;mov\\t%H0,w
+ mov\\tw,%H1\;mov\\t%H0,w\;mov\\tw,%L1\;mov\\t%L0,w
+ mov\\tw,%H1\;mov\\t%H0,w\;mov\\tw,%L1\;mov\\t%L0,w"
+ "(ip2k_reorg_split_himode
+ && (GET_CODE (operands[1]) == CONST_INT
+ || (push_operand (operands[0], HImode)
+ && GET_CODE (operands[1]) == REG)
+ || (register_operand (operands[0], HImode)
+ && REGNO (operands[0]) >= 0x80
+ && ip2k_gen_operand (operands[1], HImode))))"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "{
+ ip2k_split_words (QImode, HImode, operands); /* Split into 2=3,4=5 */
+ }"
+ [(set_attr "clobberw" "no,no,yes,no,yes,yes,yes,yes,yes")])
+
+;; We don't generally use IP for HImode indirections because it's not
+;; offsettable, however if we're accessing something that's already pointed
+;; to by IP and would otherwise require a reload of DP then we can win by
+;; simulating HImode accesses via IP instead.
+
+;; Store through a DP that is just a copy of IP and dies: use IP.
+(define_peephole2
+ [(set (reg:HI 12)
+ (reg:HI 4))
+ (set (mem:HI (reg:HI 12))
+ (match_operand:HI 0 "general_operand" ""))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && ip2k_xexp_not_uses_reg_p (operands[0], REG_DP, 2)
+ && peep2_regno_dead_p (2, REG_DP))"
+ [(set (mem:HI (reg:HI 4))
+ (match_dup 0))]
+ "")
+
+;; Likewise for a load.
+(define_peephole2
+ [(set (reg:HI 12)
+ (reg:HI 4))
+ (set (match_operand:HI 0 "nonimmediate_operand" "")
+ (mem:HI (reg:HI 12)))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && ip2k_xexp_not_uses_reg_p (operands[0], REG_DP, 2)
+ && peep2_regno_dead_p (2, REG_DP))"
+ [(set (match_dup 0)
+ (mem:HI (reg:HI 4)))]
+ "")
+
+(define_peephole2
+ [(set (reg:HI 12)
+ (reg:HI 4))
+ (set (match_operand:HI 0 "nonimmediate_operand" "")
+ (mem:HI (plus:HI (reg:HI 12)
+ (match_operand 1 "const_int_operand" ""))))]
+ ;
+ ; We only match here if IP and DP both go dead because emulating
+ ; offsets in conjunction with IP doesn't win unless IP goes
+ ; dead too.
+ ;
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && peep2_regno_dead_p (2, REG_DP)
+ && peep2_regno_dead_p (2, REG_IP)
+ && (INTVAL (operands[1]) < 0x100))"
+ [(set (match_dup 0)
+ (mem:HI (plus:HI (reg:HI 4)
+ (match_dup 1))))]
+ "")
+
+;; Loading IP itself overwrites the pointer anyway, so only DP need
+;; be dead here.
+(define_peephole2
+ [(set (reg:HI 12)
+ (reg:HI 4))
+ (set (reg:HI 4)
+ (mem:HI (plus:HI (reg:HI 12)
+ (match_operand 0 "const_int_operand" ""))))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && peep2_regno_dead_p (2, REG_DP)
+ && (INTVAL (operands[0]) < 0x100))"
+ [(set (reg:HI 4)
+ (mem:HI (plus:HI (reg:HI 4)
+ (match_dup 0))))]
+ "")
+
+;; Drop an intermediate register when loading via (IP) and the
+;; register dies immediately.
+(define_peephole2
+ [(set (match_operand:HI 0 "register_operand" "")
+ (mem:HI (reg:HI 4)))
+ (set (match_operand:HI 2 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && peep2_reg_dead_p (2, operands[0]))"
+ [(set (match_dup 2)
+ (mem:HI (reg:HI 4)))]
+ "")
+
+;; Likewise with a constant offset from IP.
+(define_peephole2
+ [(set (match_operand:HI 0 "register_operand" "")
+ (mem:HI (plus:HI (reg:HI 4)
+ (match_operand 1 "const_int_operand" ""))))
+ (set (match_operand:HI 2 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && peep2_reg_dead_p (2, operands[0])
+ && (INTVAL (operands[1]) < 0x100))"
+ [(set (match_dup 2)
+ (mem:HI (plus:HI (reg:HI 4)
+ (match_dup 1))))]
+ "")
+
+;; Fold "reg = src; DP = IP; (DP) = reg" into a single store through
+;; IP when both the register and DP die.
+(define_peephole2
+ [(set (match_operand:HI 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand:HI 1 "ip2k_short_operand" ""))
+ (set (reg:HI 12)
+ (reg:HI 4))
+ (set (mem:HI (reg:HI 12))
+ (match_dup 0))]
+ "(peep2_reg_dead_p (3, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[0], REG_DP, 2)
+ && peep2_regno_dead_p (3, REG_DP))"
+ [(set (mem:HI (reg:HI 4))
+ (match_dup 1))]
+ "")
+
+;; We sometimes want to copy a value twice, usually when we copy a value into
+;; both a structure slot and into a temporary register. We can win here
+;; because gcc doesn't know about ways of reusing w while we're copying.
+;;
+;; NOTE(review): these HImode merge patterns are gated on the SImode
+;; split flag (ip2k_reorg_split_simode), i.e. they run during the
+;; earlier SImode pass -- presumably deliberate phase ordering;
+;; confirm against ip2k.c's reorg sequence.
+(define_insn "*movhi_twice"
+ [(set (match_operand:HI 0 "ip2k_gen_operand" "=&uS,uS")
+ (match_operand:HI 1 "ip2k_gen_operand" "uS,uS"))
+ (set (match_operand:HI 2 "ip2k_gen_operand" "=&uS,uS")
+ (match_dup 1))]
+ "ip2k_reorg_split_simode"
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (mov, %L2, w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (mov, %H2, w);
+
+ case 1:
+ return AS2 (mov, w, %L1) CR_TAB
+ AS1 (push, %H1%<) CR_TAB
+ AS1 (push, %H1%<) CR_TAB
+ AS1 (pop, %H0%>) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS1 (pop, %H2%>) CR_TAB
+ AS2 (mov, %L2, w);
+ }
+ }")
+
+;; We have to be *very* careful with this one to use predicates that do not
+;; allow this to match if there are any register dependencies between the
+;; operands.
+;; Don't try to match until we've removed redundant reloads. Often this
+;; simplification will remove the need to do two moves!
+;;
+(define_peephole2
+ [(set (match_operand:HI 0 "ip2k_gen_operand" "")
+ (match_operand:HI 1 "ip2k_gen_operand" ""))
+ (set (match_operand:HI 2 "ip2k_gen_operand" "")
+ (match_dup 0))]
+ "(ip2k_reorg_split_simode)"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 2)
+ (match_dup 1))])]
+ "")
+
+;; We have to be *very* careful with this one to use predicates that do not
+;; allow this to match if there are any register dependencies between the
+;; operands.
+;; Don't try to match until we've removed redundant reloads. Often this
+;; simplification will remove the need to do two moves!
+;;
+(define_peephole2
+ [(set (match_operand:HI 0 "ip2k_gen_operand" "")
+ (match_operand:HI 1 "ip2k_gen_operand" ""))
+ (set (match_operand:HI 2 "ip2k_gen_operand" "")
+ (match_dup 1))]
+ "(ip2k_reorg_split_simode
+ && (!REG_P (operands[0])
+ || ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[0]), 2)))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 2)
+ (match_dup 1))])]
+ "")
+
+;;
+;; Move 32-bit integers.
+;;
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))]
+ ""
+ "")
+
+;; SImode moves are never emitted directly; they are always split
+;; into two HImode moves during the SImode splitting phase.
+(define_insn_and_split "*movsi"
+ [(set (match_operand:SI 0 "ip2k_split_dest_operand" "=<, ro, S")
+ (match_operand:SI 1 "general_operand" "roSi,rSi,roi"))]
+ ""
+ "#"
+ "ip2k_reorg_split_simode"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "{
+ ip2k_split_words (HImode, SImode, operands); /* Split into 2=3,4=5 */
+ }")
+
+;; We sometimes want to copy a value twice, usually when we copy a value into
+;; both a structure slot and into a temporary register. We can win here
+;; because gcc doesn't know about ways of reusing w while we're copying.
+;;
+;; Alternative 0 streams each byte through WREG; alternative 1 keeps
+;; the last byte in WREG and shuffles the other three through the
+;; stack twice.
+(define_insn "*movsi_twice"
+ [(set (match_operand:SI 0 "ip2k_gen_operand" "=&uS,uS")
+ (match_operand:SI 1 "ip2k_gen_operand" "uS,uS"))
+ (set (match_operand:SI 2 "ip2k_gen_operand" "=&uS,uS")
+ (match_dup 1))]
+ "ip2k_reorg_split_dimode"
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ return AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (mov, %A2, w) CR_TAB
+ AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %B2, w) CR_TAB
+ AS2 (mov, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, %C2, w) CR_TAB
+ AS2 (mov, w, %D1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, %D2, w);
+
+ case 1:
+ return AS2 (mov, w, %D1) CR_TAB
+ AS1 (push, %C1%<) CR_TAB
+ AS1 (push, %B1%<) CR_TAB
+ AS1 (push, %A1%<) CR_TAB
+ AS1 (push, %C1%<) CR_TAB
+ AS1 (push, %B1%<) CR_TAB
+ AS1 (push, %A1%<) CR_TAB
+ AS1 (pop, %A0%>) CR_TAB
+ AS1 (pop, %B0%>) CR_TAB
+ AS1 (pop, %C0%>) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (pop, %A2%>) CR_TAB
+ AS1 (pop, %B2%>) CR_TAB
+ AS1 (pop, %C2%>) CR_TAB
+ AS2 (mov, %D2, w);
+ }
+ }")
+
+;; We have to be *very* careful with this one to use predicates that do not
+;; allow this to match if there are any register dependencies between the
+;; operands.
+;; Don't try to match until we've removed redundant reloads. Often this
+;; simplification will remove the need to do two moves!
+;;
+;; The condition checks every pairwise register overlap among the
+;; three operands before merging into *movsi_twice.
+(define_peephole2
+ [(set (match_operand:SI 0 "ip2k_gen_operand" "")
+ (match_operand:SI 1 "ip2k_gen_operand" ""))
+ (set (match_operand:SI 2 "ip2k_gen_operand" "")
+ (match_dup 0))]
+ "(ip2k_reorg_split_dimode
+ && (!REG_P (operands[0])
+ || (ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[0]), 4)
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]), 4)))
+ && (!REG_P (operands[1])
+ || (ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[1]), 4)
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[1]), 4)))
+ && (!REG_P (operands[2])
+ || (ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[2]), 4)
+ && ip2k_xexp_not_uses_reg_p (operands[1],
+ REGNO (operands[2]), 4))))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 2)
+ (match_dup 1))])]
+ "")
+
+;; We have to be *very* careful with this one to use predicates that do not
+;; allow this to match if there are any register dependencies between the
+;; operands.
+;; Don't try to match until we've removed redundant reloads. Often this
+;; simplification will remove the need to do two moves!
+;;
+(define_peephole2
+ [(set (match_operand:SI 0 "ip2k_gen_operand" "")
+ (match_operand:SI 1 "ip2k_gen_operand" ""))
+ (set (match_operand:SI 2 "ip2k_gen_operand" "")
+ (match_dup 1))]
+ "(ip2k_reorg_split_dimode
+ && (!REG_P (operands[0])
+ || (ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[0]), 4)
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]), 4)))
+ && (!REG_P (operands[1])
+ || (ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[1]), 4)
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[1]), 4)))
+ && (!REG_P (operands[2])
+ || (ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[2]), 4)
+ && ip2k_xexp_not_uses_reg_p (operands[1],
+ REGNO (operands[2]), 4))))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (match_dup 2)
+ (match_dup 1))])]
+ "")
+
+;;
+;; Move 64-bit integers.
+;;
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "" "")
+ (match_operand:DI 1 "" ""))]
+ ""
+ "")
+
+;; DImode moves are never emitted directly; they are always split
+;; into two SImode moves during the DImode splitting phase.
+(define_insn_and_split "*movdi"
+ [(set (match_operand:DI 0 "ip2k_split_dest_operand" "=<, ro, S")
+ (match_operand:DI 1 "general_operand" "roSi,rSi,roi"))]
+ ""
+ "#"
+ "ip2k_reorg_split_dimode"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "{
+ ip2k_split_words (SImode, DImode, operands); /* Split into 2=3,4=5 */
+ }")
+
+;;
+;; Move 32-bit floating point values.
+;;
+
+;; The expander forces a mem-to-mem move through a register; the insn
+;; condition then only has to allow the mem/mem case when both sides
+;; are short-addressable.
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "" "")
+ (match_operand:SF 1 "" ""))]
+ ""
+ "if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+ operands[1] = copy_to_mode_reg (SFmode, operands[1]);
+ ")
+
+;; Split an SFmode move all the way down to four QImode moves: first
+;; HI/HI, then each HI half into QI/QI.
+(define_insn_and_split "*movsf"
+ [(set (match_operand:SF 0 "ip2k_split_dest_operand" "=r<, o")
+ (match_operand:SF 1 "general_operand" "roi,ri"))]
+ "(ip2k_short_operand (operands[0], SFmode)
+ && ip2k_short_operand (operands[1], SFmode))
+ || ! (memory_operand (operands[0], SFmode)
+ && memory_operand (operands[1], SFmode))"
+ "#"
+ "(reload_completed || reload_in_progress)"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))
+ (set (match_dup 6) (match_dup 7))
+ (set (match_dup 8) (match_dup 9))]
+ "{
+ /* Split into 2=3,4=5 */
+ ip2k_split_words (HImode, SImode, operands);
+ /* Split 4=5 into 6=7,8=9 */
+ ip2k_split_words (QImode, HImode, &operands[4]);
+ operands[0] = operands[2];
+ operands[1] = operands[3];
+ ip2k_split_words (QImode, HImode, operands);
+ }")
+
+;;
+;; Move 64-bit floating point values.
+;;
+
+;;
+;; Block move operations.
+;;
+
+;; Copy a block of bytes (memcpy()). We expand the definition to convert
+;; our memory operand into a register pointer operand instead.
+;;
+;; Expand a block copy: force both block addresses into Pmode
+;; registers, normalize a constant byte count to 16 bits, and emit
+;; the movstrhi_expanded insn.  Operand 3 (alignment) is ignored.
+(define_expand "movstrhi"
+ [(use (match_operand:BLK 0 "memory_operand" ""))
+ (use (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:HI 2 "general_operand" ""))
+ (use (match_operand 3 "const_int_operand" ""))]
+ ""
+ "{
+ rtx addr0, addr1, count;
+
+ addr0 = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
+ addr1 = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ count = gen_int_mode (INTVAL (operands[2]) & 0xffff, HImode);
+ else
+ count = operands[2];
+
+ emit_insn (gen_movstrhi_expanded (addr0, count, addr1));
+ DONE;
+ }")
+
+;; Block copy instruction. We handle this by calling one of two functions in
+;; libgcc. The first of these is a special case (faster) routine that handles
+;; constant block sizes under 256 bytes. This one is particularly common
+;; because we use it when copying data structures. The second routine handles
+;; the general case where we have either a variable block size or one that is
+;; greater than 255 bytes.
+;;
+;; Operand 0 = dest address, operand 2 = src address, operand 1 = byte
+;; count.  Alternatives 0-2 ('P' count, presumably a small constant)
+;; push only the low count byte and call __movstrhi_countqi; the rest
+;; push a 16-bit count and call __movstrhi_counthi.  Arguments are
+;; pushed count, src, dest; %< / %> appear to bracket pushes so that
+;; stack-relative operand offsets stay correct (see ip2k.c).
+(define_insn "movstrhi_expanded"
+ [(set
+ (mem:BLK
+ (match_operand:HI 0 "nonimmediate_operand" "rS,ro,rS, rS, ro, rS"))
+ (mem:BLK
+ (match_operand:HI 2 "nonimmediate_operand" "ro,rS,rS, ro, rS, rS")))
+ (use
+ (match_operand:HI 1 "general_operand" "P, P, P,rSi,rSi,roi"))]
+ ""
+ "@
+ push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movstrhi_countqi\;call\\t__movstrhi_countqi
+ push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movstrhi_countqi\;call\\t__movstrhi_countqi
+ push\\t%L1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>\;page\\t__movstrhi_countqi\;call\\t__movstrhi_countqi
+ push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movstrhi_counthi\;call\\t__movstrhi_counthi
+ push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movstrhi_counthi\;call\\t__movstrhi_counthi
+ push\\t%L1%<\;push\\t%H1%<\;push\\t%L2%<\;push\\t%H2%<\;push\\t%L0%<\;push\\t%H0%>%>%>%>%>\;page\\t__movstrhi_counthi\;call\\t__movstrhi_counthi")
+
+\f
+;; Bit insert
+;;
+;; Expand bit-field insertion into a QImode destination.  FAIL (so the
+;; middle-end falls back to shifts/masks) unless both the size and the
+;; position satisfy constraint letter 'J'.
+(define_expand "insv"
+ [(set (zero_extract:QI (match_operand:QI 0 "nonimmediate_operand" "")
+ (match_operand 1 "immediate_operand" "") ;size
+ (match_operand 2 "immediate_operand" "")) ;pos
+ (match_operand:QI 3 "general_operand" ""))]
+ ""
+ "{
+ if (! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'J')
+ || ! CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J'))
+ FAIL;
+ }")
+
+;; QImode bit-field store.  Alternatives:
+;;   0/1: single-bit set/clear of a constant 1/0 value (setb/clrb);
+;;   2/3: multi-bit all-ones / all-zeros value via or/and with a mask;
+;;   4:   single-bit copy from a register via clrb + conditional setb;
+;;   5/6: general case -- clear the field, then shift, mask and OR in
+;;        the new value (constant values are pre-shifted at compile
+;;        time).
+(define_insn "*insv"
+ [(set (zero_extract:QI
+ (match_operand:QI
+ 0 "nonimmediate_operand" "+roR,roR,roR,roR,&roR,&roR,&r")
+ (match_operand
+ 1 "immediate_operand" "N, N, J, J, N, J, J") ;sz
+ (match_operand
+ 2 "immediate_operand" "J, J, J, J, J, J, J"));pos
+ (match_operand:QI
+ 3 "general_operand" "MN, O, M, O, roR, rn,oR"))]
+ ""
+ "*{
+ unsigned int pos = INTVAL (operands[2]),
+ siz = INTVAL (operands[1]),
+ mask = (1 << (pos + siz)) - (1 << pos);
+
+ switch (which_alternative)
+ {
+ case 0:
+ return \"setb\\t%0,%b1\";
+
+ case 1:
+ return \"clrb\\t%0,%b1\";
+
+ case 2:
+ operands[3] = gen_int_mode (mask & 0xff, QImode);
+ return AS2 (mov, w, %3) CR_TAB
+ AS2 (or, %0, w);
+
+ case 3:
+ operands[3] = gen_int_mode (0xff & ~mask, QImode);
+ return AS2 (mov, w, %3) CR_TAB
+ AS2 (and, %0, w);
+
+ case 4:
+ return AS2 (clrb, %0,%b2) CR_TAB
+ AS2 (snb, %3, 0) CR_TAB
+ AS2 (setb, %0, %b2);
+
+ case 5:
+ case 6:
+ {
+ static char buff[256];
+ char *p = buff;
+
+ /* Clear the destination field */
+
+ p += sprintf (buff, \"mov\\tw,#$%2.2x\;and\\t%%0,w\;\",
+ 0xff & ~mask);
+
+ if (CONSTANT_P (operands[3]))
+ /* Constant can just be or-ed in. */
+ {
+ p += sprintf (p, \"mov\\tw,#$%2.2x\;or\\t%%0,w\",
+ (INTVAL (operands[3]) << pos) & mask & 0xff);
+ return buff;
+ }
+
+ p += sprintf (p, \"mov\\tw,%%3\;\"); /* Value to deposit */
+
+ /* Shift and mask the value before OR-ing into the destination. */
+
+ if (pos != 0)
+ p += sprintf (p, \"mulu\\tw,#%d\;\", 1<<pos);
+
+ /* NOTE(review): the leading separator below emits an empty
+ statement, and 'mask' is printed unmasked -- it is assumed
+ that pos + siz never exceeds 8 for QImode; verify. */
+ p += sprintf (p, \"\;and\\tw,#$%2.2x\;or\\t%%0,w\", mask);
+ return buff;
+ }
+ }
+ }"
+ [(set_attr "skip" "yes,yes,no,no,no,no,no")
+ (set_attr "clobberw" "no,no,yes,yes,no,yes,yes")])
+
+;;
+;; Add bytes
+;;
+
+;; Expand 8-bit addition; all the work is done by the insn patterns
+;; below, so the expansion itself is trivial.
+(define_expand "addqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (plus:QI (match_operand:QI 1 "general_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
+ ""
+ "")
+
+;; Push the result of an 8-bit add.  +1 / -1 are handled by pushing
+;; then inc/dec of the new top-of-stack byte; the general case adds
+;; through w and pushes wreg.
+(define_insn "*push_addqi3"
+ [(set (match_operand:QI 0 "push_operand" "=<,<,<")
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%g,g,g")
+ (match_operand:QI 2 "general_operand" "N,M,g")))]
+ ""
+ "@
+ push\\t%1\;inc\\t1(SP)
+ push\\t%1\;dec\\t1(SP)
+ mov\\tw,%2\;add\\tw,%1\;push\\twreg"
+ [(set_attr "clobberw" "no,no,yes")])
+
+;; 8-bit add whose destination is hard register 10 (the w accumulator).
+;; Used by the machine-dependent-reorg splits below; only valid once
+;; ip2k_reorg_split_qimode is true.
+(define_insn "*addqi3_w"
+ [(set
+ (reg:QI 10)
+ (plus:QI
+ (match_operand:QI 0 "nonimmediate_operand" "%rS, g,rS, g, rS, g,rS")
+ (match_operand:QI 1 "general_operand" "N, N, M, M,rSi,rSi, g")))]
+ "(ip2k_reorg_split_qimode)"
+ "@
+ inc\\tw,%0
+ inc\\tw,%0
+ dec\\tw,%0
+ dec\\tw,%0
+ mov\\tw,%1\;add\\tw,%0
+ mov\\tw,%1\;add\\tw,%0
+ mov\\tw,%1\;add\\tw,%0"
+ [(set_attr "skip" "no,no,no,no,no,no,no")])
+
+;; General 8-bit add.  Two-address alternatives (dest == src1) are
+;; emitted directly; the first two pairs special-case pointer-high
+;; adjustment (iph/dph) for +1.  Three-address alternatives emit "#"
+;; and are split during reorg into an add-into-w followed by a move
+;; from w to the destination (reg 10 is w).
+(define_insn_and_split "*addqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=k,k,z,z,djyoR,djyoR,djyoR,djyS, g,rS, g,rS, g, rS,rS")
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,0,0, 0, 0, 0, 0,rS, g,rS, g, rS, g,rS")
+ (match_operand:QI 2 "general_operand" "N,g,N,g, N, M, rSi, g, N, N, M, M,rSi,rSi, g")))]
+ ""
+ "@
+ incsnz\\t%0\;dec\\tiph
+ mov\\tw,%2\;add\\t%0,w
+ incsnz\\t%0\;dec\\tdph
+ mov\\tw,%2\;add\\t%0,w
+ inc\\t%0
+ dec\\t%0
+ mov\\tw,%2\;add\\t%0,w
+ mov\\tw,%2\;add\\t%0,w
+ #
+ #
+ #
+ #
+ #
+ #
+ #"
+ "(ip2k_reorg_split_qimode
+ && ! rtx_equal_p (operands[0], operands[1]))"
+ [(set (reg:QI 10)
+ (plus:QI (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (reg:QI 10))]
+ ""
+ [(set_attr "skip" "no,no,no,no,yes,yes,no,no,no,no,no,no,no,no,no")
+ (set_attr
+ "clobberw" "no,yes,no,yes,no,no,yes,yes,yes,yes,yes,yes,yes,yes,yes")])
+
+;;
+;; Add 16-bit integers.
+;;
+
+;; Expand 16-bit addition.  x + x is rewritten as x << 1; a
+;; non-register operand is forced into a register first so the shift
+;; pattern can match.
+(define_expand "addhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (plus:HI (match_operand:HI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "if (rtx_equal_p (operands[1], operands[2]))
+ {
+ /* It is not impossible to wind up with two constants here.
+ If we simply emit the ashl, we'll generate unrecognizable
+ instructions. */
+ if (! nonimmediate_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ emit_insn (gen_ashlhi3 (operands[0], operands[1], GEN_INT (1)));
+ DONE;
+ }
+ ")
+
+;; Push the result of a 16-bit add.  Increments/decrements are done
+;; in place on the pushed bytes (1(SP)/2(SP)); general cases add
+;; through w with carry propagation.  Case 3 modifies operand 1's low
+;; byte in place and undoes the change afterwards unless the register
+;; dies in this insn.
+(define_insn "*push_addhi3" ; 0 1 2 3 4 5 6 7
+ [(set
+ (match_operand:HI 0 "push_operand" "=<,<, <, <, <, <,<,<")
+ (plus:HI
+ (match_operand:HI 1 "nonimmediate_operand" "%uo,q,uo,bf, uo, uS,q,q")
+ (match_operand:HI 2 "general_operand" "N,N, M, P,uSi,uoi,u,n")))]
+ ""
+ "*{
+ switch (which_alternative) {
+ case 0:
+ return AS1 (push, %L1%<) CR_TAB
+ AS1 (push, %H1%>) CR_TAB
+ AS1 (incsnz, 2(SP)) CR_TAB
+ AS1 (inc, 1(SP));
+
+ case 1:
+ return AS2 (mov, w, %H1) CR_TAB
+ AS1 (push, %L1) CR_TAB
+ AS1 (push, wreg) CR_TAB
+ AS1 (incsnz, 2(SP)) CR_TAB
+ AS1 (inc, 1(SP));
+
+ case 2:
+ return AS1 (push, %L1%<) CR_TAB
+ AS1 (push, %H1%>) CR_TAB
+ AS2 (mov, w, #-1) CR_TAB
+ AS2 (add, 2(SP), w) CR_TAB
+ AS2 (addc, 1(SP), w);
+
+ case 3:
+ OUT_AS2 (mov, w, %L2);
+ OUT_AS2 (add, %L1, w);
+ OUT_AS1 (push, %L1);
+ OUT_AS1 (push, %H1);
+ if (!find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
+ OUT_AS2 (sub, %L1, w);
+ return \"\";
+
+ case 4:
+ case 5:
+ return AS2 (mov, w, %L2) CR_TAB
+ AS2 (add, w, %L1) CR_TAB
+ AS1 (push, wreg%<) CR_TAB
+ AS2 (mov, w, %H2) CR_TAB
+ AS2 (addc, w, %H1) CR_TAB
+ AS1 (push, wreg%>);
+
+ case 6:
+ return AS2 (mov, w, %H1) CR_TAB
+ AS1 (push, %L1) CR_TAB
+ AS1 (push, wreg) CR_TAB
+ AS2 (mov, w, %L2) CR_TAB
+ AS2 (add, 2(SP), w) CR_TAB
+ AS2 (mov, w, %H2) CR_TAB
+ AS2 (addc, 1(SP), w);
+
+ case 7:
+ {
+ operands[3] = GEN_INT (INTVAL (operands[2]) + 2);
+ return AS1 (push, %L3) CR_TAB
+ AS1 (push, %H3) CR_TAB
+ AS2 (mov, w, %L1) CR_TAB
+ AS2 (add, 2(SP), w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (addc, 1(SP), w);
+ }
+ }
+ }"
+ [(set_attr "clobberw" "no,yes,yes,yes,yes,yes,yes,yes")])
+
+;; Push (zero_extend:HI qi) + hi.  The high result byte is produced
+;; either by rotating the carry into a pushed zero (alternatives 0/1)
+;; or by addc with $ff (which adds just the carry).
+(define_insn "*push_addhi3_zero_ext" ; 0 1 2 3
+ [(set (match_operand:HI 0 "push_operand" "=<, <, <, <")
+ (plus:HI
+ (zero_extend:HI
+ (match_operand:QI 1 "general_operand" "%roRi,roRi,roRi,rSi"))
+ (match_operand:HI 2 "general_operand" "N, P, rSi,roi")))]
+ ""
+ "@
+ inc\\tw,%L2\;push\\twreg\;push\\t#0\;rl\\t1(SP)
+ mov\\tw,%L2\;add\\tw,%1\;push\\twreg\;push\\t#0\;rl\\t1(SP)
+ mov\\tw,%L2\;add\\tw,%1\;push\\twreg%<\;mov\\tw,%H2\;addc\\tw,$ff\;push\\twreg%>
+ mov\\tw,%L2\;add\\tw,%1\;push\\twreg%<\;mov\\tw,%H2\;addc\\tw,$ff\;push\\twreg%>")
+
+;; hi-dest = (zero_extend:HI w) + immediate, where the QI value is
+;; already in the w accumulator (reg 10).  The carry out of the low
+;; byte is folded into the high byte with rl or addc.
+(define_insn "*addhi3_imm_zero_ext_w"
+ [(set
+ (match_operand:HI 0 "nonimmediate_operand" "=rS,o,a,b,a,a,rS,o,rS,o")
+ (plus:HI (zero_extend:HI (reg:QI 10))
+ (match_operand 1 "immediate_operand" "O,O,M,i,P,I, P,P, i,i")))]
+ ""
+ "@
+ mov\\t%L0,w\;clr\\t%H0
+ mov\\t%L0,w\;clr\\t%H0
+ mov\\t%L0,w\;clr\\t%H0\;dec\\t%L0
+ loadh\\t%x1\;loadl\\t%x1\;add\\t%L0,w
+ mov\\t%L0,w\;clr\\t%H0\;mov\\tw,%1\;add\\t%L0,w
+ mov\\t%L0,w\;clr\\t%H0\;mov\\tw,#%n1\;sub\\t%L0,w
+ add\\tw,%L1\;mov\\t%L0,w\;clr\\t%H0\;rl\\t%H0
+ add\\tw,%L1\;mov\\t%L0,w\;clr\\t%H0\;rl\\t%H0
+ add\\tw,%L1\;mov\\t%L0,w\;clr\\t%H0\;mov\\tw,%H1\;addc\\t%H0,w
+ add\\tw,%L1\;mov\\t%L0,w\;clr\\t%H0\;mov\\tw,%H1\;addc\\t%H0,w")
+
+;; hi-dest = (zero_extend:HI qi) + immediate.  The +1 case (N) is
+;; emitted directly with incsnz; everything else is split during
+;; reorg into a load of w followed by the *addhi3_imm_zero_ext_w
+;; pattern above.
(define_insn_and_split "*addhi3_imm_zero_ext"
+ [(set
+ (match_operand:HI
+ 0 "nonimmediate_operand" "=rS, o, rS, o, a, b, a, a, rS, o, rS, o")
+ (plus:HI
+ (zero_extend:HI
+ (match_operand:QI
+ 1 "general_operand" "%roR,rS,roR,rS,roR,roR,roR,roR,roR,rS,roR,rS"))
+ (match_operand
+ 2 "immediate_operand" " O, O, N, N, M, i, P, I, P, P, i, i")))]
+ ""
+ "@
+ #
+ #
+ clr\\t%H0\;incsnz\\tw,%1\;inc\\t%H0\;mov\\t%L0,w
+ clr\\t%H0\;incsnz\\tw,%1\;inc\\t%H0\;mov\\t%L0,w
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ #"
+ "(ip2k_reorg_split_qimode
+ && (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) != 1))"
+ [(set (reg:QI 10)
+ (match_dup 1))
+ (set (match_dup 0)
+ (plus:HI (zero_extend:HI (reg:QI 10))
+ (match_dup 2)))])
+
+;; 16-bit add of an immediate.  Two-address alternatives (0-7) adjust
+;; the destination in place; three-address alternatives (8-f) build
+;; the result through w (%L = low byte, %H = high byte; addc
+;; propagates the carry).  'a' destinations are low-byte-only
+;; (no carry into the high byte).
+(define_insn "*addhi3_immediate" ; 0 1 2 3 4 5 6 7 8 9 a b c d e f
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=a,do,a,do,a,a,a,do,&uo,&uS,bf,bf,bf,&uS,&uo, u")
+ (plus:HI (match_operand:HI 1 "general_operand" "%0, 0,0, 0,0,0,0, 0, rS, ro,uo,uo,uo, ro, rS,uo")
+ (match_operand 2 "immediate_operand" "N, N,M, M,P,I,i, i, N, N, M, P, I, i, i, i")))]
+ ""
+ "@
+ inc\\t%L0
+ incsnz\\t%L0\;inc\\t%H0
+ dec\\t%L0
+ mov\\tw,#-1\;add\\t%L0,w\;addc\\t%H0,w
+ mov\\tw,%2\;add\\t%L0,w
+ mov\\tw,#%n2\;sub\\t%L0,w
+ mov\\tw,%L2\;add\\t%L0,w\;mov\\tw,%H2\;add\\t%H0,w
+ mov\\tw,%L2\;add\\t%L0,w\;mov\\tw,%H2\;addc\\t%H0,w
+ mov\\tw,%H1\;mov\\t%H0,w\;incsnz\\tw,%L1\;inc\\t%H0\;mov\\t%L0,w
+ mov\\tw,%H1\;mov\\t%H0,w\;incsnz\\tw,%L1\;inc\\t%H0\;mov\\t%L0,w
+ mov\\tw,%H1\;push\\t%L1%<\;pop\\t%L0%>\;mov\\t%H0,w\;dec\\t%L0
+ mov\\tw,%H1\;push\\t%L1%<\;pop\\t%L0%>\;mov\\t%H0,w\;mov\\tw,%2\;add\\t%L0,w
+ mov\\tw,%H1\;push\\t%L1%<\;pop\\t%L0%>\;mov\\t%H0,w\;mov\\tw,#%n2\;sub\\t%L0,w
+ mov\\tw,%L2\;add\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;addc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;add\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;addc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;add\\tw,%L1\;push\\twreg%<\;mov\\tw,%H2\;addc\\tw,%H1\;mov\\t%H0,w\;pop\\t%L0%>"
+ [(set_attr "skip" "yes,no,yes,no,no,no,no,no,no,no,no,no,no,no,no,no")
+ (set_attr "clobberw" "no,no,no,yes,yes,yes,yes,yes,yes,yes,yes,yes,yes,yes,yes,yes")])
+
+;; 16-bit add of two non-immediate operands.  Alternatives 0-4 are
+;; two-address (accumulate into operand 0); 5-7 are three-address,
+;; building the result in w byte by byte with addc for the carry.
+(define_insn "*addhi3_nonimmediate" ; 0 1 2 3 4 5 6 7
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=&bf,bf,&dS,&do,d,&rS,&rS, o")
+ (plus:HI (match_operand:HI 1 "general_operand" "%0, 0, 0, 0,0, ro, rS,rS")
+ (match_operand:HI 2 "nonimmediate_operand" "ro,uo, ro, rS,r, rS, ro,rS")))]
+ ""
+ "@
+ mov\\tw,%L2\;add\\t%L0,w\;mov\\tw,%H2\;add\\t%H0,w
+ mov\\tw,%L2\;push\\t%H2%<\;add\\t%L0,w\;pop\\twreg%>\;add\\t%H0,w
+ mov\\tw,%L2\;add\\t%L0,w\;mov\\tw,%H2\;addc\\t%H0,w
+ mov\\tw,%L2\;add\\t%L0,w\;mov\\tw,%H2\;addc\\t%H0,w
+ mov\\tw,%L2\;push\\t%H2%<\;add\\t%L0,w\;pop\\twreg%>\;addc\\t%H0,w
+ mov\\tw,%L2\;add\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;addc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;add\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;addc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;add\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;addc\\tw,%H1\;mov\\t%H0,w")
+
+;; hi-dest = (zero_extend:HI w) + hi, with the QI addend already in
+;; the w accumulator (reg 10).  The carry is propagated into the high
+;; byte via clr wreg + addc.
+(define_insn "*addhi3_nonimm_zero_extend_w"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=a,ro,&ro,&rS,&rS, u")
+ (plus:HI
+ (zero_extend:HI (reg:QI 10))
+ (match_operand:HI 1 "nonimmediate_operand" "0, 0, rS, rS, ro,uo")))]
+ ""
+ "@
+ add\\t%L0,w
+ add\\t%L0,w\;clr\\twreg\;addc\\t%H0,w
+ add\\tw,%L1\;mov\\t%L0,w\;clr\\twreg\;addc\\tw,%H1\;mov\\t%H0,w
+ add\\tw,%L1\;mov\\t%L0,w\;clr\\twreg\;addc\\tw,%H1\;mov\\t%H0,w
+ add\\tw,%L1\;mov\\t%L0,w\;clr\\twreg\;addc\\tw,%H1\;mov\\t%H0,w
+ add\\tw,%L1\;push\\twreg%<\;clr\\twreg\;addc\\tw,%H1\;mov\\t%H0,w\;pop\\t%L0%>"
+ [(set_attr "skip" "yes,no,no,no,no,no")
+ (set_attr "clobberw" "no,yes,yes,yes,yes,yes")])
+
+;; hi-dest = (zero_extend:HI qi) + hi.  Always emitted as "#" and
+;; split during reorg into a load of w (reg 10) followed by the
+;; *addhi3_nonimm_zero_extend_w pattern above.
+(define_insn_and_split "*addhi3_nonimm_zero_extend"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=a, ro,&ro,&rS,&rS, u")
+ (plus:HI
+ (zero_extend:HI
+ (match_operand:QI 1 "general_operand" "roR,roR, rS,roR, rS,rS"))
+ (match_operand:HI 2 "nonimmediate_operand" "0, 0, rS, rS, ro,uo")))]
+ ""
+ "#"
+ "(ip2k_reorg_split_qimode)"
+ [(set (reg:QI 10)
+ (match_dup 1))
+ (set (match_dup 0)
+ (plus:HI (zero_extend:HI (reg:QI 10))
+ (match_dup 2)))])
+
+
+;;
+;; Add 32-bit integers.
+;;
+
+;; Expand 32-bit addition; the insn patterns below do all the work.
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (plus:SI (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
+ ""
+ "")
+
+;; Push the result of a 32-bit add.  %A..%D select the four bytes of
+;; an SImode operand, %D being the least significant (the carry chain
+;; starts at %D).  Case 0 (+1) ripples the increment with incsnz/incsz
+;; plus page/jmp skips; case 1 (-1) adds #-1 with carries; case 2 is
+;; the general byte-serial add through w.
+(define_insn "*push_addsi3"
+ [(set (match_operand:SI 0 "push_operand" "=<,<, <")
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%g,g, g")
+ (match_operand:SI 2 "general_operand" "N,M,rSi")))]
+ ""
+ "*{
+ switch (which_alternative) {
+ case 0:
+ OUT_AS1 (push, %D1%<);
+ OUT_AS1 (push, %C1%<);
+ OUT_AS1 (push, %B1%<);
+ OUT_AS1 (push, %A1%>%>%>);
+ OUT_AS1 (incsnz, 4(SP));
+ OUT_AS1 (incsz, 3(SP));
+ OUT_AS1 (page, 1f);
+ OUT_AS1 (jmp, 1f);
+ OUT_AS1 (incsnz, 2(SP));
+ OUT_AS1 (inc, 1(SP));
+ OUT_AS1 (1:,);
+ return \"\";
+
+ case 1:
+ OUT_AS1 (push, %D1%<);
+ OUT_AS1 (push, %C1%<);
+ OUT_AS1 (push, %B1%<);
+ OUT_AS1 (push, %A1%>%>%>);
+ OUT_AS2 (mov, w, #-1);
+ OUT_AS2 (add, 4(SP), w);
+ OUT_AS2 (addc, 3(SP), w);
+ OUT_AS2 (addc, 2(SP), w);
+ OUT_AS2 (addc, 1(SP), w);
+ return \"\";
+
+ case 2:
+ OUT_AS2 (mov, w, %D2);
+ OUT_AS2 (add, w, %D1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %C2);
+ OUT_AS2 (addc, w, %C1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %B2);
+ OUT_AS2 (addc, w, %B1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %A2);
+ OUT_AS2 (addc, w, %A1);
+ OUT_AS1 (push, wreg%>%>%>);
+ return \"\";
+
+ default:
+ abort();
+ }
+ }"
+ [(set_attr "clobberw" "no,yes,yes")])
+
+;; 32-bit add.  Alternatives 0-3 are two-address (accumulate into
+;; operand 0); 4-6 are three-address, building the result byte by
+;; byte through w from %D (least significant) to %A.
+(define_insn "*addsi3" ; 0 1 2 3 4 5 6
+ [(set
+ (match_operand:SI 0 "nonimmediate_operand" "=ro,ro, ro, rS,&ro,&rS,&rS")
+ (plus:SI
+ (match_operand:SI 1 "nonimmediate_operand" "%0, 0, 0, 0, rS, ro, rS")
+ (match_operand:SI 2 "general_operand" "N, M,rSi,roi,rSi,rSi,roi")))]
+ ""
+ "@
+ incsnz\\t%D0\;incsz\\t%C0\;page\\t1f\;jmp\\t1f\;incsnz\\t%B0\;inc\\t%A0\;1:
+ mov\\tw,#-1\;add\\t%D0,w\;addc\\t%C0,w\;addc\\t%B0,w\;addc\\t%A0,w
+ mov\\tw,%D2\;add\\t%D0,w\;mov\\tw,%C2\;addc\\t%C0,w\;mov\\tw,%B2\;addc\\t%B0,w\;mov\\tw,%A2\;addc\\t%A0,w
+ mov\\tw,%D2\;add\\t%D0,w\;mov\\tw,%C2\;addc\\t%C0,w\;mov\\tw,%B2\;addc\\t%B0,w\;mov\\tw,%A2\;addc\\t%A0,w
+ mov\\tw,%D2\;add\\tw,%D1\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,%C1\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,%B1\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,%A1\;mov\\t%A0,w
+ mov\\tw,%D2\;add\\tw,%D1\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,%C1\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,%B1\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,%A1\;mov\\t%A0,w
+ mov\\tw,%D2\;add\\tw,%D1\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,%C1\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,%B1\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,%A1\;mov\\t%A0,w"
+ [(set_attr "clobberw" "no,yes,yes,yes,yes,yes,yes")])
+
+;; Push (zero_extend:SI qi) + si.  Upper bytes of the extended operand
+;; are zero, so the carry alone is added there via addc with $ff.
+(define_insn "*push_addsi3_zero_extendqi" ; 0 1
+ [(set (match_operand:SI 0 "push_operand" "=<, <")
+ (plus:SI (zero_extend:SI
+ (match_operand:QI 1 "general_operand" "%roRi,rSi"))
+ (match_operand:SI 2 "general_operand" "rSi,roi")))]
+ ""
+ "@
+ mov\\tw,%D2\;add\\tw,%1\;push\\twreg%<\;mov\\tw,%C2\;addc\\tw,$ff\;push\\twreg%<\;mov\\tw,%B2\;addc\\tw,$ff\;push\\twreg%<\;mov\\tw,%A2\;addc\\tw,$ff\;push\\twreg%>%>%>
+ mov\\tw,%D2\;add\\tw,%1\;push\\twreg%<\;mov\\tw,%C2\;addc\\tw,$ff\;push\\twreg%<\;mov\\tw,%B2\;addc\\tw,$ff\;push\\twreg%<\;mov\\tw,%A2\;addc\\tw,$ff\;push\\twreg%>%>%>")
+
+;; si-dest = (zero_extend:SI qi) + si.  Two-address forms add the byte
+;; to %D0 and ripple the carry; three-address forms rebuild all four
+;; bytes through w, using addc with $ff to add only the carry.
+(define_insn "*addsi3_zero_extendqi" ;
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro, rS,&ro,&rS,&rS")
+ (plus:SI
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "rS,roR, rS,roR, rS"))
+ (match_operand:SI 2 "general_operand" "0, 0,rSi,rSi,roi")))]
+ ""
+ "@
+ mov\\tw,%1\;add\\t%D0,w\;clr\\twreg\;addc\\t%C0,w\;addc\\t%B0,w\;addc\\t%A0,w
+ mov\\tw,%1\;add\\t%D0,w\;clr\\twreg\;addc\\t%C0,w\;addc\\t%B0,w\;addc\\t%A0,w
+ mov\\tw,%1\;add\\tw,%D2\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,$ff\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,$ff\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,$ff\;mov\\t%A0,w
+ mov\\tw,%1\;add\\tw,%D2\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,$ff\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,$ff\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,$ff\;mov\\t%A0,w
+ mov\\tw,%1\;add\\tw,%D2\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,$ff\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,$ff\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,$ff\;mov\\t%A0,w")
+
+;; Push (zero_extend:SI hi) + si.  Low two bytes add %L1/%H1; the
+;; upper two bytes of the extended operand are zero, so only the
+;; carry is added there (addc with $ff).
+(define_insn "*push_addsi3_zero_extendhi" ; 0 1
+ [(set (match_operand:SI 0 "push_operand" "=<, <")
+ (plus:SI (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "%roR,rSR"))
+ (match_operand:SI 2 "general_operand" "rSi,roi")))]
+ ""
+ "@
+ mov\\tw,%D2\;add\\tw,%L1\;push\\twreg%<\;mov\\tw,%C2\;addc\\tw,%H1\;push\\twreg%<\;mov\\tw,%B2\;addc\\tw,$ff\;push\\twreg%<\;mov\\tw,%A2\;addc\\tw,$ff\;push\\twreg%>%>%>
+ mov\\tw,%D2\;add\\tw,%L1\;push\\twreg%<\;mov\\tw,%C2\;addc\\tw,%H1\;push\\twreg%<\;mov\\tw,%B2\;addc\\tw,$ff\;push\\twreg%<\;mov\\tw,%A2\;addc\\tw,$ff\;push\\twreg%>%>%>")
+
+;; si-dest = (zero_extend:SI hi) + si, byte-serial with carry; the
+;; top two bytes of the extended operand contribute only the carry.
+(define_insn "*addsi3_zero_extendhi" ;
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro,rS,&ro,&rS,&rS")
+ (plus:SI
+ (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "rS,ro, rS, ro, rS"))
+ (match_operand:SI 2 "general_operand" "0, 0,rSi,rSi,roi")))]
+ ""
+ "@
+ mov\\tw,%L1\;add\\t%D0,w\;mov\\tw,%H1\;addc\\t%C0,w\;clr\\twreg\;addc\\t%B0,w\;addc\\t%A0,w
+ mov\\tw,%L1\;add\\t%D0,w\;mov\\tw,%H1\;addc\\t%C0,w\;clr\\twreg\;addc\\t%B0,w\;addc\\t%A0,w
+ mov\\tw,%L1\;add\\tw,%D2\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,%H1\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,$ff\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,$ff\;mov\\t%A0,w
+ mov\\tw,%L1\;add\\tw,%D2\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,%H1\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,$ff\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,$ff\;mov\\t%A0,w
+ mov\\tw,%L1\;add\\tw,%D2\;mov\\t%D0,w\;mov\\tw,%C2\;addc\\tw,%H1\;mov\\t%C0,w\;mov\\tw,%B2\;addc\\tw,$ff\;mov\\t%B0,w\;mov\\tw,%A2\;addc\\tw,$ff\;mov\\t%A0,w")
+
+;;
+;; Add 64-bit integers.
+;;
+
+;; Expand 64-bit addition; the insn patterns below do all the work.
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "")
+ (plus:DI (match_operand:DI 1 "general_operand" "")
+ (match_operand:DI 2 "general_operand" "")))]
+ ""
+ "")
+
+;; Push the result of a 64-bit add.  %S..%Z select the eight bytes of
+;; a DImode operand, %Z being the least significant (the carry chain
+;; starts at %Z).  Case 0 (+1) ripples the increment with
+;; incsnz/incsz plus page/jmp skips; case 1 (-1) adds #-1 with
+;; carries; case 2 is the general byte-serial add through w.
+(define_insn "*push_adddi3"
+ [(set (match_operand:DI 0 "push_operand" "=<,<, <")
+ (plus:DI (match_operand:DI 1 "nonimmediate_operand" "%g,g, g")
+ (match_operand:DI 2 "general_operand" "N,M,rSi")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ OUT_AS1 (push, %Z1%<);
+ OUT_AS1 (push, %Y1%<);
+ OUT_AS1 (push, %X1%<);
+ OUT_AS1 (push, %W1%<);
+ OUT_AS1 (push, %V1%<);
+ OUT_AS1 (push, %U1%<);
+ OUT_AS1 (push, %T1%<);
+ OUT_AS1 (push, %S1%>%>%>%>%>%>%>);
+ OUT_AS1 (incsnz, 8(SP));
+ OUT_AS1 (incsz, 7(SP));
+ OUT_AS1 (page, 1f);
+ OUT_AS1 (jmp, 1f);
+ OUT_AS1 (incsnz, 6(SP));
+ OUT_AS1 (incsz, 5(SP));
+ OUT_AS1 (page, 1f);
+ OUT_AS1 (jmp, 1f);
+ OUT_AS1 (incsnz, 4(SP));
+ OUT_AS1 (incsz, 3(SP));
+ OUT_AS1 (page, 1f);
+ OUT_AS1 (jmp, 1f);
+ OUT_AS1 (incsnz, 2(SP));
+ OUT_AS1 (inc, 1(SP));
+ OUT_AS1 (1:,);
+ return \"\";
+
+ case 1:
+ OUT_AS1 (push, %Z1%<);
+ OUT_AS1 (push, %Y1%<);
+ OUT_AS1 (push, %X1%<);
+ OUT_AS1 (push, %W1%<);
+ OUT_AS1 (push, %V1%<);
+ OUT_AS1 (push, %U1%<);
+ OUT_AS1 (push, %T1%<);
+ OUT_AS1 (push, %S1%>%>%>%>%>%>%>);
+ OUT_AS2 (mov, w, #-1);
+ OUT_AS2 (add, 8(SP), w);
+ OUT_AS2 (addc, 7(SP), w);
+ OUT_AS2 (addc, 6(SP), w);
+ OUT_AS2 (addc, 5(SP), w);
+ OUT_AS2 (addc, 4(SP), w);
+ OUT_AS2 (addc, 3(SP), w);
+ OUT_AS2 (addc, 2(SP), w);
+ OUT_AS2 (addc, 1(SP), w);
+ return \"\";
+
+ case 2:
+ OUT_AS2 (mov, w, %Z2);
+ OUT_AS2 (add, w, %Z1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %Y2);
+ OUT_AS2 (addc, w, %Y1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %X2);
+ OUT_AS2 (addc, w, %X1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %W2);
+ OUT_AS2 (addc, w, %W1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %V2);
+ OUT_AS2 (addc, w, %V1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %U2);
+ OUT_AS2 (addc, w, %U1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %T2);
+ OUT_AS2 (addc, w, %T1);
+ OUT_AS1 (push, wreg%<);
+ OUT_AS2 (mov, w, %S2);
+ OUT_AS2 (addc, w, %S1);
+ OUT_AS1 (push, wreg%>%>%>%>%>%>%>);
+ return \"\";
+
+ default:
+ abort();
+ }
+ }"
+ [(set_attr "clobberw" "no,yes,yes")])
+
+;; 64-bit add.  Case 0: +1, rippled with incsnz/incsz and page/jmp
+;; skips.  Case 1: -1, added with carries.  Cases 2/3: two-address
+;; add of a general operand into operand 0.  Cases 4-6:
+;; three-address, result built byte by byte through w from %Z (least
+;; significant) to %S.
+(define_insn "*adddi3" ; 0 1 2 3 4 5 6
+ [(set
+ (match_operand:DI 0 "nonimmediate_operand" "=ro,ro, ro, rS,&ro,&rS,&rS")
+ (plus:DI
+ (match_operand:DI 1 "nonimmediate_operand" "%0, 0, 0, 0, rS, ro, rS")
+ (match_operand:DI 2 "general_operand" "N, M,rSi,roi,rSi,rSi,roi")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ OUT_AS1 (incsnz, %Z0);
+ OUT_AS1 (incsz, %Y0);
+ OUT_AS1 (page, 1f);
+ OUT_AS1 (jmp, 1f);
+ OUT_AS1 (incsnz, %X0);
+ OUT_AS1 (incsz, %W0);
+ OUT_AS1 (page, 1f);
+ OUT_AS1 (jmp, 1f);
+ OUT_AS1 (incsnz, %V0);
+ OUT_AS1 (incsz, %U0);
+ OUT_AS1 (page, 1f);
+ OUT_AS1 (jmp, 1f);
+ OUT_AS1 (incsnz, %T0);
+ OUT_AS1 (inc, %S0);
+ OUT_AS1 (1:, );
+ return \"\";
+
+ case 1:
+ OUT_AS2 (mov, w, #-1);
+ OUT_AS2 (add, %Z0, w);
+ OUT_AS2 (addc, %Y0, w);
+ OUT_AS2 (addc, %X0, w);
+ OUT_AS2 (addc, %W0, w);
+ OUT_AS2 (addc, %V0, w);
+ OUT_AS2 (addc, %U0, w);
+ OUT_AS2 (addc, %T0, w);
+ OUT_AS2 (addc, %S0, w);
+ return \"\";
+
+ case 2:
+ case 3:
+ OUT_AS2 (mov, w, %Z2);
+ OUT_AS2 (add, %Z0, w);
+ OUT_AS2 (mov, w, %Y2);
+ OUT_AS2 (addc, %Y0, w);
+ OUT_AS2 (mov, w, %X2);
+ OUT_AS2 (addc, %X0, w);
+ OUT_AS2 (mov, w, %W2);
+ OUT_AS2 (addc, %W0, w);
+ OUT_AS2 (mov, w, %V2);
+ OUT_AS2 (addc, %V0, w);
+ OUT_AS2 (mov, w, %U2);
+ OUT_AS2 (addc, %U0, w);
+ OUT_AS2 (mov, w, %T2);
+ OUT_AS2 (addc, %T0, w);
+ OUT_AS2 (mov, w, %S2);
+ OUT_AS2 (addc, %S0, w);
+ return \"\";
+
+ case 4:
+ case 5:
+ case 6:
+ OUT_AS2 (mov, w, %Z2);
+ OUT_AS2 (add, w, %Z1);
+ OUT_AS2 (mov, %Z0, w);
+ OUT_AS2 (mov, w, %Y2);
+ OUT_AS2 (addc, w, %Y1);
+ OUT_AS2 (mov, %Y0, w);
+ OUT_AS2 (mov, w, %X2);
+ OUT_AS2 (addc, w, %X1);
+ OUT_AS2 (mov, %X0, w);
+ OUT_AS2 (mov, w, %W2);
+ OUT_AS2 (addc, w, %W1);
+ OUT_AS2 (mov, %W0, w);
+ OUT_AS2 (mov, w, %V2);
+ OUT_AS2 (addc, w, %V1);
+ OUT_AS2 (mov, %V0, w);
+ OUT_AS2 (mov, w, %U2);
+ OUT_AS2 (addc, w, %U1);
+ OUT_AS2 (mov, %U0, w);
+ OUT_AS2 (mov, w, %T2);
+ OUT_AS2 (addc, w, %T1);
+ OUT_AS2 (mov, %T0, w);
+ OUT_AS2 (mov, w, %S2);
+ OUT_AS2 (addc, w, %S1);
+ OUT_AS2 (mov, %S0, w);
+ return \"\";
+
+ default:
+ abort();
+ }
+ }"
+ [(set_attr "clobberw" "no,yes,yes,yes,yes,yes,yes")])
+
+;; di-dest = (zero_extend:DI qi) + di.  The extended operand's upper
+;; seven bytes are zero, so after adding the low byte only the carry
+;; is propagated (clr wreg + addc, or addc with $ff in the
+;; three-address forms).
+(define_insn "*adddi3_zero_extendqi" ; 0 1 2 3 4
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro, rS,&ro,&rS,&rS")
+ (plus:DI
+ (zero_extend:DI
+ (match_operand:QI 1 "nonimmediate_operand" "rS,roR, rS,roR, rS"))
+ (match_operand:DI 2 "general_operand" "0, 0,rSi,rSi,roi")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (add, %Z0, w);
+ OUT_AS1 (clr, wreg);
+ OUT_AS2 (addc, %Y0, w);
+ OUT_AS2 (addc, %X0, w);
+ OUT_AS2 (addc, %W0, w);
+ OUT_AS2 (addc, %V0, w);
+ OUT_AS2 (addc, %U0, w);
+ OUT_AS2 (addc, %T0, w);
+ OUT_AS2 (addc, %S0, w);
+ return \"\";
+
+ case 2:
+ case 3:
+ case 4:
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (add, w, %Z2);
+ OUT_AS2 (mov, %Z0, w);
+ OUT_AS2 (mov, w, %Y2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %Y0, w);
+ OUT_AS2 (mov, w, %X2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %X0, w);
+ OUT_AS2 (mov, w, %W2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %W0, w);
+ OUT_AS2 (mov, w, %V2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %V0, w);
+ OUT_AS2 (mov, w, %U2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %U0, w);
+ OUT_AS2 (mov, w, %T2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %T0, w);
+ OUT_AS2 (mov, w, %S2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %S0, w);
+ return \"\";
+
+ default:
+ abort();
+ }
+ }")
+
+;; di-dest = (zero_extend:DI hi) + di.  %L1/%H1 are the two live bytes
+;; of the extended operand; above them only the carry is propagated.
+(define_insn "*adddi3_zero_extendhi" ; 0 1 2 3 4
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro,rS,&ro,&rS,&rS")
+ (plus:DI
+ (zero_extend:DI
+ (match_operand:HI 1 "nonimmediate_operand" "rS,ro, rS, ro, rS"))
+ (match_operand:DI 2 "general_operand" "0, 0,rSi,rSi,roi")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS2 (add, %Z0, w);
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS2 (addc, %Y0, w);
+ OUT_AS1 (clr, wreg);
+ OUT_AS2 (addc, %X0, w);
+ OUT_AS2 (addc, %W0, w);
+ OUT_AS2 (addc, %V0, w);
+ OUT_AS2 (addc, %U0, w);
+ OUT_AS2 (addc, %T0, w);
+ OUT_AS2 (addc, %S0, w);
+ return \"\";
+
+ case 2:
+ case 3:
+ case 4:
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS2 (add, w, %Z2);
+ OUT_AS2 (mov, %Z0, w);
+ OUT_AS2 (mov, w, %Y2);
+ OUT_AS2 (addc, w, %H1);
+ OUT_AS2 (mov, %Y0, w);
+ OUT_AS2 (mov, w, %X2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %X0, w);
+ OUT_AS2 (mov, w, %W2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %W0, w);
+ OUT_AS2 (mov, w, %V2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %V0, w);
+ OUT_AS2 (mov, w, %U2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %U0, w);
+ OUT_AS2 (mov, w, %T2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %T0, w);
+ OUT_AS2 (mov, w, %S2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %S0, w);
+ return \"\";
+
+ default:
+ abort();
+ }
+ }")
+
+;; di-dest = (zero_extend:DI si) + di.  %A1..%D1 are the four live
+;; bytes of the extended operand; above them only the carry is
+;; propagated.
+(define_insn "*adddi3_zero_extendsi" ; 0 1 2 3 4
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro,rS,&ro,&rS,&rS")
+ (plus:DI
+ (zero_extend:DI
+ (match_operand:SI 1 "nonimmediate_operand" "rS,ro, rS, ro, rS"))
+ (match_operand:DI 2 "general_operand" "0, 0,rSi,rSi,roi")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ OUT_AS2 (mov, w, %D1);
+ OUT_AS2 (add, %Z0, w);
+ OUT_AS2 (mov, w, %C1);
+ OUT_AS2 (addc, %Y0, w);
+ OUT_AS2 (mov, w, %B1);
+ OUT_AS2 (addc, %X0, w);
+ OUT_AS2 (mov, w, %A1);
+ OUT_AS2 (addc, %W0, w);
+ OUT_AS1 (clr, wreg);
+ OUT_AS2 (addc, %V0, w);
+ OUT_AS2 (addc, %U0, w);
+ OUT_AS2 (addc, %T0, w);
+ OUT_AS2 (addc, %S0, w);
+ return \"\";
+
+ case 2:
+ case 3:
+ case 4:
+ OUT_AS2 (mov, w, %D1);
+ OUT_AS2 (add, w, %Z2);
+ OUT_AS2 (mov, %Z0, w);
+ OUT_AS2 (mov, w, %Y2);
+ OUT_AS2 (addc, w, %C1);
+ OUT_AS2 (mov, %Y0, w);
+ OUT_AS2 (mov, w, %X2);
+ OUT_AS2 (addc, w, %B1);
+ OUT_AS2 (mov, %X0, w);
+ OUT_AS2 (mov, w, %W2);
+ OUT_AS2 (addc, w, %A1);
+ OUT_AS2 (mov, %W0, w);
+ OUT_AS2 (mov, w, %V2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %V0, w);
+ OUT_AS2 (mov, w, %U2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %U0, w);
+ OUT_AS2 (mov, w, %T2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %T0, w);
+ OUT_AS2 (mov, w, %S2);
+ OUT_AS2 (addc, w, $ff);
+ OUT_AS2 (mov, %S0, w);
+ return \"\";
+
+ default:
+ abort();
+ }
+ }")
+
+;;
+;; Subtract bytes.
+;;
+
+;; Expand 8-bit subtraction.  Subtracting a constant is turned into
+;; an add of its negation; other cases fall through to the insn
+;; patterns below.
+(define_expand "subqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (minus:QI (match_operand:QI 1 "general_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
+ ""
+ "if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_addqi3 (operands[0], operands[1],
+ gen_int_mode (-INTVAL (operands[2]), QImode)));
+ DONE;
+ }
+ ")
+
+;; Push the result of an 8-bit subtract: push the minuend, then
+;; subtract the subtrahend from the new top-of-stack byte.  Both
+;; alternatives emit the same code; they differ only in constraints.
+(define_insn "*push_subqi3"
+ [(set (match_operand:QI 0 "push_operand" "=<, <")
+ (minus:QI (match_operand:QI 1 "general_operand" "g,rSn")
+ (match_operand:QI 2 "general_operand" "rSn, g")))]
+ ""
+ "@
+ push\\t%1%<\;mov\\tw,%2\;sub\\t1(SP),w%>
+ push\\t%1%<\;mov\\tw,%2\;sub\\t1(SP),w%>")
+
+;; 8-bit subtract into the w accumulator (reg 10): w = %0 - %1 via
+;; "mov w,%1; sub w,%0" (sub w,src computes src - w on this port).
+;; Only valid during the reorg QImode split phase.
+(define_insn "*subqi3_w"
+ [(set (reg:QI 10)
+ (minus:QI (match_operand:QI 0 "general_operand" "rS,rSi, g,rSi")
+ (match_operand:QI 1 "general_operand" "rSi, rS,rSi, g")))]
+ "(ip2k_reorg_split_qimode)"
+ "@
+ mov\\tw,%1\;sub\\tw,%0
+ mov\\tw,%1\;sub\\tw,%0
+ mov\\tw,%1\;sub\\tw,%0
+ mov\\tw,%1\;sub\\tw,%0")
+
+;; General 8-bit subtract.  Two-address alternatives are emitted
+;; directly (the first two pairs special-case pointer-high adjustment
+;; for iph/dph); three-address alternatives emit "#" and are split
+;; during reorg into a subtract-into-w followed by a move from w.
+(define_insn_and_split "*subqi3"
+ [(set
+ (match_operand:QI
+ 0 "nonimmediate_operand" "=k,k,z,z,djyoR,djyoR,djyS,djyoR, g, g, rS, rS")
+ (minus:QI
+ (match_operand:QI
+ 1 "general_operand" "0,0,0,0, 0, 0, 0, 0, rS,rSi, g,rSi")
+ (match_operand:QI
+ 2 "general_operand" "M,g,M,g, M, N, g, rSi,rSi, rS,rSi, g")))]
+ ""
+ "@
+ incsnz\\t%0\;dec\\tiph
+ mov\\tw,%2\;sub\\t%0,w
+ incsnz\\t%0\;dec\\tdph
+ mov\\tw,%2\;sub\\t%0,w
+ inc\\t%0
+ dec\\t%0
+ mov\\tw,%2\;sub\\t%0,w
+ mov\\tw,%2\;sub\\t%0,w
+ #
+ #
+ #
+ #"
+ "(ip2k_reorg_split_qimode
+ && ! rtx_equal_p (operands[0], operands[1]))"
+ [(set (reg:QI 10)
+ (minus:QI (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (reg:QI 10))]
+ ""
+ [(set_attr "skip" "no,no,no,no,yes,yes,no,no,no,no,no,no")
+ (set_attr "clobberw" "no,yes,no,yes,no,no,yes,yes,yes,yes,yes,yes")])
+
+;;
+;; Subtract 16-bit integers.
+;;
+
+;; Expand 16-bit subtraction.  Subtracting a constant is turned into
+;; an add of its negation; other cases fall through to the insn
+;; patterns below.
+(define_expand "subhi3"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (minus:HI (match_operand:HI 1 "general_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_addhi3 (operands[0], operands[1],
+ gen_int_mode (-INTVAL (operands[2]), HImode)));
+ DONE;
+ }
+ ")
+
+;; Push the result of a 16-bit subtract: push and adjust each byte of
+;; the minuend in place on the stack, using subc to propagate the
+;; borrow.  Both alternatives emit identical code.
+(define_insn "*push_subhi3"
+ [(set (match_operand:HI 0 "push_operand" "=<, <")
+ (minus:HI (match_operand:HI 1 "general_operand" "ron,rSn")
+ (match_operand:HI 2 "general_operand" "rSn,ron")))]
+ ""
+ "@
+ push\\t%L1%<\;mov\\tw,%L2\;sub\\t1(SP),w\;push\\t%H1%<\;mov\\tw,%H2\;subc\\t1(SP),w%>%>
+ push\\t%L1%<\;mov\\tw,%L2\;sub\\t1(SP),w\;push\\t%H1%<\;mov\\tw,%H2\;subc\\t1(SP),w%>%>")
+
+;; 16-bit subtract of an immediate (cases the expander did not fold
+;; into an add).  Alternative 5 ('O', subtracting zero) is a plain
+;; copy; 'a' destinations are low-byte-only (no borrow into the high
+;; byte).
+(define_insn "*subhi3_imm"
+ [(set
+ (match_operand:HI 0 "nonimmediate_operand" "=a,a,a,a,do,&r,&ro,&rS")
+ (minus:HI (match_operand:HI 1 "general_operand" "0,0,0,0, 0,ro, rS, ro")
+ (match_operand 2 "immediate_operand" "N,M,P,i, i, O, i, i")))]
+ ""
+ "@
+ dec\\t%L0
+ inc\\t%L0
+ mov\\tw,%2\;sub\\t%L0,w
+ mov\\tw,%L2\;sub\\t%L0,w\;mov\\tw,%H2\;sub\\t%H0,w
+ mov\\tw,%L2\;sub\\t%L0,w\;mov\\tw,%H2\;subc\\t%H0,w
+ mov\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;subc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;subc\\tw,%H1\;mov\\t%H0,w"
+ [(set_attr "skip" "yes,yes,no,no,no,no,no,no")
+ (set_attr "clobberw" "no,no,yes,yes,yes,yes,yes,yes")])
+
+;; hi-dest = immediate - (zero_extend:HI qi).  The low byte is
+;; computed with "sub w,src" (src - w); the high byte takes the
+;; immediate's high byte minus the borrow (subc with cleared w).
+(define_insn "*subhi3_ximm_zero_extend"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=ro, rS")
+ (minus:HI (match_operand:HI 1 "immediate_operand" "i, i")
+ (zero_extend:HI
+ (match_operand:QI 2 "nonimmediate_operand" "rS,roR"))))]
+ ""
+ "@
+ mov\\tw,%2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H1\;mov\\t%H0,w\;clr\\twreg\;subc\\t%H0,w
+ mov\\tw,%2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H1\;mov\\t%H0,w\;clr\\twreg\;subc\\t%H0,w")
+
+;; hi-dest = immediate - hi.  Alternative 0 (dest overlaps the
+;; subtrahend) saves the subtrahend's high byte on the stack before
+;; overwriting the destination, then pops it back for the subc.
+(define_insn "*subhi3_ximm"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=&uo,&ro,&rS")
+ (minus:HI (match_operand:HI 1 "immediate_operand" "i, i, i")
+ (match_operand:HI 2 "nonimmediate_operand" "0, rS, ro")))]
+ ""
+ "@
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;push\\t%H2%<\;mov\\tw,%H1\;mov\\t%H0,w\;pop\\twreg%>\;subc\\t%H0,w
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H1\;mov\\t%H0,w\;mov\\tw,%H2\;subc\\t%H0,w
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H1\;mov\\t%H0,w\;mov\\tw,%H2\;subc\\t%H0,w")
+
+;; hi-dest = hi - (zero_extend:HI qi).  The borrow out of the low
+;; byte is subtracted from the high byte via clr wreg + subc; the 'a'
+;; destination gets only the low-byte subtract.
+(define_insn "*subhi3_nonimm_zero_extend"
+ [(set
+ (match_operand:HI 0 "nonimmediate_operand" "=a,ro, rS,&ro,&rS,&rS")
+ (minus:HI
+ (match_operand:HI 1 "nonimmediate_operand" "0, 0, 0, rS, ro, rS")
+ (zero_extend:HI
+ (match_operand:QI 2 "general_operand" "roR,rS,roR, rS, rS,roR"))))]
+ ""
+ "@
+ mov\\tw,%2\;sub\\t%L0,w
+ mov\\tw,%2\;sub\\t%L0,w\;clr\\twreg\;subc\\t%H0,w
+ mov\\tw,%2\;sub\\t%L0,w\;clr\\twreg\;subc\\t%H0,w
+ mov\\tw,%2\;sub\\tw,%L1\;mov\\t%L0,w\;clr\\twreg\;subc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%2\;sub\\tw,%L1\;mov\\t%L0,w\;clr\\twreg\;subc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%2\;sub\\tw,%L1\;mov\\t%L0,w\;clr\\twreg\;subc\\tw,%H1\;mov\\t%H0,w")
+
+(define_insn "*subhi3_nonimm" ; 0 1 2 3 4 5 6
+ [(set
+ (match_operand:HI 0 "nonimmediate_operand" "=a,dS, o,&rS,&rS,&rS, o")
+ (minus:HI
+ (match_operand:HI 1 "nonimmediate_operand" "0, 0, 0, ro, ro, rS,rS")
+ (match_operand:HI 2 "nonimmediate_operand" "ro,ro,rS, 0, rS, ro,rS")))]
+ ""
+ "@
+ mov\\tw,%L2\;sub\\t%L0,w\;mov\\tw,%H2\;sub\\t%H0,w
+ mov\\tw,%L2\;sub\\t%L0,w\;mov\\tw,%H2\;subc\\t%H0,w
+ mov\\tw,%L2\;sub\\t%L0,w\;mov\\tw,%H2\;subc\\t%H0,w
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;subc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;subc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;subc\\tw,%H1\;mov\\t%H0,w
+ mov\\tw,%L2\;sub\\tw,%L1\;mov\\t%L0,w\;mov\\tw,%H2\;subc\\tw,%H1\;mov\\t%H0,w")
+
+;;
+;; Subtract 32-bit integers.
+;;
+
+;; 32-bit subtraction.  Alternatives:
+;;   0: op2 == -1 (constraint M) -> increment (add #1 with carries).
+;;   1: op2 == +1 (constraint N) -> decrement (add #-1 with carries:
+;;      x + 0xFFFFFFFF == x - 1 mod 2^32, avoiding a "clr wreg").
+;;   2-3: subtract in place, byte by byte with borrow ("subc").
+;;   4: immediate minus op2 where output overlaps op2; the stack
+;;      saves each byte of op2 around the overwriting moves.
+;;   5-9: three-address subtract via W.
+;;   10-11: immediate minus op2, three-address form.
+(define_insn "subsi3" ;           0  1   2   3   4   5   6   7   8   9   a   b
+  [(set
+    (match_operand:SI
+     0 "nonimmediate_operand" "=ro,ro, ro, rS,&ro,&rS,&ro,&rS,&rS,&ro,&ro,&rS")
+    (minus:SI
+     (match_operand:SI
+      1 "general_operand"      "0, 0,  0,  0,  i, ro, rS, rS, ro, rS,  i,  i")
+     (match_operand:SI
+      2 "general_operand"      "M, N,rSi,roi,  0,  0,  0,roi,rSi,rSi, rS, ro")))]
+  ""
+  "*{
+    switch (which_alternative) {
+    case 0:
+      /* Subtracting -1: increment with carry propagation.  */
+      return AS2 (mov, w, #1) CR_TAB
+             AS2 (add, %D0, w) CR_TAB
+             AS1 (clr, wreg) CR_TAB
+             AS2 (addc, %C0, w) CR_TAB
+             AS2 (addc, %B0, w) CR_TAB
+             AS2 (addc, %A0, w);
+
+    case 1:
+      /* Subtracting +1: add 0xFFFFFFFF (== -1) with carry
+         propagation; every byte uses the same 0xFF in W, so no
+         "clr wreg" is needed.  The previous sub/subc chain here
+         computed x - 0xFFFFFFFF == x + 1, i.e. an increment.  */
+      return AS2 (mov, w, #-1) CR_TAB
+             AS2 (add, %D0, w) CR_TAB
+             AS2 (addc, %C0, w) CR_TAB
+             AS2 (addc, %B0, w) CR_TAB
+             AS2 (addc, %A0, w);
+
+    case 2:
+    case 3:
+      return AS2 (mov, w, %D2) CR_TAB
+             AS2 (sub, %D0, w) CR_TAB
+             AS2 (mov, w, %C2) CR_TAB
+             AS2 (subc, %C0, w) CR_TAB
+             AS2 (mov, w, %B2) CR_TAB
+             AS2 (subc, %B0, w) CR_TAB
+             AS2 (mov, w, %A2) CR_TAB
+             AS2 (subc, %A0, w);
+
+    case 4:
+      /* Output overlaps operand 2; each source byte is pushed
+         before the destination byte is overwritten.  */
+      return AS2 (mov, w, %D2) CR_TAB
+             AS2 (sub, w, %D1) CR_TAB
+             AS2 (mov, %D0, w) CR_TAB
+             AS1 (push, %C2%<) CR_TAB
+             AS2 (mov, w, %C1) CR_TAB
+             AS2 (mov, %C0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %C0, w) CR_TAB
+             AS1 (push, %B2%<) CR_TAB
+             AS2 (mov, w, %B1) CR_TAB
+             AS2 (mov, %B0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %B0, w) CR_TAB
+             AS1 (push, %A2%<) CR_TAB
+             AS2 (mov, w, %A1) CR_TAB
+             AS2 (mov, %A0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %A0, w);
+
+    case 5:
+    case 6:
+    case 7:
+    case 8:
+    case 9:
+      return AS2 (mov, w, %D2) CR_TAB
+             AS2 (sub, w, %D1) CR_TAB
+             AS2 (mov, %D0, w) CR_TAB
+             AS2 (mov, w, %C2) CR_TAB
+             AS2 (subc, w, %C1) CR_TAB
+             AS2 (mov, %C0, w) CR_TAB
+             AS2 (mov, w, %B2) CR_TAB
+             AS2 (subc, w, %B1) CR_TAB
+             AS2 (mov, %B0, w) CR_TAB
+             AS2 (mov, w, %A2) CR_TAB
+             AS2 (subc, w, %A1) CR_TAB
+             AS2 (mov, %A0, w);
+
+    case 10:
+    case 11:
+      /* Immediate minus op2: copy each immediate byte into the
+         destination, then subtract the corresponding op2 byte.  */
+      return AS2 (mov, w, %D2) CR_TAB
+             AS2 (sub, w, %D1) CR_TAB
+             AS2 (mov, %D0, w) CR_TAB
+             AS2 (mov, w, %C1) CR_TAB
+             AS2 (mov, %C0, w) CR_TAB
+             AS2 (mov, w, %C2) CR_TAB
+             AS2 (subc, %C0, w) CR_TAB
+             AS2 (mov, w, %B1) CR_TAB
+             AS2 (mov, %B0, w) CR_TAB
+             AS2 (mov, w, %B2) CR_TAB
+             AS2 (subc, %B0, w) CR_TAB
+             AS2 (mov, w, %A1) CR_TAB
+             AS2 (mov, %A0, w) CR_TAB
+             AS2 (mov, w, %A2) CR_TAB
+             AS2 (subc, %A0, w);
+
+    default:
+      abort ();
+    }
+  }")
+
+;;
+;; Subtract 64-bit integers.
+;;
+
+;; 64-bit subtraction; byte operands run %S (most significant)
+;; through %Z (least significant).  Alternative layout mirrors
+;; subsi3: 0 = subtract -1 (increment), 1 = subtract +1 (decrement),
+;; 2-3 in-place, 4 overlapping via stack saves, 5-9 three-address,
+;; 10-11 immediate minus op2.
+(define_insn "subdi3" ;           0  1   2   3   4   5   6   7   8   9   a   b
+  [(set
+    (match_operand:DI
+     0 "nonimmediate_operand" "=ro,ro, ro, rS,ro,&rS,&ro,&rS,&rS,&ro,&ro,&rS")
+    (minus:DI
+     (match_operand:DI
+      1 "general_operand"      "0, 0,  0,  0,  i, ro, rS, rS, ro, rS,  i,  i")
+     (match_operand:DI
+      2 "general_operand"      "M, N,rSi,roi,  0,  0,  0,roi,rSi,rSi, rS, ro")))]
+  ""
+  "*{
+    switch (which_alternative) {
+    case 0:
+      /* Subtracting -1: increment with carry propagation.  */
+      return AS2 (mov, w, #1) CR_TAB
+             AS2 (add, %Z0, w) CR_TAB
+             AS1 (clr, wreg) CR_TAB
+             AS2 (addc, %Y0, w) CR_TAB
+             AS2 (addc, %X0, w) CR_TAB
+             AS2 (addc, %W0, w) CR_TAB
+             AS2 (addc, %V0, w) CR_TAB
+             AS2 (addc, %U0, w) CR_TAB
+             AS2 (addc, %T0, w) CR_TAB
+             AS2 (addc, %S0, w);
+
+    case 1:
+      /* Subtracting +1: add all-ones (== -1) with carry
+         propagation; x + 0xFF..FF == x - 1 mod 2^64.  The previous
+         sub/subc chain here computed x - 0xFF..FF == x + 1, i.e.
+         an increment.  */
+      return AS2 (mov, w, #-1) CR_TAB
+             AS2 (add, %Z0, w) CR_TAB
+             AS2 (addc, %Y0, w) CR_TAB
+             AS2 (addc, %X0, w) CR_TAB
+             AS2 (addc, %W0, w) CR_TAB
+             AS2 (addc, %V0, w) CR_TAB
+             AS2 (addc, %U0, w) CR_TAB
+             AS2 (addc, %T0, w) CR_TAB
+             AS2 (addc, %S0, w);
+
+    case 2:
+    case 3:
+      return AS2 (mov, w, %Z2) CR_TAB
+             AS2 (sub, %Z0, w) CR_TAB
+             AS2 (mov, w, %Y2) CR_TAB
+             AS2 (subc, %Y0, w) CR_TAB
+             AS2 (mov, w, %X2) CR_TAB
+             AS2 (subc, %X0, w) CR_TAB
+             AS2 (mov, w, %W2) CR_TAB
+             AS2 (subc, %W0, w) CR_TAB
+             AS2 (mov, w, %V2) CR_TAB
+             AS2 (subc, %V0, w) CR_TAB
+             AS2 (mov, w, %U2) CR_TAB
+             AS2 (subc, %U0, w) CR_TAB
+             AS2 (mov, w, %T2) CR_TAB
+             AS2 (subc, %T0, w) CR_TAB
+             AS2 (mov, w, %S2) CR_TAB
+             AS2 (subc, %S0, w);
+
+    case 4:
+      /* Output overlaps operand 2; each source byte is pushed
+         before the destination byte is overwritten.  */
+      return AS2 (mov, w, %Z2) CR_TAB
+             AS2 (sub, w, %Z1) CR_TAB
+             AS2 (mov, %Z0, w) CR_TAB
+             AS1 (push, %Y2%<) CR_TAB
+             AS2 (mov, w, %Y1) CR_TAB
+             AS2 (mov, %Y0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %Y0, w) CR_TAB
+             AS1 (push, %X2%<) CR_TAB
+             AS2 (mov, w, %X1) CR_TAB
+             AS2 (mov, %X0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %X0, w) CR_TAB
+             AS1 (push, %W2%<) CR_TAB
+             AS2 (mov, w, %W1) CR_TAB
+             AS2 (mov, %W0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %W0, w) CR_TAB
+             AS1 (push, %V2%<) CR_TAB
+             AS2 (mov, w, %V1) CR_TAB
+             AS2 (mov, %V0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %V0, w) CR_TAB
+             AS1 (push, %U2%<) CR_TAB
+             AS2 (mov, w, %U1) CR_TAB
+             AS2 (mov, %U0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %U0, w) CR_TAB
+             AS1 (push, %T2%<) CR_TAB
+             AS2 (mov, w, %T1) CR_TAB
+             AS2 (mov, %T0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %T0, w) CR_TAB
+             AS1 (push, %S2%<) CR_TAB
+             AS2 (mov, w, %S1) CR_TAB
+             AS2 (mov, %S0, w) CR_TAB
+             AS1 (pop, wreg%>) CR_TAB
+             AS2 (subc, %S0, w);
+
+    case 5:
+    case 6:
+    case 7:
+    case 8:
+    case 9:
+      return AS2 (mov, w, %Z2) CR_TAB
+             AS2 (sub, w, %Z1) CR_TAB
+             AS2 (mov, %Z0, w) CR_TAB
+             AS2 (mov, w, %Y2) CR_TAB
+             AS2 (subc, w, %Y1) CR_TAB
+             AS2 (mov, %Y0, w) CR_TAB
+             AS2 (mov, w, %X2) CR_TAB
+             AS2 (subc, w, %X1) CR_TAB
+             AS2 (mov, %X0, w) CR_TAB
+             AS2 (mov, w, %W2) CR_TAB
+             AS2 (subc, w, %W1) CR_TAB
+             AS2 (mov, %W0, w) CR_TAB
+             AS2 (mov, w, %V2) CR_TAB
+             AS2 (subc, w, %V1) CR_TAB
+             AS2 (mov, %V0, w) CR_TAB
+             AS2 (mov, w, %U2) CR_TAB
+             AS2 (subc, w, %U1) CR_TAB
+             AS2 (mov, %U0, w) CR_TAB
+             AS2 (mov, w, %T2) CR_TAB
+             AS2 (subc, w, %T1) CR_TAB
+             AS2 (mov, %T0, w) CR_TAB
+             AS2 (mov, w, %S2) CR_TAB
+             AS2 (subc, w, %S1) CR_TAB
+             AS2 (mov, %S0, w);
+
+    case 10:
+    case 11:
+      /* Immediate minus op2: copy each immediate byte into the
+         destination, then subtract the corresponding op2 byte.  */
+      return AS2 (mov, w, %Z2) CR_TAB
+             AS2 (sub, w, %Z1) CR_TAB
+             AS2 (mov, %Z0, w) CR_TAB
+             AS2 (mov, w, %Y1) CR_TAB
+             AS2 (mov, %Y0, w) CR_TAB
+             AS2 (mov, w, %Y2) CR_TAB
+             AS2 (subc, %Y0, w) CR_TAB
+             AS2 (mov, w, %X1) CR_TAB
+             AS2 (mov, %X0, w) CR_TAB
+             AS2 (mov, w, %X2) CR_TAB
+             AS2 (subc, %X0, w) CR_TAB
+             AS2 (mov, w, %W1) CR_TAB
+             AS2 (mov, %W0, w) CR_TAB
+             AS2 (mov, w, %W2) CR_TAB
+             AS2 (subc, %W0, w) CR_TAB
+             AS2 (mov, w, %V1) CR_TAB
+             AS2 (mov, %V0, w) CR_TAB
+             AS2 (mov, w, %V2) CR_TAB
+             AS2 (subc, %V0, w) CR_TAB
+             AS2 (mov, w, %U1) CR_TAB
+             AS2 (mov, %U0, w) CR_TAB
+             AS2 (mov, w, %U2) CR_TAB
+             AS2 (subc, %U0, w) CR_TAB
+             AS2 (mov, w, %T1) CR_TAB
+             AS2 (mov, %T0, w) CR_TAB
+             AS2 (mov, w, %T2) CR_TAB
+             AS2 (subc, %T0, w) CR_TAB
+             AS2 (mov, w, %S1) CR_TAB
+             AS2 (mov, %S0, w) CR_TAB
+             AS2 (mov, w, %S2) CR_TAB
+             AS2 (subc, %S0, w);
+
+    default:
+      abort ();
+    }
+  }")
+
+;;
+;; Bitwise and instructions.
+;;
+;; NOTE(review): (reg:QI 10) in the patterns below is evidently the
+;; W accumulator (the templates use it as "w") -- confirm against
+;; the register numbering in ip2k.h.  The ip2k_reorg_split_* flags
+;; gate patterns that only apply once machine reorg has split wide
+;; operations down to this width.
+
+;; Expander only; matching is done by the *andqi3* patterns below.
+(define_expand "andqi3"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "")
+        (and:QI (match_operand:QI 1 "nonimmediate_operand" "")
+                (match_operand:QI 2 "general_operand" "")))]
+  ""
+  "")
+
+;; AND with a mask that clears exactly one bit -> single "clrb".
+;; The 0xffffff00 OR widens the QImode constant so the upper bits
+;; do not look like extra clear bits.
+(define_insn "*andqi3_bit"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=roR")
+        (and:QI (match_dup 0)
+                (match_operand:QI 1 "const_int_operand" "n")))]
+  "(find_one_clear_bit_p (INTVAL (operands[1]) | 0xffffff00UL) != -1)"
+  "*{
+    operands[2] = GEN_INT (find_one_clear_bit_p (INTVAL (operands[1])
+                                                 | 0xffffff00UL));
+    return AS2 (clrb, %0, %b2);
+  }"
+  [(set_attr "skip" "yes")
+   (set_attr "clobberw" "no")])
+
+;; AND a general operand into W (post-split form).
+(define_insn "*andqi3_w_fr"
+  [(set (reg:QI 10)
+        (and:QI (match_operand:QI 0 "general_operand" "roRn")
+                (reg:QI 10)))]
+  "(ip2k_reorg_split_qimode)"
+  "and\\tw,%0"
+  [(set_attr "skip" "yes")])
+
+;; AND W into a file register.  When the destination differs from
+;; operand 1 the insn is re-split into an and-into-W followed by a
+;; store, hence the "#" alternatives.
+(define_insn_and_split "*andqi3_fr_w"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=roR,rS,roR, rS,rS")
+        (and:QI
+         (match_operand:QI 1 "nonimmediate_operand" "%0, 0, rS,roR,rS")
+         (reg:QI 10)))]
+  "(ip2k_reorg_split_qimode)"
+  "@
+   and\\t%0,w
+   and\\t%0,w
+   #
+   #
+   #"
+  "(ip2k_reorg_split_qimode
+    && ! rtx_equal_p (operands[0], operands[1]))"
+  [(set (reg:QI 10)
+        (and:QI (match_dup 1)
+                (reg:QI 10)))
+   (set (match_dup 0)
+        (reg:QI 10))]
+  ""
+  [(set_attr "skip" "yes,yes,no,no,no")
+   (set_attr "clobberw" "no,no,yes,yes,yes")])
+
+;; General QImode AND.  The split condition keeps the pattern intact
+;; when a single "clrb" (see *andqi3_bit) could still apply.
+(define_insn_and_split "*andqi3" ;                   0    1   2   3    4
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=roR, rS,roR, rS, rS")
+        (and:QI
+         (match_operand:QI 1 "nonimmediate_operand" "%0, 0, rS,roR, rS")
+         (match_operand:QI 2 "general_operand" "rSn,roRn,rSn,rSn,roRn")))]
+  ""
+  "@
+   mov\\tw,%2\;and\\t%0,w
+   #
+   #
+   #
+   #"
+  "(ip2k_reorg_split_qimode
+    && (! rtx_equal_p (operands[0], operands[1])
+        || GET_CODE (operands[2]) != CONST_INT
+        || find_one_clear_bit_p (INTVAL (operands[2]) | 0xffffff00UL) == -1))"
+  [(set (reg:QI 10)
+        (match_dup 2))
+   (set (match_dup 0)
+        (and:QI (match_dup 1)
+                (reg:QI 10)))])
+
+;; HImode AND: split into two independent QImode ANDs (no carry is
+;; involved, so high and low halves are handled separately).
+(define_insn_and_split "andhi3" ;                   0   1   2   3   4
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (and:HI
+         (match_operand:HI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:HI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_himode)"
+  [(set (match_dup 3)
+        (and:QI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (and:QI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], QImode);
+    operands[4] = ip2k_get_high_half (operands[1], QImode);
+    operands[5] = ip2k_get_high_half (operands[2], QImode);
+    operands[6] = ip2k_get_low_half (operands[0], QImode);
+    operands[7] = ip2k_get_low_half (operands[1], QImode);
+    operands[8] = ip2k_get_low_half (operands[2], QImode);
+  }")
+
+;; SImode AND: split into two HImode ANDs, which split again above.
+(define_insn_and_split "andsi3" ;                   0   1   2   3   4
+  [(set (match_operand:SI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (and:SI
+         (match_operand:SI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:SI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_simode)"
+  [(set (match_dup 3)
+        (and:HI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (and:HI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], HImode);
+    operands[4] = ip2k_get_high_half (operands[1], HImode);
+    operands[5] = ip2k_get_high_half (operands[2], HImode);
+    operands[6] = ip2k_get_low_half (operands[0], HImode);
+    operands[7] = ip2k_get_low_half (operands[1], HImode);
+    operands[8] = ip2k_get_low_half (operands[2], HImode);
+  }")
+
+;; DImode AND: split into two SImode ANDs, cascading down.
+(define_insn_and_split "anddi3" ;                   0   1   2   3   4
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (and:DI
+         (match_operand:DI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:DI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_dimode)"
+  [(set (match_dup 3)
+        (and:SI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (and:SI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], SImode);
+    operands[4] = ip2k_get_high_half (operands[1], SImode);
+    operands[5] = ip2k_get_high_half (operands[2], SImode);
+    operands[6] = ip2k_get_low_half (operands[0], SImode);
+    operands[7] = ip2k_get_low_half (operands[1], SImode);
+    operands[8] = ip2k_get_low_half (operands[2], SImode);
+  }")
+
+;;
+;; Bitwise or instructions.
+;;
+;; Structure mirrors the AND patterns above: a named expander, a
+;; single-bit "setb" special case, a general QImode insn, and
+;; HI/SI/DI forms that split into two half-width ORs.
+
+;; Expander only; matching is done by the *iorqi3* patterns below.
+(define_expand "iorqi3"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "")
+        (ior:QI (match_operand:QI 1 "nonimmediate_operand" "")
+                (match_operand:QI 2 "general_operand" "")))]
+  ""
+  "")
+
+;; OR with a mask that sets exactly one bit -> single "setb".
+(define_insn "*iorqi3_bit"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=roR")
+        (ior:QI (match_dup 0)
+                (match_operand:QI 1 "const_int_operand" "n")))]
+  "(find_one_set_bit_p (INTVAL (operands[1]) & 0xff) != -1)"
+  "*{
+    operands[2] = GEN_INT (find_one_set_bit_p (INTVAL (operands[1]) & 0xff));
+    return AS2 (setb, %0, %b2);
+  }"
+  [(set_attr "skip" "yes")
+   (set_attr "clobberw" "no")])
+
+;; General QImode OR through W.
+(define_insn "*iorqi3" ;                             0    1   2   3    4
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=roR, rS,roR, rS, rS")
+        (ior:QI
+         (match_operand:QI 1 "nonimmediate_operand" "%0, 0, rS,roR, rS")
+         (match_operand:QI 2 "general_operand" "rSi,roRi,rSi,rSi,roRi")))]
+  ""
+  "@
+   mov\\tw,%2\;or\\t%0,w
+   mov\\tw,%2\;or\\t%0,w
+   mov\\tw,%2\;or\\tw,%1\;mov\\t%0,w
+   mov\\tw,%2\;or\\tw,%1\;mov\\t%0,w
+   mov\\tw,%2\;or\\tw,%1\;mov\\t%0,w")
+
+;; HImode OR: two independent QImode ORs.
+(define_insn_and_split "iorhi3" ;                   0   1   2   3   4
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (ior:HI
+         (match_operand:HI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:HI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_himode)"
+  [(set (match_dup 3)
+        (ior:QI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (ior:QI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], QImode);
+    operands[4] = ip2k_get_high_half (operands[1], QImode);
+    operands[5] = ip2k_get_high_half (operands[2], QImode);
+    operands[6] = ip2k_get_low_half (operands[0], QImode);
+    operands[7] = ip2k_get_low_half (operands[1], QImode);
+    operands[8] = ip2k_get_low_half (operands[2], QImode);
+  }")
+
+;; SImode OR: two HImode ORs.
+(define_insn_and_split "iorsi3" ;                   0   1   2   3   4
+  [(set (match_operand:SI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (ior:SI
+         (match_operand:SI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:SI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_simode)"
+  [(set (match_dup 3)
+        (ior:HI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (ior:HI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], HImode);
+    operands[4] = ip2k_get_high_half (operands[1], HImode);
+    operands[5] = ip2k_get_high_half (operands[2], HImode);
+    operands[6] = ip2k_get_low_half (operands[0], HImode);
+    operands[7] = ip2k_get_low_half (operands[1], HImode);
+    operands[8] = ip2k_get_low_half (operands[2], HImode);
+  }")
+
+;; DImode OR: two SImode ORs.
+(define_insn_and_split "iordi3" ;                   0   1   2   3   4
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (ior:DI
+         (match_operand:DI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:DI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_dimode)"
+  [(set (match_dup 3)
+        (ior:SI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (ior:SI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], SImode);
+    operands[4] = ip2k_get_high_half (operands[1], SImode);
+    operands[5] = ip2k_get_high_half (operands[2], SImode);
+    operands[6] = ip2k_get_low_half (operands[0], SImode);
+    operands[7] = ip2k_get_low_half (operands[1], SImode);
+    operands[8] = ip2k_get_low_half (operands[2], SImode);
+  }")
+
+;;
+;; Bitwise xor instructions
+;;
+;; TODO: xor ops can also use "not w, fr"!
+;;
+;; Same structure as AND/OR above.  The first xorqi3 alternative
+;; recognizes XOR with -1 (constraint M) as a one-instruction "not".
+
+(define_insn "xorqi3"
+  [(set
+    (match_operand:QI 0 "nonimmediate_operand" "=roR,roR, rS,roR, rS, rS")
+    (xor:QI (match_operand:QI 1 "general_operand" "%0, 0, 0, rS,roR, rS")
+            (match_operand:QI 2 "general_operand" "M,rSi,roRi,rSi,rSi,roRi")))]
+  ""
+  "@
+   not\\t%0
+   mov\\tw,%2\;xor\\t%0,w
+   mov\\tw,%2\;xor\\t%0,w
+   mov\\tw,%1\;xor\\tw,%2\;mov\\t%0,w
+   mov\\tw,%1\;xor\\tw,%2\;mov\\t%0,w
+   mov\\tw,%1\;xor\\tw,%2\;mov\\t%0,w"
+  [(set_attr "clobberw" "no,yes,yes,yes,yes,yes")])
+
+;; HImode XOR: two independent QImode XORs.
+(define_insn_and_split "xorhi3" ;                   0   1   2   3   4
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (xor:HI
+         (match_operand:HI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:HI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_himode)"
+  [(set (match_dup 3)
+        (xor:QI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (xor:QI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], QImode);
+    operands[4] = ip2k_get_high_half (operands[1], QImode);
+    operands[5] = ip2k_get_high_half (operands[2], QImode);
+    operands[6] = ip2k_get_low_half (operands[0], QImode);
+    operands[7] = ip2k_get_low_half (operands[1], QImode);
+    operands[8] = ip2k_get_low_half (operands[2], QImode);
+  }")
+
+;; SImode XOR: two HImode XORs.
+(define_insn_and_split "xorsi3" ;                   0   1   2   3   4
+  [(set (match_operand:SI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (xor:SI
+         (match_operand:SI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:SI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_simode)"
+  [(set (match_dup 3)
+        (xor:HI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (xor:HI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], HImode);
+    operands[4] = ip2k_get_high_half (operands[1], HImode);
+    operands[5] = ip2k_get_high_half (operands[2], HImode);
+    operands[6] = ip2k_get_low_half (operands[0], HImode);
+    operands[7] = ip2k_get_low_half (operands[1], HImode);
+    operands[8] = ip2k_get_low_half (operands[2], HImode);
+  }")
+
+;; DImode XOR: two SImode XORs.
+(define_insn_and_split "xordi3" ;                   0   1   2   3   4
+  [(set (match_operand:DI 0 "nonimmediate_operand" "=uo, uS,&dS,&do,&dS")
+        (xor:DI
+         (match_operand:DI 1 "nonimmediate_operand" "%0, 0, ro, rS, rS")
+         (match_operand:DI 2 "general_operand" "rSn,ron,rSn,rSn,ron")))]
+  ""
+  "#"
+  "(ip2k_reorg_split_dimode)"
+  [(set (match_dup 3)
+        (xor:SI (match_dup 4)
+                (match_dup 5)))
+   (set (match_dup 6)
+        (xor:SI (match_dup 7)
+                (match_dup 8)))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], SImode);
+    operands[4] = ip2k_get_high_half (operands[1], SImode);
+    operands[5] = ip2k_get_high_half (operands[2], SImode);
+    operands[6] = ip2k_get_low_half (operands[0], SImode);
+    operands[7] = ip2k_get_low_half (operands[1], SImode);
+    operands[8] = ip2k_get_low_half (operands[2], SImode);
+  }")
+
+;;
+;; Multiply instructions.
+;;
+;; The hardware multiplier produces a 16-bit product: the low byte
+;; lands in W and the high byte in the MULH register (the templates
+;; read "mulh").  NOTE(review): (reg:QI 15) in umulqihi3 is
+;; evidently MULH -- confirm against ip2k.h register numbering.
+
+;; 8x8 -> 8 unsigned multiply; only the low byte is kept.
+(define_insn "umulqi3"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=ro, rS, rS")
+        (mult:QI (match_operand:QI 1 "nonimmediate_operand" "%rS,roR, rS")
+                 (match_operand:QI 2 "general_operand" "rSi,rSi,roRi")))]
+  ""
+  "mov\\tw,%1\;mulu\\tw,%2\;mov\\t%0,w")
+
+;; 8x8 -> 16 signed multiply ("muls"); low byte from W, high from MULH.
+(define_insn "mulqihi3"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro, rS, rS")
+        (mult:HI
+         (sign_extend:HI
+          (match_operand:QI 1 "nonimmediate_operand" "%rS,roR, rS"))
+         (sign_extend:HI
+          (match_operand:QI 2 "general_operand" "rSi,rSi,roRi"))))]
+  ""
+  "@
+   mov\\tw,%1\;muls\\tw,%2\;mov\\t%L0,w\;mov\\tw,mulh\;mov\\t%H0,w
+   mov\\tw,%1\;muls\\tw,%2\;mov\\t%L0,w\;mov\\tw,mulh\;mov\\t%H0,w
+   mov\\tw,%1\;muls\\tw,%2\;mov\\t%L0,w\;mov\\tw,mulh\;mov\\t%H0,w")
+
+;; 8x8 -> 16 unsigned multiply, split after reorg into a QImode
+;; multiply (low byte) plus a copy of MULH (reg 15) via W (reg 10)
+;; into the high byte.
+(define_insn_and_split "umulqihi3"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro, rS, rS")
+        (mult:HI (zero_extend:HI
+                  (match_operand:QI 1 "nonimmediate_operand" "%rS,roR, rS"))
+                 (zero_extend:HI
+                  (match_operand:QI 2 "general_operand" "rSi,rSi,roRi"))))]
+  ""
+  "#"
+  "ip2k_reorg_split_qimode"
+  [(set (match_dup 3)
+        (mult:QI (match_dup 1)
+                 (match_dup 2)))
+   (set (reg:QI 10)
+        (reg:QI 15))
+   (set (match_dup 4)
+        (reg:QI 10))]
+  "{
+    operands[3] = ip2k_get_low_half (operands[0], QImode);
+    operands[4] = ip2k_get_high_half (operands[0], QImode);
+  }")
+
+;; HImode multiply by 2 implemented as a shift-left-by-one (clear
+;; carry, rotate both bytes left).  NOTE(review): the
+;; (zero_extend:HI (const_int 2)) form is unusual RTL (const_ints
+;; are modeless) but matches what the shift->multiply splitters in
+;; this file generate -- verify it survives RTL canonicalization.
+(define_insn "*mulhi3_by2"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+        (mult:HI (match_operand:HI 1 "nonimmediate_operand" "0, rS, ro")
+                 (zero_extend:HI (const_int 2))))]
+  ""
+  "@
+   clrb\\tSTATUS,0\;rl\\t%L0\;rl\\t%H0
+   clrb\\tSTATUS,0\;rl\\tw,%L1\;mov\\t%L0,w\;rl\\tw,%H1\;mov\\t%H0,w
+   clrb\\tSTATUS,0\;rl\\tw,%L1\;mov\\t%L0,w\;rl\\tw,%H1\;mov\\t%H0,w"
+  [(set_attr "clobberw" "no,yes,yes")])
+
+;; HImode times a zero-extended QImode value: two 8x8 multiplies,
+;; with the high partial product added into the high result byte.
+(define_insn "*mulhi3_byqi"
+  [(set (match_operand:HI
+         0 "nonimmediate_operand" "=ro,&ro,&rS, &rS")
+        (mult:HI (match_operand:HI
+                  1 "nonimmediate_operand" "0, rS, ro, rS")
+                 (zero_extend:HI (match_operand:QI
+                                  2 "general_operand" "rSi,rSi,rSi,roRi"))))]
+  ""
+  "@
+   mov\\tw,%L1\;mulu\\tw,%2\;mov\\t%L0,w\;push\\tmulh%<\;mov\\tw,%H1\;mulu\\tw,%2\;pop\\t%H0%>\;add\\t%H0,w
+   mov\\tw,%L1\;mulu\\tw,%2\;mov\\t%L0,w\;mov\\tw,mulh\;mov\\t%H0,w\;mov\\tw,%H1\;mulu\\tw,%2\;add\\t%H0,w
+   mov\\tw,%L1\;mulu\\tw,%2\;mov\\t%L0,w\;mov\\tw,mulh\;mov\\t%H0,w\;mov\\tw,%H1\;mulu\\tw,%2\;add\\t%H0,w
+   mov\\tw,%L1\;mulu\\tw,%2\;mov\\t%L0,w\;mov\\tw,mulh\;mov\\t%H0,w\;mov\\tw,%H1\;mulu\\tw,%2\;add\\t%H0,w")
+
+;; High 8 bits of an 8x8 signed multiply: multiply, then copy MULH
+;; to the destination.  All three alternatives emit the same code.
+;; (Fixed: the final "mov %0,w" used a literal space where every
+;; other template in this file uses \t.)
+(define_insn "smulqi_highpart"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=roR, rS, rS")
+        (truncate:QI
+         (lshiftrt:HI
+          (mult:HI
+           (sign_extend:HI
+            (match_operand:QI 1 "nonimmediate_operand" "%rS,roR, rS"))
+           (sign_extend:HI
+            (match_operand:QI 2 "general_operand" "rSi,rSi,roRi")))
+          (const_int 8))))]
+  ""
+  "@
+   mov\\tw,%1\;muls\\tw,%2\;mov\\tw,mulh\;mov\\t%0,w
+   mov\\tw,%1\;muls\\tw,%2\;mov\\tw,mulh\;mov\\t%0,w
+   mov\\tw,%1\;muls\\tw,%2\;mov\\tw,mulh\;mov\\t%0,w")
+
+;; High 8 bits of an 8x8 unsigned multiply: multiply, then copy
+;; MULH to the destination.  All three alternatives emit the same
+;; code.  (Fixed: the final "mov %0,w" used a literal space where
+;; every other template in this file uses \t.)
+(define_insn "umulqi_highpart"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=roR, rS, rS")
+        (truncate:QI
+         (lshiftrt:HI
+          (mult:HI
+           (zero_extend:HI
+            (match_operand:QI 1 "nonimmediate_operand" "%rS,roR, rS"))
+           (zero_extend:HI
+            (match_operand:QI 2 "general_operand" "rSi,rSi,roRi")))
+          (const_int 8))))]
+  ""
+  "@
+   mov\\tw,%1\;mulu\\tw,%2\;mov\\tw,mulh\;mov\\t%0,w
+   mov\\tw,%1\;mulu\\tw,%2\;mov\\tw,mulh\;mov\\t%0,w
+   mov\\tw,%1\;mulu\\tw,%2\;mov\\tw,mulh\;mov\\t%0,w")
+
+;; Full 16x16 multiply via the __mulhi3 library routine: arguments
+;; are pushed on the stack and the result is popped back.  "page"
+;; precedes "call" to select the code page of the target.
+(define_insn "mulhi3"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=uo, uS, uS")
+        (mult:HI (match_operand:HI 1 "nonimmediate_operand" "%rS, ro, rS")
+                 (match_operand:HI 2 "general_operand" "rSi,rSi,roi")))]
+  ""
+  "push\\t%L2%<\;push\\t%H2%<\;push\\t%L1%<\;push\\t%H1%>\;page\\t__mulhi3\;call\\t__mulhi3\;pop\\t%H0%>\;pop\\t%L0%>")
+
+;; If we find that we're multiplying by a constant that's less than 256 we
+;; can replace a full "mulhi3" with one of the lighter weight variants
+;; that multiplies an HImode value by a QImode one.
+;;
+(define_split
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,rS")
+        (mult:HI (match_operand:HI 1 "nonimmediate_operand" "rS,ro")
+                 (match_operand:HI 2 "const_int_operand" "P, P")))]
+  "(INTVAL (operands[2]) < 0x100)"
+  [(set (match_dup 0)
+        (mult:HI (match_dup 1)
+                 (zero_extend:HI (match_dup 3))))]
+  "operands[3] = gen_int_mode (INTVAL (operands[2]), QImode);")
+
+;;
+;; Divide/Modulus functions.
+;;
+;; Division and modulus are performed by libgcc routines
+;; (__[u]divmodhi4/si4/di4 in libgcc.S).  The expanders route the
+;; inputs/results through fixed registers starting at 128, which
+;; evidently form the calling convention shared with libgcc.S --
+;; NOTE(review): confirm these numbers match ip2k.h's register
+;; layout.  The clobbered QI registers are scratch space used by
+;; the library routines.
+
+(define_expand "udivmodhi4"
+  [(parallel [(set (reg:HI 128)
+                   (udiv:HI (match_operand:HI 1 "general_operand" "")
+                            (match_operand:HI 2 "general_operand" "")))
+              (set (reg:HI 130)
+                   (umod:HI (match_dup 1) (match_dup 2)))
+              (clobber (reg:QI 132))
+              (clobber (reg:QI 133))])
+   (set (match_operand:HI 0 "general_operand" "") (reg:HI 128))
+   (set (match_operand:HI 3 "general_operand" "") (reg:HI 130))]
+  ""
+  "")
+
+;; Push both HImode arguments (high byte last) and call the library
+;; routine; results come back in regs 128/130.
+(define_insn "*udivmodhi4_call"
+  [(set (reg:HI 128)
+        (udiv:HI (match_operand:HI 0 "general_operand" "uSi,uoi")
+                 (match_operand:HI 1 "general_operand" "uoi,uSi")))
+   (set (reg:HI 130)
+        (umod:HI (match_dup 0) (match_dup 1)))
+   (clobber (reg:QI 132))
+   (clobber (reg:QI 133))]
+  ""
+  "push\\t%L1%<\;push\\t%H1%<\;push\\t%L0%<\;push\\t%H0%>%>%>\;page\\t__udivmodhi4\;call\\t__udivmodhi4")
+
+(define_expand "divmodhi4"
+  [(parallel [(set (reg:HI 128)
+                   (div:HI (match_operand:HI 1 "general_operand" "")
+                           (match_operand:HI 2 "general_operand" "")))
+              (set (reg:HI 130)
+                   (mod:HI (match_dup 1)
+                           (match_dup 2)))
+              (clobber (reg:QI 132))
+              (clobber (reg:QI 133))
+              (clobber (reg:QI 134))
+              (clobber (reg:QI 135))])
+   (set (match_operand:HI 0 "general_operand" "") (reg:HI 128))
+   (set (match_operand:HI 3 "general_operand" "") (reg:HI 130))]
+  ""
+  "")
+
+;; Signed HImode divide/modulus; the signed routine needs two more
+;; scratch registers than the unsigned one.
+(define_insn "*divmodhi4_call"
+  [(set (reg:HI 128)
+        (div:HI (match_operand:HI 0 "general_operand" "uSi,uoi")
+                (match_operand:HI 1 "general_operand" "uoi,uSi")))
+   (set (reg:HI 130)
+        (mod:HI (match_dup 0) (match_dup 1)))
+   (clobber (reg:QI 132))
+   (clobber (reg:QI 133))
+   (clobber (reg:QI 134))
+   (clobber (reg:QI 135))]
+  ""
+  "push\\t%L1%<\;push\\t%H1%<\;push\\t%L0%<\;push\\t%H0%>%>%>\;page\\t__divmodhi4\;call\\t__divmodhi4")
+
+(define_expand "udivmodsi4"
+  [(parallel [(set (reg:SI 128)
+                   (udiv:SI (match_operand:SI 1 "general_operand" "")
+                            (match_operand:SI 2 "general_operand" "")))
+              (set (reg:SI 132)
+                   (umod:SI (match_dup 1)
+                            (match_dup 2)))
+              (clobber (reg:QI 136))
+              (clobber (reg:QI 137))
+              (clobber (reg:QI 138))
+              (clobber (reg:QI 139))])
+   (set (match_operand:SI 0 "general_operand" "") (reg:SI 128))
+   (set (match_operand:SI 3 "general_operand" "") (reg:SI 132))]
+  ""
+  "")
+
+;; Push both SImode arguments byte by byte (%A = most significant,
+;; %D = least) and call the library routine.
+(define_insn "*udivmodsi4_call"
+  [(set (reg:SI 128)
+        (udiv:SI (match_operand:SI 0 "general_operand" "rSi,roi")
+                 (match_operand:SI 1 "general_operand" "roi,rSi")))
+   (set (reg:SI 132)
+        (umod:SI (match_dup 0)
+                 (match_dup 1)))
+   (clobber (reg:QI 136))
+   (clobber (reg:QI 137))
+   (clobber (reg:QI 138))
+   (clobber (reg:QI 139))]
+  ""
+  "push\\t%D1%<\;push\\t%C1%<\;push\\t%B1%<\;push\\t%A1%<\;push\\t%D0%<\;push\\t%C0%<\;push\\t%B0%<\;push\\t%A0%>%>%>%>%>%>%>\;page\\t__udivmodsi4\;call\\t__udivmodsi4")
+
+(define_expand "divmodsi4"
+  [(parallel [(set (reg:SI 128)
+                   (div:SI (match_operand:SI 1 "general_operand" "")
+                           (match_operand:SI 2 "general_operand" "")))
+              (set (reg:SI 132)
+                   (mod:SI (match_dup 1)
+                           (match_dup 2)))
+              (clobber (reg:QI 136))
+              (clobber (reg:QI 137))
+              (clobber (reg:QI 138))
+              (clobber (reg:QI 139))
+              (clobber (reg:QI 140))
+              (clobber (reg:QI 141))])
+   (set (match_operand:SI 0 "general_operand" "") (reg:SI 128))
+   (set (match_operand:SI 3 "general_operand" "") (reg:SI 132))]
+  ""
+  "")
+
+;; Signed SImode divide/modulus.
+(define_insn "*divmodsi4_call"
+  [(set (reg:SI 128)
+        (div:SI (match_operand:SI 0 "general_operand" "rSn,ron")
+                (match_operand:SI 1 "general_operand" "ron,rSn")))
+   (set (reg:SI 132)
+        (mod:SI (match_dup 0)
+                (match_dup 1)))
+   (clobber (reg:QI 136))
+   (clobber (reg:QI 137))
+   (clobber (reg:QI 138))
+   (clobber (reg:QI 139))
+   (clobber (reg:QI 140))
+   (clobber (reg:QI 141))]
+  ""
+  "push\\t%D1%<\;push\\t%C1%<\;push\\t%B1%<\;push\\t%A1%<\;push\\t%D0%<\;push\\t%C0%<\;push\\t%B0%<\;push\\t%A0%>%>%>%>%>%>%>\;page\\t__divmodsi4\;call\\t__divmodsi4")
+
+(define_expand "udivmoddi4"
+  [(parallel [(set (reg:DI 128)
+                   (udiv:DI (match_operand:DI 1 "general_operand" "")
+                            (match_operand:DI 2 "general_operand" "")))
+              (set (reg:DI 136)
+                   (umod:DI (match_dup 1)
+                            (match_dup 2)))
+              (clobber (reg:QI 144))
+              (clobber (reg:QI 145))
+              (clobber (reg:QI 146))
+              (clobber (reg:QI 147))
+              (clobber (reg:QI 148))
+              (clobber (reg:QI 149))
+              (clobber (reg:QI 150))
+              (clobber (reg:QI 151))])
+   (set (match_operand:DI 0 "general_operand" "") (reg:DI 128))
+   (set (match_operand:DI 3 "general_operand" "") (reg:DI 136))]
+  ""
+  "")
+
+;; Push both DImode arguments byte by byte (%S = most significant,
+;; %Z = least) and call the library routine; results come back in
+;; regs 128/136.  (Fixed: the last push read "%S00" -- a stray "0"
+;; after the "%S0" operand reference.)
+(define_insn "*udivmoddi4_call"
+  [(set (reg:DI 128)
+        (udiv:DI (match_operand:DI 0 "general_operand" "rSi,roi")
+                 (match_operand:DI 1 "general_operand" "roi,rSi")))
+   (set (reg:DI 136)
+        (umod:DI (match_dup 0)
+                 (match_dup 1)))
+   (clobber (reg:QI 144))
+   (clobber (reg:QI 145))
+   (clobber (reg:QI 146))
+   (clobber (reg:QI 147))
+   (clobber (reg:QI 148))
+   (clobber (reg:QI 149))
+   (clobber (reg:QI 150))
+   (clobber (reg:QI 151))]
+  ""
+  "push\\t%Z1%<\;push\\t%Y1%<\;push\\t%X1%<\;push\\t%W1%<\;push\\t%V1%<\;push\\t%U1%<\;push\\t%T1%<\;push\\t%S1%<\;push\\t%Z0%<\;push\\t%Y0%<\;push\\t%X0%<\;push\\t%W0%<\;push\\t%V0%<\;push\\t%U0%<\;push\\t%T0%<\;push\\t%S0%>%>%>%>%>%>%>%>%>%>%>%>%>%>%>\;page\\t__udivmoddi4\;call\\t__udivmoddi4")
+
+;; Signed DImode divide/modulus expander; see the notes at the top
+;; of this section for the fixed-register calling convention.
+(define_expand "divmoddi4"
+  [(parallel [(set (reg:DI 128)
+                   (div:DI (match_operand:DI 1 "general_operand" "")
+                           (match_operand:DI 2 "general_operand" "")))
+              (set (reg:DI 136)
+                   (mod:DI (match_dup 1)
+                           (match_dup 2)))
+              (clobber (reg:QI 144))
+              (clobber (reg:QI 145))
+              (clobber (reg:QI 146))
+              (clobber (reg:QI 147))
+              (clobber (reg:QI 148))
+              (clobber (reg:QI 149))
+              (clobber (reg:QI 150))
+              (clobber (reg:QI 151))])
+   (set (match_operand:DI 0 "general_operand" "") (reg:DI 128))
+   (set (match_operand:DI 3 "general_operand" "") (reg:DI 136))]
+  ""
+  "")
+
+;; Signed DImode divide/modulus call; same argument-push sequence
+;; as *udivmoddi4_call.  (Fixed: the last push read "%S00" -- a
+;; stray "0" after the "%S0" operand reference.)
+(define_insn "*divmoddi4_call"
+  [(set (reg:DI 128)
+        (div:DI (match_operand:DI 0 "general_operand" "rSn,ron")
+                (match_operand:DI 1 "general_operand" "ron,rSn")))
+   (set (reg:DI 136)
+        (mod:DI (match_dup 0)
+                (match_dup 1)))
+   (clobber (reg:QI 144))
+   (clobber (reg:QI 145))
+   (clobber (reg:QI 146))
+   (clobber (reg:QI 147))
+   (clobber (reg:QI 148))
+   (clobber (reg:QI 149))
+   (clobber (reg:QI 150))
+   (clobber (reg:QI 151))]
+  ""
+  "push\\t%Z1%<\;push\\t%Y1%<\;push\\t%X1%<\;push\\t%W1%<\;push\\t%V1%<\;push\\t%U1%<\;push\\t%T1%<\;push\\t%S1%<\;push\\t%Z0%<\;push\\t%Y0%<\;push\\t%X0%<\;push\\t%W0%<\;push\\t%V0%<\;push\\t%U0%<\;push\\t%T0%<\;push\\t%S0%>%>%>%>%>%>%>%>%>%>%>%>%>%>%>\;page\\t__divmoddi4\;call\\t__divmoddi4")
+
+;;
+;; Arithmetic shift left instructions.
+;;
+;; NOTE(review): the %e operand modifier evidently prints
+;; 1 << INTVAL (shifts are implemented as multiplies through the
+;; hardware multiplier) -- confirm in ip2k.c's print_operand.
+
+;; QImode shift left.  Alternatives: shift-by-1 (constraint N) via
+;; clear-carry + rotate; constant shift (L) via multiply; variable
+;; shift via a rotate loop (the 1:/2: local labels, with "snz"
+;; skipping the loop entirely for a zero count).
+(define_insn "ashlqi3"
+  [(set (match_operand:QI 0 "nonimmediate_operand" "=roR,roR, rS,roR, rS")
+        (ashift:QI
+         (match_operand:QI 1 "nonimmediate_operand" "0, rS,roR, 0, 0")
+         (match_operand:QI 2 "general_operand" "N, L, L, rS,roR")))]
+  ""
+  "@
+   clrb status,0\;rl\\t%0
+   mov\\tw,%e2\;mulu\\tw,%1\;mov\\t%0,w
+   mov\\tw,%e2\;mulu\\tw,%1\;mov\\t%0,w
+   mov\\tw,%2\;snz\;page\\t1f\;jmp\\t1f\;2:clrb\\tstatus,0\;rl\\t%0\;decsz\\twreg\;page\\t2b\;jmp\\t2b\;1:
+   mov\\tw,%2\;snz\;page\\t1f\;jmp\\t1f\;2:clrb\\tstatus,0\;rl\\t%0\;decsz\\twreg\;page\\t2b\;jmp\\t2b\;1:"
+  [(set_attr "clobberw" "no,yes,yes,yes,yes")])
+
+;; Convert simple fixed-size shift of a zero-extended QImode value into a
+;; multiply as our multiplier is much faster.  We also do this so that the
+;; multiply can possibly be merged into a much faster multiply-and-accumulate
+;; operation.
+;;
+(define_split
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro, rS")
+        (ashift:HI (zero_extend:HI
+                    (match_operand:QI 1 "nonimmediate_operand" "rS,roR"))
+                   (match_operand:QI 2 "const_int_operand" "J, J")))]
+  "(INTVAL (operands[2]) < 8)"
+  [(set (match_dup 0)
+        (mult:HI (zero_extend:HI (match_dup 1))
+                 (zero_extend:HI (match_dup 3))))]
+  "operands[3] = gen_int_mode (1 << INTVAL (operands[2]), QImode);")
+
+;; Zero-extended QImode shifted left by exactly 8: just move the
+;; byte into the high half and clear the low half.
+(define_insn_and_split "*ashlhi3_by8_zero_extend"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro, rS")
+        (ashift:HI (zero_extend:HI
+                    (match_operand:QI 1 "nonimmediate_operand" "rS,roR"))
+                   (const_int 8)))]
+  ""
+  "#"
+  "reload_completed"
+  [(set (match_dup 2) (match_dup 1))
+   (set (match_dup 3) (const_int 0))]
+  "{
+    operands[2] = ip2k_get_high_half (operands[0], QImode);
+    operands[3] = ip2k_get_low_half (operands[0], QImode);
+  }")
+
+;; Zero-extended QImode shifted by an arbitrary constant: one 8x8
+;; multiply for counts < 8 (low byte from W, high from MULH); for
+;; counts >= 8 multiply into the high byte and clear the low byte.
+(define_insn "*ashlhi3_zero_extend" ;                0   1
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro, rS")
+        (ashift:HI (zero_extend:HI
+                    (match_operand:QI 1 "nonimmediate_operand" "rS,roR"))
+                   (match_operand:QI 2 "const_int_operand" "n, n")))]
+  ""
+  "*{
+    if (INTVAL (operands[2]) < 8)
+      return AS2 (mov, w, %1) CR_TAB
+             AS2 (mulu, w, %e2) CR_TAB
+             AS2 (mov, %L0, w) CR_TAB
+             AS2 (mov, w, MULH) CR_TAB
+             AS2 (mov, %H0, w);
+    else
+      {
+        operands[3] = GEN_INT (INTVAL (operands[2]) - 8);
+        return AS2 (mov, w, %1) CR_TAB
+               AS2 (mulu, w, %e3) CR_TAB
+               AS2 (mov, %H0, w) CR_TAB
+               AS1 (clr, %L0);
+      }
+  }")
+
+;; Convert simple fixed-size shift of a HImode value into a multiply as
+;; our multiplier is much faster.  We also do this so that the multiply can
+;; possibly be merged into a much faster multiply-and-accumulate operation.
+;;
+(define_split
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,rS")
+        (ashift:HI (match_operand:HI 1 "nonimmediate_operand" "rS,ro")
+                   (match_operand:QI 2 "const_int_operand" "J, J")))]
+  "(INTVAL (operands[2]) < 8)"
+  [(set (match_dup 0)
+        (mult:HI (match_dup 1)
+                 (zero_extend:HI (match_dup 3))))]
+  "operands[3] = gen_int_mode (1 << INTVAL (operands[2]), QImode);")
+
+;; HImode shift by a constant >= 8: the low source byte, shifted by
+;; (count - 8), becomes the high result byte and the low result byte
+;; is cleared.  The preparation statement emits the byte move/shift
+;; before the listed (set low 0) pattern.
+(define_insn_and_split "ashlhi3_split"
+  [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,rS")
+        (ashift:HI (match_operand:HI 1 "nonimmediate_operand" "rS,ro")
+                   (match_operand:QI 2 "const_int_operand" "n, n")))]
+  "(INTVAL (operands[2]) >= 8)"
+  "#"
+  "&& ip2k_reorg_split_himode"
+  [(set (match_dup 4) (const_int 0))]
+  "{
+    operands[3] = ip2k_get_high_half (operands[0], QImode);
+    operands[4] = ip2k_get_low_half (operands[0], QImode);
+    operands[5] = ip2k_get_low_half (operands[1], QImode);
+
+    if (INTVAL (operands[2]) == 8)
+      emit_insn (gen_movqi (operands[3], operands[5]));
+    else
+      {
+        operands[6] = gen_int_mode (INTVAL (operands[2]) - 8, QImode);
+        emit_insn (gen_ashlqi3 (operands[3], operands[5], operands[6]));
+      }
+  }")
+
+;; HImode left shift.  Alternatives:
+;;   0:    in-place, constant count 1..7 ("L")
+;;   1, 2: constant count 1..7 into a separate destination
+;;   3, 4: variable count, in-place, via a rotate loop
+;; Counts >= 8 are handled by ashlhi3_split above.
+(define_insn "ashlhi3" ; 0 1 2 3 4
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,&rS,&ro,ro, rS")
+ (ashift:HI
+ (match_operand:HI 1 "nonimmediate_operand" "0, ro, rS, 0, 0")
+ (match_operand:QI 2 "general_operand" "L, L, L,rS,roR")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ /* In-place shift by a small constant.  */
+ switch (INTVAL (operands[2]))
+ {
+ case 1:
+ /* Clear carry, then rotate low and high bytes left.  */
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %L0) CR_TAB
+ AS1 (rl, %H0);
+
+ case 2:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %L0) CR_TAB
+ AS1 (rl, %H0) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %L0) CR_TAB
+ AS1 (rl, %H0);
+
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ /* Use the multiplier: each byte times 2^count, combining the
+ low byte's MULH overflow into the high result byte.  */
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS1 (push, MULH%<) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (or, 1(SP), w) CR_TAB
+ AS1 (pop, %H0%>);
+
+ case 7:
+ /* Shift by 7 = rotate right by 1 through the byte boundary.
+ The first rr only captures bit 0 of %H0 into carry.  */
+ return AS1 (rr, %H0) CR_TAB
+ AS2 (mov, w, %L0) CR_TAB
+ AS1 (clr, %L0) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0);
+
+ default:
+ /* Should be caught by a different insn pattern */
+ abort ();
+ }
+
+ case 1:
+ case 2:
+ /* Same count cases as above, but reading %1 and writing %0.  */
+ switch (INTVAL (operands[2]))
+ {
+ case 1:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rl, w, %L1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (rl, w, %H1) CR_TAB
+ AS2 (mov, %H0, w);
+
+ case 2:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rl, w, %L1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (rl, w, %H1) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %L0) CR_TAB
+ AS1 (rl, %H0);
+
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS1 (push, MULH%<) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (or, 1(SP), w) CR_TAB
+ AS1 (pop, %H0%>);
+
+ case 7:
+ return AS2 (rr, w, %H1) CR_TAB
+ AS2 (mov, w, %L1) CR_TAB
+ AS1 (clr, %L0) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0);
+
+ default:
+ /* Should be caught by a different insn pattern */
+ abort ();
+ }
+
+ case 3:
+ case 4:
+ /* Variable count: loop, rotating one bit per iteration; skip the
+ loop entirely when the count is zero (snz).  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %L0) CR_TAB
+ AS1 (rl, %H0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+
+ default:
+ abort();
+ }
+ }")
+
+;; Shift a zero-extended HImode value left by exactly 16 into an SImode
+;; destination: after reload, "high half = source, low half = 0".
+(define_insn_and_split "*ashlsi3_by16_zero_extend"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro,rS")
+ (ashift:SI (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "rS,ro"))
+ (const_int 16)))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+ "{
+ operands[2] = ip2k_get_high_half (operands[0], HImode);
+ operands[3] = ip2k_get_low_half (operands[0], HImode);
+ }")
+
+;; SImode left shift by a constant >= 16, split at machine-reorg time
+;; into HImode operations: high half = low source half shifted by
+;; (count - 16) (plain move for count == 16), low half = 0.
+(define_insn_and_split "ashlsi3_split"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand" "0, rS, ro")
+ (match_operand:QI 2 "const_int_operand" "n, n, n")))]
+ "(INTVAL (operands[2]) >= 16)"
+ "#"
+ "&& ip2k_reorg_split_simode"
+ [(const_int 0)]
+ "{
+ operands[3] = ip2k_get_high_half (operands[0], HImode);
+ operands[4] = ip2k_get_low_half (operands[0], HImode);
+ operands[5] = ip2k_get_low_half (operands[1], HImode);
+
+ if (INTVAL (operands[2]) == 16)
+ {
+ emit_insn (gen_movhi (operands[3], operands[5]));
+ emit_insn (gen_movhi (operands[4], GEN_INT (0)));
+ }
+ else
+ {
+ operands[6] = GEN_INT (INTVAL (operands[2]) - 16);
+ emit_insn (gen_ashlhi3 (operands[3], operands[5], operands[6]));
+ emit_insn (gen_movhi (operands[4], GEN_INT (0)));
+ }
+ }")
+
+;; SImode left shift.  %A0..%D0 address the four bytes of the operand
+;; (A most significant -- presumed from the rl/rr carry chains below;
+;; TODO confirm against PRINT_OPERAND in ip2k.c).  Alternatives:
+;;   0:    in-place, constant count ("L"); special-cases 1, 2, 8, 16,
+;;         23, 24, 31, otherwise a rotate loop
+;;   1, 2: in-place, variable count (loop with zero-count bypass)
+;;   3, 4: constant count into a separate destination
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro, rS,ro,&ro,&rS")
+ (ashift:SI
+ (match_operand:SI 1 "nonimmediate_operand" "0, 0, 0, rS, ro")
+ (match_operand:QI 2 "general_operand" "L,roR,rS, L, L")))]
+ ""
+ "*{
+ switch (which_alternative) {
+ case 0:
+ switch (INTVAL (operands[2])) {
+ case 1:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0) CR_TAB
+ AS1 (rl, %B0) CR_TAB
+ AS1 (rl, %A0);
+
+ case 2:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0) CR_TAB
+ AS1 (rl, %B0) CR_TAB
+ AS1 (rl, %A0) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0) CR_TAB
+ AS1 (rl, %B0) CR_TAB
+ AS1 (rl, %A0);
+
+ case 8:
+ /* Whole-byte move up; clear the vacated low byte.  */
+ return AS2 (mov, w, %B0) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (mov, w, %C0) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, w, %D0) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS1 (clr, %D0);
+
+ case 16:
+ return AS2 (mov, w, %C0) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (mov, w, %D0) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %D0);
+
+ case 23:
+ /* 24-bit byte move combined with a 1-bit rotate back right.  */
+ return AS2 (rr, w, %C0) CR_TAB
+ AS2 (mov, w, %D0) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %D0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0);
+
+ case 24:
+ return AS2 (mov, w, %D0) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %D0);
+
+ case 31:
+ /* Only bit 0 of the source survives, as the sign bit.  */
+ return AS2 (rr, w, %D0) CR_TAB
+ AS1 (clr, %A0) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %D0) CR_TAB
+ AS1 (rr, %A0);
+
+ default:
+ /* Any other constant count: rotate loop, count in wreg.  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0) CR_TAB
+ AS1 (rl, %B0) CR_TAB
+ AS1 (rl, %A0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b);
+ }
+
+ case 1:
+ case 2:
+ /* Variable count: skip the loop when the count is zero.  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0) CR_TAB
+ AS1 (rl, %B0) CR_TAB
+ AS1 (rl, %A0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+
+ case 3:
+ case 4:
+ /* Same constant-count cases, reading %1 and writing %0.  */
+ switch (INTVAL (operands[2])) {
+ case 1:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rl, w, %D1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (rl, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (rl, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (rl, w, %A1) CR_TAB
+ AS2 (mov, %A0, w);
+
+ case 2:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rl, w, %D1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (rl, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (rl, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (rl, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0) CR_TAB
+ AS1 (rl, %B0) CR_TAB
+ AS1 (rl, %A0);
+
+ case 8:
+ return AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (mov, w, %C1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, w, %D1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS1 (clr, %D0);
+
+ case 16:
+ return AS2 (mov, w, %C1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (mov, w, %D1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %D0);
+
+ case 23:
+ return AS2 (rr, w, %C1) CR_TAB
+ AS2 (mov, w, %D1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %D0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0);
+
+ case 24:
+ return AS2 (mov, w, %D1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %D0);
+
+ case 31:
+ return AS2 (rr, w, %D1) CR_TAB
+ AS1 (clr, %A0) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %D0) CR_TAB
+ AS1 (rr, %A0);
+
+ default:
+ /* Copy source to destination, then shift in place.  */
+ return AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, w, %D1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %2) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0) CR_TAB
+ AS1 (rl, %B0) CR_TAB
+ AS1 (rl, %A0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b);
+ }
+ }
+ }")
+
+;; DImode left shift by a constant, split at machine-reorg time.  Only
+;; count == 16 and counts >= 32 are handled here (other counts are left
+;; to generic code).  For >= 32 the low source half moves (shifted) into
+;; the high half of the result; for == 16 the result is assembled from
+;; HImode quarters of source and destination.
+(define_insn_and_split "ashldi3_split"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "0, rS, ro")
+ (match_operand:QI 2 "const_int_operand" "n, n, n")))]
+ "((INTVAL (operands[2]) >= 32) || (INTVAL (operands[2]) == 16))"
+ "#"
+ "&& ip2k_reorg_split_dimode"
+ [(const_int 0)]
+ "{
+ operands[3] = ip2k_get_high_half (operands[0], SImode);
+ operands[4] = ip2k_get_low_half (operands[0], SImode);
+ operands[5] = ip2k_get_low_half (operands[1], SImode);
+
+ if (INTVAL (operands[2]) == 16)
+ {
+ /* Shuffle the three surviving 16-bit quarters up one slot and
+ zero the lowest quarter.  */
+ operands[6] = ip2k_get_high_half (operands[1], SImode);
+ operands[7] = ip2k_get_high_half (operands[3], HImode);
+ operands[8] = ip2k_get_low_half (operands[3], HImode);
+ operands[9] = ip2k_get_high_half (operands[4], HImode);
+ operands[10] = ip2k_get_low_half (operands[4], HImode);
+ operands[11] = ip2k_get_low_half (operands[6], HImode);
+ operands[12] = ip2k_get_high_half (operands[5], HImode);
+ operands[13] = ip2k_get_low_half (operands[5], HImode);
+ emit_insn (gen_movhi (operands[7], operands[11]));
+ emit_insn (gen_movhi (operands[8], operands[12]));
+ emit_insn (gen_movhi (operands[9], operands[13]));
+ emit_insn (gen_movhi (operands[10], GEN_INT (0)));
+ }
+ else if (INTVAL (operands[2]) == 32)
+ {
+ emit_insn (gen_movsi (operands[3], operands[5]));
+ emit_insn (gen_movsi (operands[4], GEN_INT (0)));
+ }
+ else
+ {
+ operands[6] = GEN_INT (INTVAL (operands[2]) - 32);
+ emit_insn (gen_ashlsi3 (operands[3], operands[5], operands[6]));
+ emit_insn (gen_movsi (operands[4], GEN_INT (0)));
+ }
+ }")
+
+;;
+;; Arithmetic shift right instructions.
+;;
+
+;; Expander for QImode arithmetic shift right: a shift by the constant
+;; zero degenerates to a plain move; otherwise fall through to the
+;; "*ashrqi3" insn below.
+(define_expand "ashrqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
+ ""
+ "if (operands[2] == const0_rtx)
+ {
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+ ")
+
+;; QImode arithmetic shift right.  Alternatives:
+;;   0:    in-place shift by 1 ("N")
+;;   1, 2: in-place, variable count (bit-set-then-rotate loop)
+;;   3, 4: shift by 1 into a separate destination
+;;   5, 6: constant count 1..7 ("L") via signed multiply by 2^(8-n),
+;;         leaving x >> n (arithmetic) in MULH
+(define_insn "*ashrqi3"
+ [(set
+ (match_operand:QI 0 "nonimmediate_operand" "=roR,roR, rS,roR, rS,roR, rS")
+ (ashiftrt:QI
+ (match_operand:QI 1 "nonimmediate_operand" "0, 0, 0, rS,roR, rS,roR")
+ (match_operand:QI 2 "general_operand" "N, rS,roR, N, N, L, L")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ /* rl captures the sign bit in carry; rr shifts it back in.  */
+ return AS2 (rl, w, %0) CR_TAB
+ AS1 (rr, %0);
+
+ case 3:
+ case 4:
+ return AS2 (rl, w, %1) CR_TAB /* dup the sign bit */
+ AS2 (rr, w, %1) CR_TAB
+ AS2 (mov, %0, w);
+
+ case 5:
+ case 6:
+ /* Do >> by left-shifting partially into MULH. */
+ operands[2] = GEN_INT (8 - INTVAL (operands[2]));
+ return AS2 (mov, w, %1) CR_TAB
+ AS2 (muls, w, %e2) CR_TAB
+ AS2 (mov, w, mulh) CR_TAB
+ AS2 (mov, %0, w);
+
+ case 1:
+ case 2:
+ default:
+ /* Loop: replicate the sign bit via setb/sb before each rotate;
+ skip the loop when the count is zero.  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (setb, status, 0) CR_TAB
+ AS2 (sb, %0, 7) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+ }
+ }")
+
+;; HImode arithmetic shift right.  Alternatives:
+;;   0:    in-place, constant count ("L"); special cases 1, 2, 8,
+;;         otherwise a sign-propagating rotate loop
+;;   1, 2: constant count into a separate destination
+;;   3, 4: in-place, variable count (loop with zero-count bypass)
+(define_insn "ashrhi3" ; 0 1 2 3 4
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,&rS,&ro,ro, rS")
+ (ashiftrt:HI
+ (match_operand:HI 1 "nonimmediate_operand" "0, ro, rS, 0, 0")
+ (match_operand:QI 2 "general_operand" "L, L, L,rS,roR")))]
+ ""
+ "*{
+ switch (which_alternative) {
+ case 0:
+ switch (INTVAL (operands[2])) {
+ case 1:
+ /* rl copies the sign bit into carry, rr shifts it back in.  */
+ return AS2 (rl, w, %H0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0);
+
+ case 2:
+ return AS2 (rl, w, %H0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0) CR_TAB
+ AS2 (rl, w, %H0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0);
+
+ case 8:
+ /* Byte move down, then sign-extend the new high byte.  */
+ return AS2 (mov, w, %H0) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS1 (clr, %H0) CR_TAB
+ AS2 (snb, %L0, 7) CR_TAB
+ AS1 (not, %H0);
+
+ default:
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (setb, status, 0) CR_TAB
+ AS2 (sb, %H0, 7) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b);
+ }
+
+ case 1:
+ case 2:
+ /* Same cases, reading %1 and writing %0.  */
+ switch (INTVAL (operands[2])) {
+ case 1:
+ return AS2 (rl, w, %H1) CR_TAB
+ AS2 (rr, w, %H1) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (rr, w, %L1) CR_TAB
+ AS2 (mov, %L0, w);
+
+ case 2:
+ return AS2 (rl, w, %H1) CR_TAB
+ AS2 (rr, w, %H1) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (rr, w, %L1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (rl, w, %H0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0);
+
+ case 8:
+ return AS2 (mov, w, %H1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS1 (clr, %H0) CR_TAB
+ AS2 (snb, %L0, 7) CR_TAB
+ AS1 (not, %H0);
+
+ default:
+ /* Copy source to destination, then loop in place.  */
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (mov, w, %2) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (setb, status, 0) CR_TAB
+ AS2 (sb, %H0, 7) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b);
+ }
+
+ case 3:
+ case 4:
+ /* Variable count: skip the loop entirely when it is zero.  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (setb, status, 0) CR_TAB
+ AS2 (sb, %H0, 7) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+
+ default:
+ abort();
+ }
+ }")
+
+;; SImode arithmetic shift right.  Alternatives:
+;;   0:    in-place, constant count ("L"); special cases 1, 2, 8, 16,
+;;         23, 24, 31, otherwise a sign-propagating rotate loop
+;;   1, 2: in-place, variable count (loop with zero-count bypass)
+;;   3, 4: constant count into a separate destination
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro, rS,ro,&ro,&rS")
+ (ashiftrt:SI
+ (match_operand:SI 1 "nonimmediate_operand" "0, 0, 0, rS, ro")
+ (match_operand:QI 2 "general_operand" "L,roR,rS, L, L")))]
+ ""
+ "*{
+ switch (which_alternative) {
+ case 0:
+ switch (INTVAL (operands[2])) {
+ case 1:
+ return AS2 (rl, w, %A0) CR_TAB /* dup the sign bit */
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0);
+
+ case 2:
+ return AS2 (rl, w, %A0) CR_TAB /* dup the sign bit */
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0) CR_TAB
+ AS2 (rl, w, %A0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0);
+
+ case 8:
+ /* Byte move down; sign-extend into the vacated top byte.  */
+ return AS2 (mov, w, %C0) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %B0) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, w, %A0) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS1 (clr, %A0) CR_TAB
+ AS2 (snb, %B0, 7) CR_TAB
+ AS1 (not, %A0);
+
+ case 16:
+ /* WREG holds 0x00 or 0xff (sign fill) for both top bytes.  */
+ return AS2 (mov, w, %B0) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %A0) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS1 (clr, WREG) CR_TAB
+ AS2 (snb, %C0, 7) CR_TAB
+ AS1 (not, WREG) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %A0, w);
+
+ case 23:
+ return AS2 (rl, w, %B0) CR_TAB
+ AS2 (mov, w, %A0) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (clr, WREG) CR_TAB
+ AS2 (snb, %D0, 7) CR_TAB
+ AS1 (not, WREG) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0);
+
+ case 24:
+ return AS2 (mov, w, %A0) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (clr, WREG) CR_TAB
+ AS2 (snb, %D0, 7) CR_TAB
+ AS1 (not, WREG) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %A0, w);
+
+ case 31:
+ /* Result is all-zeros or all-ones from the sign bit.  */
+ return AS2 (rl, w, %A0) CR_TAB
+ AS1 (clr, WREG) CR_TAB
+ AS2 (snb, %A0, 7) CR_TAB
+ AS1 (not, WREG) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS1 (rl, %D0);
+
+ default:
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (setb, status, 0) CR_TAB
+ AS2 (sb, %A0, 7) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0) CR_TAB
+ AS1 (decsz, WREG) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b);
+ }
+
+ case 1:
+ case 2:
+ /* Variable count: skip the loop entirely when it is zero.  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (setb, status, 0) CR_TAB
+ AS2 (sb, %A0, 7) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0) CR_TAB
+ AS1 (decsz, WREG) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+
+ case 3:
+ case 4:
+ /* Same constant-count cases, reading %1 and writing %0.  */
+ switch (INTVAL (operands[2])) {
+ case 1:
+ return AS2 (rl, w, %A1) CR_TAB /* dup the sign bit */
+ AS2 (rr, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (rr, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (rr, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (rr, w, %D1) CR_TAB
+ AS2 (mov, %D0, w);
+
+ case 2:
+ return AS2 (rl, w, %A1) CR_TAB /* dup the sign bit */
+ AS2 (rr, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (rr, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (rr, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (rr, w, %D1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (rl, w, %A0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0);
+
+ case 8:
+ return AS2 (mov, w, %C1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS1 (clr, %A0) CR_TAB
+ AS2 (snb, %B0, 7) CR_TAB
+ AS1 (not, %A0);
+
+ case 16:
+ return AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS1 (clr, WREG) CR_TAB
+ AS2 (snb, %C0, 7) CR_TAB
+ AS1 (not, WREG) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %A0, w);
+
+ case 23:
+ return AS2 (rl, w, %B1) CR_TAB
+ AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (clr, WREG) CR_TAB
+ AS2 (snb, %D0, 7) CR_TAB
+ AS1 (not, WREG) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0);
+
+ case 24:
+ return AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (clr, WREG) CR_TAB
+ AS2 (snb, %D0, 7) CR_TAB
+ AS1 (not, WREG) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %A0, w);
+
+ case 31:
+ return AS2 (rl, w, %A1) CR_TAB
+ AS1 (clr, WREG) CR_TAB
+ AS2 (snb, %A1, 7) CR_TAB
+ AS1 (not, WREG) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS1 (rl, %D0);
+
+ default:
+ /* Copy source to destination, then loop in place.  */
+ return AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, w, %D1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %2) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (setb, status, 0) CR_TAB
+ AS2 (sb, %A0, 7) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b);
+ }
+ }
+ }")
+
+;;
+;; Logical shift right instructions.
+;;
+
+;; QImode logical shift right.  Alternatives:
+;;   0:    in-place shift by 1 ("N")
+;;   1, 2: in-place, variable count (rotate loop with zero bypass)
+;;   3, 4: shift by 1 into a separate destination
+;;   5:    variable count into a separate destination (copy, then loop)
+;;   6, 7: constant count 1..7 ("L") via mulu by 2^(8-count); MULH then
+;;         holds x >> count
+(define_insn "lshrqi3"
+ [(set (match_operand:QI
+ 0 "nonimmediate_operand" "=roR, rS,roR,roR, rS,&roR,roR, rS")
+ (lshiftrt:QI
+ (match_operand:QI
+ 1 "nonimmediate_operand" "0, 0, 0, rS,roR, rS, rS,roR")
+ (match_operand:QI
+ 2 "general_operand" "N,roR, rS, N, N, rS, L, L")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ /* Clear carry so a zero is shifted into bit 7.  */
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %0);
+
+ case 1:
+ case 2:
+ /* Loop, one bit per iteration; bypass when the count is zero.  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+
+ case 3:
+ case 4:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rr, w, %1) CR_TAB
+ AS2 (mov, %0, w);
+
+ case 5:
+ /* Copy the source into the destination, then shift in place.
+ BUGFIX: the label macro AS1 (1:,) was missing its CR_TAB
+ separator, fusing the label and the following clrb onto one
+ malformed output line (cf. the identical loop in cases 1/2).  */
+ return AS2 (mov, w, %1) CR_TAB
+ AS2 (mov, %0, w) CR_TAB
+ AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+
+ case 6:
+ case 7:
+ /* Do >> by left-shifting partially into MULH. */
+ operands[2] = GEN_INT (8 - INTVAL (operands[2]));
+ return AS2 (mov, w, %1) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (mov, w, mulh) CR_TAB
+ AS2 (mov, %0, w);
+ }
+ }")
+
+;; HImode logical shift right by a constant >= 8, split at machine-reorg
+;; time into byte operations: low result byte = high source byte shifted
+;; by (count - 8) (plain move for count == 8), high result byte = 0.
+(define_insn_and_split "lshrhi3_split"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,rS")
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "rS,ro")
+ (match_operand:QI 2 "const_int_operand" "n, n")))]
+ "(INTVAL (operands[2]) >= 8)"
+ "#"
+ "&& ip2k_reorg_split_himode"
+ [(const_int 0)]
+ "{
+ operands[3] = ip2k_get_high_half (operands[0], QImode);
+ operands[4] = ip2k_get_low_half (operands[0], QImode);
+ operands[5] = ip2k_get_high_half (operands[1], QImode);
+
+ if (INTVAL (operands[2]) == 8)
+ emit_insn (gen_movqi (operands[4], operands[5]));
+ else
+ {
+ operands[6] = GEN_INT (INTVAL (operands[2]) - 8);
+ emit_insn (gen_lshrqi3 (operands[4], operands[5], operands[6]));
+ }
+ emit_insn (gen_movqi (operands[3], GEN_INT (0)));
+ }")
+
+;; HImode logical shift right.  Alternatives:
+;;   0:    in-place, constant count 1..7 ("L"); 1 and 2 via rotates,
+;;         3..7 via mulu by 2^(8-count) per byte (MULH = byte >> count)
+;;   1, 2: same constant counts into a separate destination
+;;   3, 4: in-place, variable count (loop with zero bypass)
+;; Counts >= 8 are handled by lshrhi3_split above.
+(define_insn "lshrhi3" ; 0 1 2 3 4
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,&rS,&ro,ro, rS")
+ (lshiftrt:HI
+ (match_operand:HI 1 "nonimmediate_operand" " 0, ro, rS, 0, 0")
+ (match_operand:QI 2 "general_operand" "L, L, L,rS,roR")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ switch (INTVAL (operands[2]))
+ {
+ case 1:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0);
+
+ case 2:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0);
+
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ /* Multiply each byte by 2^(8-count): MULH holds the byte
+ shifted right, the product low byte carries the bits that
+ cross into the low result byte.  */
+ operands[2] = GEN_INT (8 - INTVAL (operands[2]));
+ return AS2 (mov, w, %L0) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (mov, w, MULH) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (mov, w, %H0) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (or, %L0, w) CR_TAB
+ AS2 (mov, w, MULH) CR_TAB
+ AS2 (mov, %H0, w);
+
+ default:
+ /* Should be caught by a different insn pattern */
+ abort ();
+ }
+
+ case 1:
+ case 2:
+ /* Same cases, reading %1 and writing %0.  */
+ switch (INTVAL (operands[2]))
+ {
+ case 1:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rr, w, %H1) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (rr, w, %L1) CR_TAB
+ AS2 (mov, %L0, w);
+
+ case 2:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rr, w, %H1) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (rr, w, %L1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0);
+
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ operands[2] = GEN_INT (8 - INTVAL (operands[2]));
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (mov, w, MULH) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mulu, w, %e2) CR_TAB
+ AS2 (or, %L0, w) CR_TAB
+ AS2 (mov, w, MULH) CR_TAB
+ AS2 (mov, %H0, w);
+
+ default:
+ /* Should be caught by a different insn pattern */
+ abort ();
+ }
+
+ case 3:
+ case 4:
+ /* Variable count: skip the loop when the count is zero.  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %H0) CR_TAB
+ AS1 (rr, %L0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+
+ default:
+ abort();
+ }
+ }")
+
+;; SImode logical shift right by a constant >= 16, split at machine-
+;; reorg time into HImode operations: low result half = high source
+;; half shifted by (count - 16) (plain move for 16), high half = 0.
+(define_insn_and_split "lshrsi3_split"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0, rS, ro")
+ (match_operand:QI 2 "const_int_operand" "n, n, n")))]
+ "(INTVAL (operands[2]) >= 16)"
+ "#"
+ "&& ip2k_reorg_split_simode"
+ [(const_int 0)]
+ "{
+ operands[3] = ip2k_get_high_half (operands[0], HImode);
+ operands[4] = ip2k_get_low_half (operands[0], HImode);
+ operands[5] = ip2k_get_high_half (operands[1], HImode);
+
+ if (INTVAL (operands[2]) == 16)
+ emit_insn (gen_movhi (operands[4], operands[5]));
+ else
+ {
+ operands[6] = GEN_INT (INTVAL (operands[2]) - 16);
+ emit_insn (gen_lshrhi3 (operands[4], operands[5], operands[6]));
+ }
+ emit_insn (gen_movhi (operands[3], GEN_INT (0)));
+ }")
+
+;; This occurs frequently in supporting FP among other things,
+;; and out-of-line is almost as big as inline, so....
+;;
+;; SImode logical shift right.  Alternatives:
+;;   0:    in-place, constant count ("L"); special cases 1, 2, 8, 16,
+;;         23, 24, 31, otherwise a rotate loop
+;;   1, 2: in-place, variable count (loop with zero-count bypass)
+;;   3, 4: constant count into a separate destination
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro, rS,ro,&ro,&rS")
+ (lshiftrt:SI
+ (match_operand:SI 1 "nonimmediate_operand" "0, 0, 0, rS, ro")
+ (match_operand:QI 2 "general_operand" "L,roR,rS, L, L")))]
+
+ ""
+ "*{
+ switch (which_alternative) {
+ case 0:
+ switch (INTVAL (operands[2])) {
+ case 1:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0);
+
+ case 2:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0);
+
+ case 8:
+ /* Whole-byte move down; clear the vacated top byte.  */
+ return AS2 (mov, w, %C0) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %B0) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, w, %A0) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS1 (clr, %A0);
+
+ case 16:
+ return AS2 (mov, w, %B0) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %A0) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %A0);
+
+ case 23:
+ /* 24-bit byte move combined with a 1-bit rotate back left.  */
+ return AS2 (rl, w, %B0) CR_TAB
+ AS2 (mov, w, %A0) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %A0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0);
+
+ case 24:
+ return AS2 (mov, w, %A0) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %A0);
+
+ case 31:
+ /* Only the sign bit of the source survives, as bit 0.  */
+ return AS2 (rl, w, %A0) CR_TAB
+ AS1 (clr, %D0) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %A0) CR_TAB
+ AS1 (rl, %D0);
+
+ default:
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b);
+ }
+
+ case 1:
+ case 2:
+ /* Variable count: skip the loop when the count is zero.  */
+ return AS2 (mov, w, %2) CR_TAB
+ AS1 (snz,) CR_TAB
+ AS1 (page, 2f) CR_TAB
+ AS1 (jmp, 2f) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b) CR_TAB
+ AS1 (2:,);
+
+ case 3:
+ case 4:
+ /* Same constant-count cases, reading %1 and writing %0.  */
+ switch (INTVAL (operands[2])) {
+ case 1:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rr, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (rr, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (rr, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (rr, w, %D1) CR_TAB
+ AS2 (mov, %D0, w);
+
+ case 2:
+ return AS2 (clrb, status, 0) CR_TAB
+ AS2 (rr, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (rr, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (rr, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (rr, w, %D1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0);
+
+ case 8:
+ return AS2 (mov, w, %C1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS1 (clr, %A0);
+
+ case 16:
+ return AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %A0);
+
+ case 23:
+ return AS2 (rl, w, %B1) CR_TAB
+ AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %A0) CR_TAB
+ AS1 (rl, %D0) CR_TAB
+ AS1 (rl, %C0);
+
+ case 24:
+ return AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %A0);
+
+ case 31:
+ return AS2 (rl, w, %A1) CR_TAB
+ AS1 (clr, %D0) CR_TAB
+ AS1 (clr, %C0) CR_TAB
+ AS1 (clr, %B0) CR_TAB
+ AS1 (clr, %A0) CR_TAB
+ AS1 (rl, %D0);
+
+ default:
+ /* Copy source to destination, then loop in place.  */
+ return AS2 (mov, w, %A1) CR_TAB
+ AS2 (mov, %A0, w) CR_TAB
+ AS2 (mov, w, %B1) CR_TAB
+ AS2 (mov, %B0, w) CR_TAB
+ AS2 (mov, w, %C1) CR_TAB
+ AS2 (mov, %C0, w) CR_TAB
+ AS2 (mov, w, %D1) CR_TAB
+ AS2 (mov, %D0, w) CR_TAB
+ AS2 (mov, w, %2) CR_TAB
+ AS1 (1:,) CR_TAB
+ AS2 (clrb, status, 0) CR_TAB
+ AS1 (rr, %A0) CR_TAB
+ AS1 (rr, %B0) CR_TAB
+ AS1 (rr, %C0) CR_TAB
+ AS1 (rr, %D0) CR_TAB
+ AS1 (decsz, wreg) CR_TAB
+ AS1 (page, 1b) CR_TAB
+ AS1 (jmp, 1b);
+ }
+ }
+ }")
+
+;; DImode logical shift right by a constant, split at machine-reorg
+;; time.  Only count == 16 and counts >= 32 are handled here.  For
+;; >= 32 the high source half moves (shifted) into the low half of the
+;; result; for == 16 the result is assembled from HImode quarters.
+(define_insn_and_split "lshrdi3_split"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "0, rS, ro")
+ (match_operand:QI 2 "const_int_operand" "n, n, n")))]
+ "((INTVAL (operands[2]) >= 32) || (INTVAL (operands[2]) == 16))"
+ "#"
+ "&& ip2k_reorg_split_dimode"
+ [(const_int 0)]
+ "{
+ operands[3] = ip2k_get_high_half (operands[0], SImode);
+ operands[4] = ip2k_get_low_half (operands[0], SImode);
+ operands[5] = ip2k_get_high_half (operands[1], SImode);
+
+ if (INTVAL (operands[2]) == 16)
+ {
+ /* Shuffle the three surviving 16-bit quarters down one slot and
+ zero the highest quarter.  */
+ operands[6] = ip2k_get_low_half (operands[1], SImode);
+ operands[7] = ip2k_get_high_half (operands[3], HImode);
+ operands[8] = ip2k_get_low_half (operands[3], HImode);
+ operands[9] = ip2k_get_high_half (operands[4], HImode);
+ operands[10] = ip2k_get_low_half (operands[4], HImode);
+ operands[11] = ip2k_get_high_half (operands[6], HImode);
+ operands[12] = ip2k_get_low_half (operands[5], HImode);
+ operands[13] = ip2k_get_high_half (operands[5], HImode);
+ emit_insn (gen_movhi (operands[10], operands[11]));
+ emit_insn (gen_movhi (operands[9], operands[12]));
+ emit_insn (gen_movhi (operands[8], operands[13]));
+ emit_insn (gen_movhi (operands[7], GEN_INT(0)));
+ }
+ else if (INTVAL (operands[2]) == 32)
+ {
+ emit_insn (gen_movsi (operands[4], operands[5]));
+ emit_insn (gen_movsi (operands[3], GEN_INT (0)));
+ }
+ else
+ {
+ operands[6] = GEN_INT (INTVAL (operands[2]) - 32);
+ emit_insn (gen_lshrsi3 (operands[4], operands[5], operands[6]));
+ emit_insn (gen_movsi (operands[3], GEN_INT (0)));
+ }
+ }")
+
+;;
+;; Absolute value conversion instructions.
+;;
+
+;; QImode absolute value: load into w, and when the sign bit is set
+;; (snb skips the next insn when the bit is clear) negate via
+;; "sub w,#0" before storing the result.
+(define_insn "absqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=g")
+ (abs:QI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+ ""
+ "mov\\tw,%1\;snb\\twreg,7\;sub\\tw,#0\;mov\\t%0,w")
+
+;; SFmode absolute value: clearing bit 7 of the most-significant byte
+;; clears the IEEE sign bit in place; WREG is not touched.
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=ro")
+ (abs:SF (match_operand:SF 1 "nonimmediate_operand" "0")))]
+ ""
+ "clrb %A0,7"
+ [(set_attr "clobberw" "no")])
+
+;;
+;; Negate (X = 0 - Y) instructions.
+;;
+
+;; QImode negate, split unconditionally into two's complement form:
+;; complement then add one.
+(define_insn_and_split "negqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (neg:QI (match_operand:QI 1 "nonimmediate_operand" "0, rS, ro")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0)
+ (not:QI (match_dup 1)))
+ (set (match_dup 0)
+ (plus:QI (match_dup 0)
+ (const_int 1)))]
+ "")
+
+;; HImode negate: complement then add one (two's complement).
+(define_insn_and_split "neghi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (neg:HI (match_operand:HI 1 "nonimmediate_operand" "0, rS, ro")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0)
+ (not:HI (match_dup 1)))
+ (set (match_dup 0)
+ (plus:HI (match_dup 0)
+ (const_int 1)))]
+ "")
+
+;; SImode negate: complement then add one (two's complement).
+(define_insn_and_split "negsi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (neg:SI (match_operand:SI 1 "nonimmediate_operand" "0, rS, ro")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0)
+ (not:SI (match_dup 1)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ "")
+
+;; DImode negate: complement then add one (two's complement).
+(define_insn_and_split "negdi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (neg:DI (match_operand:DI 1 "nonimmediate_operand" "0, rS, ro")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0)
+ (not:DI (match_dup 1)))
+ (set (match_dup 0)
+ (plus:DI (match_dup 0)
+ (const_int 1)))]
+ "")
+
+;;
+;; Bitwise not (one's complement) instructions.
+;;
+
+;; QImode not is a real single insn (in-place "not" when the operands
+;; are tied, otherwise via w).  The wider modes are split into two
+;; half-width NOTs, addressing the halves with ip2k_get_high_half /
+;; ip2k_get_low_half, once the matching ip2k_reorg_split_* flag is set.
+
+(define_insn "one_cmplqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=g,roR, rS")
+ (not:QI (match_operand:QI 1 "general_operand" "0, rS,roR")))]
+ ""
+ "@
+ not\\t%0
+ not\\tw,%1\;mov\\t%0,w
+ not\\tw,%1\;mov\\t%0,w"
+ [(set_attr "skip" "yes,no,no")
+ (set_attr "clobberw" "no,yes,yes")])
+
+(define_insn_and_split "one_cmplhi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (not:HI (match_operand:HI 1 "general_operand" "0, rS, ro")))]
+ ""
+ "#"
+ "(ip2k_reorg_split_himode)"
+ [(set (match_dup 3)
+ (not:QI (match_dup 4)))
+ (set (match_dup 5)
+ (not:QI (match_dup 6)))]
+ "{
+ operands[3] = ip2k_get_high_half (operands[0], QImode);
+ operands[4] = ip2k_get_high_half (operands[1], QImode);
+ operands[5] = ip2k_get_low_half (operands[0], QImode);
+ operands[6] = ip2k_get_low_half (operands[1], QImode);
+ }")
+
+(define_insn_and_split "one_cmplsi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (not:SI (match_operand:SI 1 "general_operand" "0, rS, ro")))]
+ ""
+ "#"
+ "(ip2k_reorg_split_simode)"
+ [(set (match_dup 3)
+ (not:HI (match_dup 4)))
+ (set (match_dup 5)
+ (not:HI (match_dup 6)))]
+ "{
+ operands[3] = ip2k_get_high_half (operands[0], HImode);
+ operands[4] = ip2k_get_high_half (operands[1], HImode);
+ operands[5] = ip2k_get_low_half (operands[0], HImode);
+ operands[6] = ip2k_get_low_half (operands[1], HImode);
+ }")
+
+(define_insn_and_split "one_cmpldi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro,&ro,&rS")
+ (not:DI (match_operand:DI 1 "general_operand" "0, rS, ro")))]
+ ""
+ "#"
+ "(ip2k_reorg_split_dimode)"
+ [(set (match_dup 3)
+ (not:SI (match_dup 4)))
+ (set (match_dup 5)
+ (not:SI (match_dup 6)))]
+ "{
+ operands[3] = ip2k_get_high_half (operands[0], SImode);
+ operands[4] = ip2k_get_high_half (operands[1], SImode);
+ operands[5] = ip2k_get_low_half (operands[0], SImode);
+ operands[6] = ip2k_get_low_half (operands[1], SImode);
+ }")
+
+;;
+;; Sign extension instructions.
+;;
+
+(define_insn "*push_extendqihi2"
+ [(set (match_operand:HI 0 "push_operand" "=<,<")
+ (sign_extend:HI (match_operand:QI 1 "general_operand" "roR,n")))]
+ ""
+ "@
+ push\\t%1%<\;push\\t#0%<\;snb\\t%1,7\;not\\t1(SP)%>%>
+ push\\t%L1\;push\\t%H1"
+ [(set_attr "clobberw" "no,no")])
+
+;; QI -> HI sign extension.  For register operands, alternatives 0/1 use
+;; a 3-insn sequence when REGNO (op0) == REGNO (op1) - 1, i.e. when the
+;; source byte already sits in the low half of the destination pair, so
+;; only the high byte needs to be synthesized.  Alternative 2 handles a
+;; constant source by copying its pre-computed low/high bytes.
+;; NOTE(review): the switch has no default and falls off the end for
+;; any other alternative (none exist today, but a compiler may warn).
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rS,ro,ro")
+ (sign_extend:HI (match_operand:QI 1 "general_operand" "roR,rS, n")))]
+ ""
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ if (register_operand (operands[0], HImode)
+ && register_operand (operands[1], QImode)
+ && REGNO (operands[0]) == (REGNO (operands[1]) - 1))
+ return AS1 (clr, %H0) CR_TAB
+ AS2 (snb, %1, 7) CR_TAB
+ AS1 (not, %H0);
+ else
+ return AS2 (mov, w, %1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS1 (clr, %H0) CR_TAB
+ AS2 (snb, wreg, 7) CR_TAB
+ AS1 (not, %H0);
+
+ case 2:
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mov, %H0, w);
+ }
+ }")
+
+(define_insn "*push_extendhisi2"
+ [(set (match_operand:SI 0 "push_operand" "=<,<,<")
+ (sign_extend:SI (match_operand:HI 1 "general_operand" "roS,n,s")))]
+ ""
+ "@
+ push\\t%L1%<\;push\\t%H1%<\;clr\\twreg\;snb\\t%H1,7\;not\\twreg\;push\\twreg\;push\\twreg%>%>
+ push\\t%D1\;push\\t%C1\;push\\t%B1\;push\\t%A1
+ push\\t%L1\;push\\t%H1\;push\\t#0\;push\\t#0"
+ [(set_attr "clobberw" "yes,no,no")])
+
+;; HI -> SI sign extension: copy the two source bytes into %C0/%D0 and
+;; replicate the sign (0 or ~0, built in wreg via snb/not) into %A0/%B0.
+;; A symbolic source (alternative 3) is known non-negative here, so the
+;; upper bytes are simply cleared.
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro,rS,ro,ro")
+ (sign_extend:SI (match_operand:HI 1 "general_operand" "rS,ro, n, s")))]
+ ""
+ "@
+ mov\\tw,%L1\;push\\t%H1%<\;pop\\t%C0%>\;mov\\t%D0,w\;clr\\twreg\;snb\\t%C0,7\;not\\twreg\;mov\\t%B0,w\;mov\\t%A0,w
+ mov\\tw,%L1\;push\\t%H1%<\;pop\\t%C0%>\;mov\\t%D0,w\;clr\\twreg\;snb\\t%C0,7\;not\\twreg\;mov\\t%B0,w\;mov\\t%A0,w
+ mov\\tw,%D1\;mov\\t%D0,w\;mov\\tw,%C1\;mov\\t%C0,w\;mov\\tw,%B1\;mov\\t%B0,w\;mov\\tw,%A1\;mov\\t%A0,w
+ mov\\tw,%L1\;push\\t%H1%<\;pop\\t%C0%>\;mov\\t%D0,w\;clr\\t%B0\;clr\\t%A0")
+
+(define_insn "*push_extendqisi2"
+ [(set (match_operand:SI 0 "push_operand" "=<,<")
+ (sign_extend:SI (match_operand:QI 1 "general_operand" "roR,n")))]
+ ""
+ "@
+ push\\t%1%<\;clr\\twreg\;snb\\t%1,7\;not\\twreg\;push\\twreg\;push\\twreg\;push\\twreg%>
+ push\\t%D1\;push\\t%C1\;push\\t%B1\;push\\t%A1"
+ [(set_attr "clobberw" "yes,no")])
+
+;; QI -> SI sign extension; same sign-replication scheme as above but
+;; three upper bytes are filled from wreg.
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro, rS,ro")
+ (sign_extend:SI (match_operand:QI 1 "general_operand" "rS,roR, n")))]
+ ""
+ "@
+ mov\\tw,%1\;mov\\t%D0,w\;clr\\twreg\;snb\\t%1,7\;not\\twreg\;mov\\t%C0,w\;mov\\t%B0,w\;mov\\t%A0,w
+ mov\\tw,%1\;mov\\t%D0,w\;clr\\twreg\;snb\\t%1,7\;not\\twreg\;mov\\t%C0,w\;mov\\t%B0,w\;mov\\t%A0,w
+ mov\\tw,%D1\;mov\\t%D0,w\;mov\\tw,%C1\;mov\\t%C0,w\;mov\\tw,%B1\;mov\\t%B0,w\;mov\\tw,%A1\;mov\\t%A0,w")
+
+;;
+;; Zero extension instructions.
+;;
+
+;; The non-push forms are all emitted as "#" and split (after
+;; ip2k_reorg_completed) into a move / narrower zero_extend of the low
+;; half plus a clear of the high half, with the halves addressed via
+;; ip2k_get_high_half / ip2k_get_low_half.  The push forms push the
+;; source bytes followed by the required number of #0 bytes.
+
+(define_insn "*push_zero_extendqihi2"
+ [(set (match_operand:HI 0 "push_operand" "=<")
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "roRi")))]
+ ""
+ "push\\t%1\;push\\t#0"
+ [(set_attr "clobberw" "no")])
+
+(define_insn_and_split "zero_extendqihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=ro, rS")
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "rSi,roRi")))]
+ ""
+ "#"
+ "ip2k_reorg_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 2) (const_int 0))]
+ "{
+ operands[2] = ip2k_get_high_half (operands[0], QImode);
+ operands[3] = ip2k_get_low_half (operands[0], QImode);
+ }")
+
+(define_insn "*push_zero_extendhisi2"
+ [(set (match_operand:SI 0 "push_operand" "=<")
+ (zero_extend:SI (match_operand:HI 1 "general_operand" "roSi")))]
+ ""
+ "push\\t%L1%<\;push\\t%H1%>\;push\\t#0\;push\\t#0")
+
+(define_insn_and_split "zero_extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro, rS")
+ (zero_extend:SI (match_operand:HI 1 "general_operand" "rSi,roi")))]
+ ""
+ "#"
+ "ip2k_reorg_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 2) (const_int 0))]
+ "{
+ operands[2] = ip2k_get_high_half (operands[0], HImode);
+ operands[3] = ip2k_get_low_half (operands[0], HImode);
+ }")
+
+(define_insn "*push_zero_extendqisi2"
+ [(set (match_operand:SI 0 "push_operand" "=<")
+ (zero_extend:SI (match_operand:QI 1 "general_operand" "roRi")))]
+ ""
+ "push\\t%1\;push\\t#0\;push\\t#0\;push\\t#0"
+ [(set_attr "clobberw" "no")])
+
+;; QI -> SI: the low half becomes a QI -> HI zero_extend, which the
+;; zero_extendqihi2 split above then reduces further.
+(define_insn_and_split "zero_extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=ro, rS")
+ (zero_extend:SI (match_operand:QI 1 "general_operand" "rSi,roRi")))]
+ ""
+ "#"
+ "ip2k_reorg_completed"
+ [(set (match_dup 3) (zero_extend:HI (match_dup 1)))
+ (set (match_dup 2) (const_int 0))]
+ "{
+ operands[2] = ip2k_get_high_half (operands[0], HImode);
+ operands[3] = ip2k_get_low_half (operands[0], HImode);
+ }")
+
+(define_insn "*push_zero_extendsidi2"
+ [(set (match_operand:DI 0 "push_operand" "=<")
+ (zero_extend:DI (match_operand:SI 1 "general_operand" "roSi")))]
+ ""
+ "push\\t%D1%<\;push\\t%C1%<\;push\\t%B1%<\;push\\t%A1%>%>%>\;push\\t#0\;push\\t#0\;push\\t#0\;push\\t#0")
+
+(define_insn_and_split "zero_extendsidi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro, rS")
+ (zero_extend:DI (match_operand:SI 1 "general_operand" "rSi,roi")))]
+ ""
+ "#"
+ "ip2k_reorg_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 2) (const_int 0))]
+ "{
+ operands[2] = ip2k_get_high_half (operands[0], SImode);
+ operands[3] = ip2k_get_low_half (operands[0], SImode);
+ }")
+
+(define_insn "*push_zero_extendhidi2"
+ [(set (match_operand:DI 0 "push_operand" "=<")
+ (zero_extend:DI (match_operand:HI 1 "general_operand" "roSi")))]
+ ""
+ "push\\t%L1%<\;push\\t%H1%>\;push\\t#0\;push\\t#0\;push\\t#0\;push\\t#0\;push\\t#0\;push\\t#0"
+ [(set_attr "clobberw" "no")])
+
+(define_insn_and_split "zero_extendhidi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro, rS")
+ (zero_extend:DI (match_operand:HI 1 "general_operand" "rSi,roi")))]
+ ""
+ "#"
+ "ip2k_reorg_completed"
+ [(set (match_dup 3) (zero_extend:SI (match_dup 1)))
+ (set (match_dup 2) (const_int 0))]
+ "{
+ operands[2] = ip2k_get_high_half (operands[0], SImode);
+ operands[3] = ip2k_get_low_half (operands[0], SImode);
+ }")
+
+(define_insn "*push_zero_extendqidi2"
+ [(set (match_operand:DI 0 "push_operand" "=<")
+ (zero_extend:DI (match_operand:QI 1 "general_operand" "roRi")))]
+ ""
+ "push\\t%1\;push\\t#0\;push\\t#0\;push\\t#0\;push\\t#0\;push\\t#0\;push\\t#0\;push\\t#0"
+ [(set_attr "clobberw" "no")])
+
+(define_insn_and_split "zero_extendqidi2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=ro, rS")
+ (zero_extend:DI (match_operand:QI 1 "general_operand" "rSi,roRi")))]
+ ""
+ "#"
+ "ip2k_reorg_completed"
+ [(set (match_dup 3) (zero_extend:SI (match_dup 1)))
+ (set (match_dup 2) (const_int 0))]
+ "{
+ operands[2] = ip2k_get_high_half (operands[0], SImode);
+ operands[3] = ip2k_get_low_half (operands[0], SImode);
+ }")
+
+;;
+;; Truncation instructions.
+;;
+
+;; Truncation just copies the low-order bytes of the source into the
+;; destination (%D/%C are the two least-significant bytes of an SImode
+;; value; %L is the least-significant byte of an HImode value).  Both
+;; constraint alternatives emit the same template; they exist only to
+;; keep source and destination from both being offsettable memory.
+
+(define_insn "truncsihi2"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rS, ro")
+ (truncate:HI (match_operand:SI 1 "general_operand" "roi,rSi")))]
+ ""
+ "@
+ mov\\tw,%D1\;push\\t%C1%<\;pop\\t%H0%>\;mov\\t%L0,w
+ mov\\tw,%D1\;push\\t%C1%<\;pop\\t%H0%>\;mov\\t%L0,w")
+
+(define_insn "truncsiqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=rS, ro")
+ (truncate:QI (match_operand:SI 1 "general_operand" "roi,rSi")))]
+ ""
+ "@
+ mov\\tw,%D1\;mov\\t%0,w
+ mov\\tw,%D1\;mov\\t%0,w")
+
+(define_insn "trunchiqi2"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=rS, ro")
+ (truncate:QI (match_operand:HI 1 "general_operand" "roi,rSi")))]
+ ""
+ "@
+ mov\\tw,%L1\;mov\\t%0,w
+ mov\\tw,%L1\;mov\\t%0,w")
+
+;;
+;; Compare with zero (test) instructions.
+;;
+;; As we don't have a particularly good set of condition codes, we simply
+;; tag our comparison operands for later use within our "compare
+;; and branch" instructions.
+;;
+
+;; Each tst pattern defers to ip2k_set_compare with a zero second
+;; operand; the operands are recorded so that the later conditional
+;; branch insn can emit the real compare-and-branch sequence.
+(define_insn "tstqi"
+ [(set (cc0)
+ (match_operand:QI 0 "nonimmediate_operand" "roR"))]
+ ""
+ "* return ip2k_set_compare (operands[0], const0_rtx);")
+
+(define_insn "tsthi"
+ [(set (cc0)
+ (match_operand:HI 0 "nonimmediate_operand" "roS"))]
+ ""
+ "* return ip2k_set_compare (operands[0], const0_rtx);")
+
+(define_insn "tstsi"
+ [(set (cc0)
+ (match_operand:SI 0 "nonimmediate_operand" "roS"))]
+ ""
+ "* return ip2k_set_compare (operands[0], const0_rtx);")
+
+(define_insn "tstdi"
+ [(set (cc0)
+ (match_operand:DI 0 "nonimmediate_operand" "roS"))]
+ ""
+ "* return ip2k_set_compare (operands[0], const0_rtx);")
+
+;;
+;; General value comparison instructions.
+;;
+;; As we don't have a particularly good set of condition codes, we simply
+;; tag our comparison operands for later use within our "compare
+;; and branch" instructions.
+;;
+
+;; Like the tst patterns above, the cmp patterns only record both
+;; operands via ip2k_set_compare; code generation is deferred to the
+;; compare-and-branch insns.
+(define_insn "cmpqi"
+ [(set (cc0)
+ (compare (match_operand:QI 0 "nonimmediate_operand" "roR, rS")
+ (match_operand:QI 1 "general_operand" "rSn,roRn")))]
+ ""
+ "* return ip2k_set_compare (operands[0], operands[1]);")
+
+(define_insn "cmphi"
+ [(set (cc0)
+ (compare (match_operand:HI 0 "nonimmediate_operand" "ro, rS")
+ (match_operand:HI 1 "general_operand" "rSn,ron")))]
+ ""
+ "* return ip2k_set_compare (operands[0], operands[1]);")
+
+(define_insn "cmpsi"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "nonimmediate_operand" "ro, rS")
+ (match_operand:SI 1 "general_operand" "rSn,ron")))]
+ ""
+ "* return ip2k_set_compare (operands[0], operands[1]);")
+
+(define_insn "cmpdi"
+ [(set (cc0)
+ (compare (match_operand:DI 0 "nonimmediate_operand" "ro, rS")
+ (match_operand:DI 1 "general_operand" "rSn,ron")))]
+ ""
+ "* return ip2k_set_compare (operands[0], operands[1]);")
+
+;;
+;; Conditional jump instructions.
+;;
+;; These expanders just emit the generic cc0 if_then_else form; the
+;; actual code comes from the *signed_cmp_branch / *unsigned_cmp_branch
+;; insns that match it.
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+\f
+;; Implementation of conditional jumps.
+;;
+;; The assumption is that a previous test or compare instruction will have
+;; provided the arguments to be compared to form cc0 and then we perform
+;; a compare and branch operation here.
+;;
+;; The heavy lifting (emitting the multi-byte compare sequence plus the
+;; page/jmp to the target) is done in ip2k.c using the operands stashed
+;; earlier by ip2k_set_compare.
+(define_insn "*unsigned_cmp_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "ip2k_unsigned_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return ip2k_gen_unsigned_comp_branch (insn, GET_CODE (operands[1]),
+ operands[0]);")
+
+;; Signed branches use Z, N or synthesized V.
+;; result is generated as 0 (LT), 1 (EQ), 2 (GT)
+;;
+(define_insn "*signed_cmp_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "ip2k_signed_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return ip2k_gen_signed_comp_branch (insn, GET_CODE (operands[1]),
+ operands[0]);")
+
+;; Reverse branch - reverse our comparison condition so that we can
+;; branch in the opposite sense.
+;;
+;; NOTE(review): the replacement pattern re-uses
+;; "(label_ref (match_operand 0 ...))" where a plain (match_dup 0)
+;; would be the conventional spelling for an already-matched operand -
+;; confirm against genemit behavior before changing it.
+(define_insn_and_split "*rvbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator" [(cc0)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (pc)
+ (if_then_else (match_dup 2)
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "{
+ ;; Build the reversed comparison in the same mode, still against cc0.
+ operands[2] = gen_rtx (reverse_condition (GET_CODE (operands[1])),
+ GET_MODE (operands[1]),
+ cc0_rtx, const0_rtx);
+ }")
+
+;; This is a bit test and jump sequence.
+;;
+;; For EQ we emit "sb" (skip if bit set) so the page/jmp is taken when
+;; the bit is clear; for NE we emit "snb" (skip if bit clear).  %b2 is
+;; the bit-number operand.
+(define_insn "*bit_cmpqi_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(zero_extract
+ (match_operand:QI 1 "nonimmediate_operand" "roR")
+ (const_int 1)
+ (match_operand 2 "immediate_operand" "i"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[0]) == EQ || GET_CODE (operands[0]) == NE)"
+ "*{
+ if (GET_CODE (operands[0]) == EQ)
+ OUT_AS2 (sb, %1, %b2);
+ else
+ OUT_AS2 (snb, %1, %b2);
+ return AS1 (page, %3) CR_TAB
+ AS1 (jmp, %3);
+ }"
+ [(set_attr "clobberw" "no")])
+
+;; This is a bit test and jump sequence but for 16-bit operands. It's pretty
+;; certain that there must be a way to do this using a zero_extract operation,
+;; but this didn't seem to want to work so we use a bitwise and instead. This
+;; is exactly as efficient but the combiner handles this OK - the implementation
+;; here isn't quite as nice though.
+;;
+;; Only masks with exactly one bit set match (find_one_set_bit_p != -1).
+;; Bits 8-15 are tested in the high byte (%H1) with the bit number
+;; reduced by 8; bits 0-7 in the low byte (%L1).
+(define_insn "*bit_cmphi_branch"
+ [(set
+ (pc)
+ (if_then_else
+ (match_operator 0 "comparison_operator"
+ [(and:HI (match_operand:HI 1 "nonimmediate_operand" "roS")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "((GET_CODE (operands[0]) == EQ || GET_CODE (operands[0]) == NE)
+ && find_one_set_bit_p (INTVAL (operands[2])) != -1)"
+ "*{
+ int bp = find_one_set_bit_p (INTVAL (operands[2]));
+ if (INTVAL (operands[2]) >= 8)
+ operands[4] = GEN_INT (bp - 8);
+ else
+ operands[4] = GEN_INT (bp);
+
+ if (GET_CODE (operands[0]) == EQ)
+ {
+ if (INTVAL (operands[2]) >= 8)
+ OUT_AS2 (sb, %H1, %b4);
+ else
+ OUT_AS2 (sb, %L1, %b4);
+ }
+ else
+ {
+ if (INTVAL (operands[2]) >= 8)
+ OUT_AS2 (snb, %H1, %b4);
+ else
+ OUT_AS2 (snb, %L1, %b4);
+ }
+ return AS1 (page, %3) CR_TAB
+ AS1 (jmp, %3);
+ }"
+ [(set_attr "clobberw" "no")])
+
+;; Add two operands, compare with a third and branch if equal or not-equal.
+;;
+;; Low bytes are added and compared first; for EQ a mismatch there
+;; branches to the local "1:" fall-through label, for NE it branches
+;; straight to the target.  The high bytes are then added with carry
+;; (addc) and compared to decide the final branch.
+(define_insn "*add_and_comp_branch"
+ [(set
+ (pc)
+ (if_then_else
+ (match_operator 0 "comparison_operator"
+ [(plus:HI
+ (match_operand:HI 1 "nonimmediate_operand" "ro, rS, rS")
+ (match_operand:HI 2 "general_operand" "rSn,ron,rSn"))
+ (match_operand:HI 3 "general_operand" "rSn,rSn,ron")])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[0]) == EQ || GET_CODE (operands[0]) == NE)"
+ "*{
+ OUT_AS2 (mov, w, %L2);
+ OUT_AS2 (add, w, %L1);
+ OUT_AS2 (cse, w, %L3);
+ if (GET_CODE (operands[0]) == EQ)
+ {
+ OUT_AS1 (page, 1f);
+ OUT_AS1 (jmp, 1f);
+ }
+ else
+ {
+ OUT_AS1 (page, %4);
+ OUT_AS1 (jmp, %4);
+ }
+ OUT_AS2 (mov, w, %H2);
+ OUT_AS2 (addc, w, %H1);
+ if (GET_CODE (operands[0]) == EQ)
+ OUT_AS2 (csne, w, %H3);
+ else
+ OUT_AS2 (cse, w, %H3);
+ OUT_AS1 (page, %4);
+ OUT_AS1 (jmp, %4);
+ return AS1 (1:, );
+ }")
+
+;; Unconditional jump
+;;
+;; "page" sets the page register before the 13-bit "jmp"; the linker
+;; may later relax the pair.
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "page\\t%0\;jmp\\t%0"
+ [(set_attr "clobberw" "no")])
+
+;; Indirect jump
+;;
+;; Synthesized as an indirect call: call the local "1:" label to set up
+;; the return path, load callh/calll with the target address, then
+;; "ret" transfers control to it.  Clobbers w.
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:HI 0 "nonimmediate_operand" "ro"))]
+ ""
+ "page\\t1f\;call\\t1f\;1:mov\\tw,%H0\;mov\\tcallh,w\;mov\\tw,%L0\;mov\\tcalll,w\;ret")
+
+;;
+;; Function call instructions.
+;;
+;; Direct calls (immediate target, alternative 0 of each insn) use
+;; page/call on the symbol; indirect calls push the target address and
+;; transfer via the __indcall helper.  (reg:HI 6) is the stack pointer
+;; adjusted by the _pop variants.
+
+(define_expand "call"
+ [(call (match_operand 0 "" "")
+ (match_operand:HI 1 "" ""))]
+ ""
+ "")
+
+(define_insn "*call"
+ [(call (mem:HI (match_operand:HI 0 "general_operand" "i,roS"))
+ (match_operand:HI 1 "" ""))]
+ ""
+ "@
+ page\\t%b0\;call\\t%b0
+ push\\t%L0%<\;push\\t%H0%>\;page\\t__indcall\;call\\t__indcall")
+
+(define_expand "call_pop"
+ [(parallel [(call (match_operand 0 "" "")
+ (match_operand:HI 1 "" ""))
+ (set (reg:HI 6)
+ (plus:HI (reg:HI 6)
+ (match_operand:HI 3 "immediate_operand" "")))])]
+ ""
+ "")
+
+(define_insn "*call_pop"
+ [(call (mem:HI (match_operand:HI 0 "general_operand" "i,roS"))
+ (match_operand:HI 1 "" ""))
+ (set (reg:HI 6)
+ (plus:HI (reg:HI 6)
+ (match_operand:HI 2 "immediate_operand" "")))]
+ ""
+ "@
+ page\\t%b0\;call\\t%b0
+ push\\t%L0%<\;push\\t%H0%>\;page\\t__indcall\;call\\t__indcall")
+
+;; Undo any splitting of operands that lead to redundant movhi3 instructions.
+;;
+;; If a register was loaded only to be used as the call target, call
+;; through the original operand instead and drop the copy.
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (parallel [(call (mem:HI (match_dup 0))
+ (match_operand:HI 2 "" ""))
+ (set (reg:HI 6)
+ (plus:HI (reg:HI 6)
+ (match_operand:HI 3 "immediate_operand" "")))])]
+ ""
+ [(parallel [(call (mem:HI (match_dup 1))
+ (match_dup 2))
+ (set (reg:HI 6)
+ (plus:HI (reg:HI 6)
+ (match_dup 3)))])]
+ "")
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "" "")
+ (match_operand:HI 2 "" "")))]
+ ""
+ "")
+
+(define_insn "*call_value"
+ [(set (match_operand 0 "" "")
+ (call (mem:HI (match_operand:HI 1 "general_operand" "i,roS"))
+ (match_operand:HI 2 "" "")))]
+ ""
+ "@
+ page\\t%b1\;call\\t%b1
+ push\\t%L1%<\;push\\t%H1%>\;page\\t__indcall\;call\\t__indcall")
+
+(define_expand "call_value_pop"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (match_operand 1 "" "")
+ (match_operand:HI 2 "" "")))
+ (set (reg:HI 6)
+ (plus:HI (reg:HI 6)
+ (match_operand:HI 4 "immediate_operand" "")))])]
+ ""
+ "")
+
+(define_insn "*call_value_pop"
+ [(set (match_operand 0 "" "")
+ (call (mem:HI (match_operand:HI 1 "general_operand" "i,roS"))
+ (match_operand:HI 2 "" "")))
+ (set (reg:HI 6)
+ (plus:HI (reg:HI 6)
+ (match_operand:HI 3 "immediate_operand" "")))]
+ ""
+ "@
+ page\\t%b1\;call\\t%b1
+ push\\t%L1%<\;push\\t%H1%>\;page\\t__indcall\;call\\t__indcall")
+
+;; Undo any splitting of operands that lead to redundant movhi3 instructions.
+;;
+;; Same copy-elimination as above, for the value-returning call form.
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (parallel [(set (match_operand 2 "" "")
+ (call (mem:HI (match_dup 0))
+ (match_operand:HI 3 "" "")))
+ (set (reg:HI 6)
+ (plus:HI (reg:HI 6)
+ (match_operand:HI 4 "immediate_operand" "")))])]
+ ""
+ [(parallel [(set (match_dup 2)
+ (call (mem:HI (match_dup 1))
+ (match_dup 3)))
+ (set (reg:HI 6)
+ (plus:HI (reg:HI 6)
+ (match_dup 4)))])]
+ "")
+
+;; Nop instruction.
+;;
+;; We don't really want nops to appear in our code, so just insert a comment.
+;;
+;; Emits only an assembler comment, so a nop costs zero code space.
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "; nop")
+
+\f
+;; SEQ instruction
+;;
+(define_insn "seq"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (eq:QI (cc0) (const_int 0)))]
+ ""
+ "* return ip2k_gen_sCOND (insn, EQ, operands[0]);")
+
+;; Tweak SEQ if we can adjust the output operand. Note that we have to do
+;; this via a peephole because we need to ensure that any reloads have taken
+;; place before we try to do this. If there's a reload in order to get our
+;; actual result operand then this peephole won't match.
+;;
+;; NOTE(review): this peephole checks REG_DEAD on "insn" while the
+;; corresponding SNE peephole below checks PREV_INSN (insn) - confirm
+;; which insn carries the note; the two should presumably agree.
+(define_peephole
+ [(set (match_operand:QI 0 "register_operand" "")
+ (eq:QI (cc0) (const_int 0)))
+ (set (reg:QI 10)
+ (match_dup 0))
+ (set (match_operand:QI 1 "nonimmediate_operand" "")
+ (reg:QI 10))]
+ "find_regno_note (insn, REG_DEAD, REGNO (operands[0]))"
+ "* return ip2k_gen_sCOND (insn, EQ, operands[1]);")
+
+;; Another peephole match handles the same merge as above but for cases where
+;; we're emulating memory accesses via IP and an offset.
+;;
+;; The IP-relative offset is applied by adjusting ipl before the sCOND
+;; store, and is undone afterwards if IP is still live (no REG_DEAD
+;; note for REG_IP on this insn).
+(define_peephole
+ [(set (match_operand:QI 0 "register_operand" "")
+ (eq:QI (cc0) (const_int 0)))
+ (set (reg:QI 10)
+ (match_dup 0))
+ (set (mem:QI (plus:HI (reg:HI 4)
+ (match_operand:QI 1 "const_int_operand" "")))
+ (reg:QI 10))]
+ "(find_regno_note (insn, REG_DEAD, REGNO (operands[0]))
+ && (INTVAL (operands[1]) < 0x100))"
+ "*{
+ if (INTVAL (operands[1]) == 1)
+ OUT_AS1 (inc, ipl);
+ else
+ {
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (add, ipl, w);
+ }
+ ip2k_gen_sCOND (insn, EQ,
+ gen_rtx_MEM (QImode, gen_rtx_REG (HImode, REG_IP)));
+ if (find_regno_note (insn, REG_DEAD, REG_IP))
+ {
+ if (INTVAL (operands[1]) == 1)
+ OUT_AS1 (dec, ipl);
+ else
+ {
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (sub, ipl, w);
+ }
+ }
+ return \"\";
+ }")
+
+;; SNE instruction
+;;
+(define_insn "sne"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (ne:QI (cc0) (const_int 0)))]
+ ""
+ "* return ip2k_gen_sCOND (insn, NE, operands[0]);")
+
+;; Tweak SNE if we can adjust the output operand. Note that we have to do
+;; this via a peephole because we need to ensure that any reloads have taken
+;; place before we try to do this. If there's a reload in order to get our
+;; actual result operand then this peephole won't match.
+;;
+;; NOTE(review): here the REG_DEAD lookup uses PREV_INSN (insn), unlike
+;; the SEQ twin above which uses insn directly - confirm which is
+;; correct for a three-insn peephole window.
+(define_peephole
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ne:QI (cc0) (const_int 0)))
+ (set (reg:QI 10)
+ (match_dup 0))
+ (set (match_operand:QI 1 "nonimmediate_operand" "")
+ (reg:QI 10))]
+ "find_regno_note (PREV_INSN (insn), REG_DEAD, REGNO (operands[0]))"
+ "* return ip2k_gen_sCOND (insn, NE, operands[1]);")
+
+;; Another peephole match handles the same merge as above but for cases where
+;; we're emulating memory accesses via IP and an offset.
+;;
+;; Mirrors the EQ variant: adjust ipl, store the NE result through IP,
+;; then restore ipl if IP is still live.
+(define_peephole
+ [(set (match_operand:QI 0 "register_operand" "")
+ (ne:QI (cc0) (const_int 0)))
+ (set (reg:QI 10)
+ (match_dup 0))
+ (set (mem:QI (plus:HI (reg:HI 4)
+ (match_operand:QI 1 "const_int_operand" "")))
+ (reg:QI 10))]
+ "(find_regno_note (PREV_INSN (insn), REG_DEAD, REGNO (operands[0]))
+ && (INTVAL (operands[1]) < 0x100))"
+ "*{
+ if (INTVAL (operands[1]) == 1)
+ OUT_AS1 (inc, ipl);
+ else
+ {
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (add, ipl, w);
+ }
+ ip2k_gen_sCOND (insn, NE,
+ gen_rtx_MEM (QImode, gen_rtx_REG (HImode, REG_IP)));
+ if (find_regno_note (insn, REG_DEAD, REG_IP))
+ {
+ if (INTVAL (operands[1]) == 1)
+ OUT_AS1 (dec, ipl);
+ else
+ {
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (sub, ipl, w);
+ }
+ }
+ return \"\";
+ }")
+
+\f
+
+;; Case Dispatch Table Support.
+;;
+;; Called with 5 arguments:
+;;
+;; 0. case index
+;; 1. lower bound (const_int)
+;; 2. range (const_int)
+;; 3. label before dispatch table
+;; 4. out-of-bounds label
+;;
+;; With the IP2k we actually really want to do a caseqi but that
+;; doesn't exist so we cheat and make it look (to the core of gcc)
+;; like we're going to do the SImode stuff but then truncate it
+;; away when it's no longer looking :-)
+;;
+;; The sequence: truncate the index to QImode, bias by the lower bound,
+;; range-check with an unsigned compare (gtu -> out-of-bounds label),
+;; then dispatch via a pc-relative add matched by *casedispatch below.
+(define_expand "casesi"
+ [(set (match_dup 5)
+ (truncate:QI (match_operand:SI 0 "general_operand" "g")))
+ (set (match_dup 5)
+ (minus:QI (match_dup 5)
+ (match_operand 1 "const_int_operand" "n")))
+ (set (cc0)
+ (compare (match_dup 5)
+ (match_operand 2 "const_int_operand" "n")))
+ (set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (parallel [(set (pc)
+ (plus:HI (pc)
+ (zero_extend:HI (match_dup 5))))
+ (use (label_ref (match_operand 3 "" "")))
+ (use (match_dup 2))])]
+ ""
+ "{
+ operands[5] = gen_reg_rtx (QImode);
+ }")
+
+;; There are TWO instructions per dispatch entry (page & jump), so we
+;; multiply by two even though our RTL only indicates a simple addition.
+;; Subsequent linker relaxation may well restore this back to what the
+;; RTL says though!
+;;
+;; Note that we handle tables with 128 or more entries differently!
+;;
+;; Alternative 0 (constraint "K": small range) doubles the index and
+;; adds it to pcl directly; alternative 1 builds a 16-bit table address
+;; on the stack and dispatches through __indcall instead.
+(define_insn "*casedispatch"
+ [(set (pc)
+ (plus:HI (pc) (zero_extend:HI
+ (match_operand:QI 2 "nonimmediate_operand" "roR,roR"))))
+ (use (label_ref (match_operand 0 "" "")))
+ (use (match_operand 1 "const_int_operand" "K, n"))]
+ ""
+ "@
+ mov\\tw,%2\;add\\tw,wreg\;add\\tpcl,w
+ mov\\tw,%2\;push\\t%0%<\;push\\t#0%<\;add\\tw,wreg\;snc\;inc\\t1(SP)\;add\\t2(SP),w\;snc\;inc\\t1(SP)\;page\\t__indcall\;jmp\\t__indcall%>%>")
+
+;; Handle cleaning up the switch statement stuff. We can eliminate some
+;; register moves in some cases. Note that our pattern is slightly different
+;; to the casesi pattern because our minus has become a plus!
+;;
+;; Note that as of 07-FEB-2002 we must have this pattern as it is because
+;; linker relaxation will not work any other way.
+;;
+;; Alternatives: "M" (bias of -1) uses "dec w,%5", otherwise the bias is
+;; added explicitly; "K" (small table) dispatches via pcl, otherwise via
+;; the stack/__indcall sequence as in *casedispatch.
+(define_peephole
+ [(set (reg:QI 10)
+ (plus:QI (match_operand 5 "nonimmediate_operand" "rS,rS,rS,rS")
+ (match_operand 1 "const_int_operand" "M, n, M, n")))
+ (set (match_operand:QI 0 "register_operand" "+r, r, r, r")
+ (reg:QI 10))
+ (set (cc0)
+ (compare (match_dup 0)
+ (match_operand 2 "const_int_operand" "K, K, n, n")))
+ (set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+ (parallel [(set (pc)
+ (plus:HI (pc)
+ (zero_extend:HI (match_dup 0))))
+ (use (label_ref (match_operand 3 "" "")))
+ (use (match_dup 2))])]
+ "(INTVAL (operands[1]) != 0
+ && find_regno_note (insn, REG_DEAD, REGNO (operands[0])))"
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ case 2:
+ OUT_AS2 (dec, w, %5);
+ break;
+
+ case 1:
+ case 3:
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (add, w, %5);
+ break;
+ }
+
+ OUT_AS2 (cmp, w, %2);
+ OUT_AS1 (sc, );
+ OUT_AS1 (page, %4);
+ OUT_AS1 (jmp, %4);
+
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ OUT_AS2 (add, w, WREG);
+ OUT_AS2 (add, pcl, w);
+ return \"\";
+
+ case 2:
+ case 3:
+ OUT_AS1 (push, %0%<);
+ OUT_AS1 (push, #0%<);
+ OUT_AS2 (add, w, WREG);
+ OUT_AS1 (snc, );
+ OUT_AS1 (inc, 1(SP));
+ OUT_AS2 (add, 2(SP), w);
+ OUT_AS1 (snc, );
+ OUT_AS1 (inc, 1(SP));
+ OUT_AS1 (page, __indcall);
+ OUT_AS1 (jmp, __indcall%>%>);
+ return \"\";
+ }
+ }")
+
+;; Same dispatch merge as above but without the index-bias insns: just
+;; the range compare followed by the table dispatch.
+(define_peephole
+ [(set (cc0)
+ (compare (match_operand:QI 0 "nonimmediate_operand" "rS,rS")
+ (match_operand 1 "const_int_operand" "K, n")))
+ (set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (parallel [(set (pc)
+ (plus:HI (pc)
+ (zero_extend:HI (match_dup 0))))
+ (use (label_ref (match_operand 3 "" "")))
+ (use (match_dup 1))])]
+ ""
+ "@
+ mov\\tw,%0\;cmp\\tw,%1\;sc\;page\\t%2\;jmp\\t%2\;add\\tw,wreg\;add\\tpcl,w
+ mov\\tw,%0\;cmp\\tw,%1\;sc\;page\\t%2\;jmp\\t%2\;push\\t%0%<\;push\\t#0%<\;add\\tw,wreg\;snc\;inc\\t1(SP)\;add\\t2(SP),w\;snc\;inc\\t1(SP)\;page\\t__indcall\;jmp\\t__indcall%>%>")
+
+;; Decrement an HImode value and branch on (in)equality with -1.
+;; Implemented as adding #255 to both bytes; for DP/IP/SP (the pointer
+;; registers) the high byte uses plain "add" rather than "addc" --
+;; presumably because those registers get special carry handling on
+;; this part (TODO confirm against ip2k hardware docs).
+;; EQ branches when no carry out (sc skips), NE on the opposite (snc).
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "+roR")
+ (plus:HI (match_dup 0)
+ (const_int -1)))
+ (set (cc0)
+ (compare (match_dup 0)
+ (match_operand 3 "const_int_operand" "n")))
+ (set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ "((GET_CODE (operands[2]) == EQ || GET_CODE (operands[2]) == NE)
+ && ((INTVAL (operands[3]) == -1) || (INTVAL (operands[3]) == 65535)))"
+ "*{
+ OUT_AS2 (mov, w, #255);
+ OUT_AS2 (add, %L0, w);
+ if ((GET_CODE (operands[0]) == REG)
+ && ((REGNO (operands[0]) == REG_DP)
+ || (REGNO (operands[0]) == REG_IP)
+ || (REGNO (operands[0]) == REG_SP)))
+ {
+ OUT_AS2 (add, %H0, w);
+ }
+ else
+ {
+ OUT_AS2 (addc, %H0, w);
+ }
+ if (GET_CODE (operands[2]) == EQ)
+ OUT_AS1 (sc, );
+ else
+ OUT_AS1 (snc, );
+ return AS1 (page, %1) CR_TAB
+ AS1 (jmp, %1);
+ }")
+
+;; QImode decrement-and-branch-on-zero, collapsed to a single
+;; decsz/decsnz (decrement and skip if [not] zero) in front of the
+;; page/jmp pair.
+(define_peephole
+ [(set (match_operand:QI 0 "nonimmediate_operand" "+rS")
+ (plus:QI (match_dup 0)
+ (const_int -1)))
+ (set (cc0)
+ (match_dup 0))
+ (set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[2]) == EQ || GET_CODE (operands[2]) == NE)"
+ "*{
+ if (GET_CODE (operands[2]) == EQ)
+ OUT_AS1 (decsnz, %0);
+ else
+ OUT_AS1 (decsz, %0);
+ return AS1 (page, %1) CR_TAB
+ AS1 (jmp, %1);
+ }")
+
+;; Handle move and compare-with-zero operations - we can reuse w across
+;; the two operations.
+;;
+;; QImode: valid when the value being tested is either the move's
+;; source or its destination, so w already holds it after the copy and
+;; a bare snz/sz suffices before the page/jmp.
+(define_peephole
+ [(set (reg:QI 10)
+ (match_operand:QI 1 "nonimmediate_operand" "rS"))
+ (set (match_operand:QI 0 "nonimmediate_operand" "=rS")
+ (reg:QI 10))
+ (set (cc0)
+ (match_operand:QI 2 "nonimmediate_operand" "rS"))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "((GET_CODE (operands[3]) == EQ || GET_CODE (operands[3]) == NE)
+ && (rtx_equal_p (operands[0], operands[2])
+ || rtx_equal_p (operands[1], operands[2])))"
+ "*{
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (mov, %0, w);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }")
+
+;; Handle move and compare-with-zero operations - we can reuse w across
+;; the two operations.
+;;
+;; SImode variant: the moved byte must be the most-significant byte of
+;; the tested SI value (high half of its high half); the remaining
+;; three bytes are OR'd into w to form the zero test.
+(define_peephole
+ [(set (reg:QI 10)
+ (match_operand:QI 1 "nonimmediate_operand" "uS"))
+ (set (match_operand:QI 0 "nonimmediate_operand" "+uS")
+ (reg:QI 10))
+ (set (cc0)
+ (match_operand:SI 2 "nonimmediate_operand" "uS"))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "((GET_CODE (operands[3]) == EQ || GET_CODE (operands[3]) == NE)
+ && (rtx_equal_p (operands[0],
+ ip2k_get_high_half (ip2k_get_high_half (operands[2],
+ HImode), QImode))
+ || rtx_equal_p (operands[1],
+ ip2k_get_high_half (ip2k_get_high_half (operands[2],
+ HImode),
+ QImode))))"
+ "*{
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (mov, %0, w);
+ OUT_AS2 (or, w, %B2);
+ OUT_AS2 (or, w, %C2);
+ OUT_AS2 (or, w, %D2);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }")
+
+;; Handle move and compare-with-zero operations - we can reuse w across
+;; the two operations.
+;;
+;; HImode variant: if the moved byte was the high half of the tested
+;; value, OR in the low half (and vice versa) to complete the zero test.
+(define_peephole
+ [(set (reg:QI 10)
+ (match_operand:QI 1 "nonimmediate_operand" "uS"))
+ (set (match_operand:QI 0 "nonimmediate_operand" "+uS")
+ (reg:QI 10))
+ (set (cc0)
+ (match_operand:HI 2 "nonimmediate_operand" "uS"))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "((GET_CODE (operands[3]) == EQ || GET_CODE (operands[3]) == NE)
+ && (rtx_equal_p (operands[0], ip2k_get_high_half (operands[2], QImode))
+ || rtx_equal_p (operands[1], ip2k_get_high_half (operands[2], QImode))
+ || rtx_equal_p (operands[0], ip2k_get_low_half (operands[2], QImode))
+ || rtx_equal_p (operands[1], ip2k_get_low_half (operands[2],QImode))))"
+ "*{
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (mov, %0, w);
+ if (rtx_equal_p (operands[0], ip2k_get_high_half (operands[2], QImode))
+ || rtx_equal_p (operands[1], ip2k_get_high_half (operands[2], QImode)))
+ OUT_AS2 (or, w, %L2);
+ else
+ OUT_AS2 (or, w, %H2);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }")
+
+;; Handle move and compare-with-zero operations - we can reuse w across
+;; the two operations.
+;;
+;; HImode copy whose destination (op0) is then tested against zero: the
+;; high byte goes through w and the low byte via push/pop, after which
+;; ORing %L0 into w (still holding the high byte) sets the zero flag for
+;; the whole word.
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "+uo")
+ (match_operand:HI 1 "nonimmediate_operand" "uo"))
+ (set (cc0)
+ (match_dup 0))
+ (set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[2]) == EQ || GET_CODE (operands[2]) == NE)"
+ "*{
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS1 (push, %L1%<);
+ OUT_AS1 (pop, %L0%>);
+ OUT_AS2 (mov, %H0, w);
+ OUT_AS2 (or, w, %L0);
+ if (GET_CODE (operands[2]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %3) CR_TAB
+ AS1 (jmp, %3);
+ }")
+
+;; Handle move and compare-with-zero operations - we can reuse w across
+;; the two operations.
+;;
+;; Identical to the previous pattern except that cc0 is set from the move
+;; source (op1) rather than the destination; after the copy both hold the
+;; same value, so the emitted code is the same.
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "+uo")
+ (match_operand:HI 1 "nonimmediate_operand" "uo"))
+ (set (cc0)
+ (match_dup 1))
+ (set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[2]) == EQ || GET_CODE (operands[2]) == NE)"
+ "*{
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS1 (push, %L1%<);
+ OUT_AS1 (pop, %L0%>);
+ OUT_AS2 (mov, %H0, w);
+ OUT_AS2 (or, w, %L0);
+ if (GET_CODE (operands[2]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %3) CR_TAB
+ AS1 (jmp, %3);
+ }")
+
+;; Load a 16-bit word indirectly through IP (reg 4) and test it against
+;; zero.  Alternative 0 loads into IP itself, shuffling the old contents
+;; via the stack; alternative 1 loads into a general destination and
+;; restores ipl afterwards unless this insn is where IP dies.
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "+f,bqdo")
+ (mem:HI (reg:HI 4)))
+ (set (cc0)
+ (match_dup 0))
+ (set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)"
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ OUT_AS1 (push, (IP));
+ OUT_AS1 (inc, ipl);
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, ipl, w);
+ OUT_AS1 (pop, iph);
+ OUT_AS2 (or, w, iph);
+ if (GET_CODE (operands[1]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %2) CR_TAB
+ AS1 (jmp, %2);
+
+ case 1:
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, %H0, w);
+ OUT_AS1 (inc, ipl);
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, %L0, w);
+ if (!find_regno_note (insn, REG_DEAD, REG_IP))
+ OUT_AS1 (dec, ipl);
+ OUT_AS2 (or, w, %H0);
+ if (GET_CODE (operands[1]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %2) CR_TAB
+ AS1 (jmp, %2);
+ }
+ }")
+
+;; Same as the preceding pattern, but cc0 is set from the IP-indirect
+;; memory word itself rather than the loaded destination; since the
+;; destination has just been given that value, the emitted code is
+;; identical.
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "+f,bqdo")
+ (mem:HI (reg:HI 4)))
+ (set (cc0)
+ (mem:HI (reg:HI 4)))
+ (set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)"
+ "*{
+ switch (which_alternative)
+ {
+ case 0:
+ OUT_AS1 (push, (IP));
+ OUT_AS1 (inc, ipl);
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, ipl, w);
+ OUT_AS1 (pop, iph);
+ OUT_AS2 (or, w, iph);
+ if (GET_CODE (operands[1]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %2) CR_TAB
+ AS1 (jmp, %2);
+
+ case 1:
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, %H0, w);
+ OUT_AS1 (inc, ipl);
+ OUT_AS2 (mov, w, (IP));
+ OUT_AS2 (mov, %L0, w);
+ if (!find_regno_note (insn, REG_DEAD, REG_IP))
+ OUT_AS1 (dec, ipl);
+ OUT_AS2 (or, w, %H0);
+ if (GET_CODE (operands[1]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %2) CR_TAB
+ AS1 (jmp, %2);
+ }
+ }")
+
+;; Handle move-twice and compare-with-zero operations - we can reuse w across
+;; the two operations.
+;;
+;; op0 and op2 both receive op1, then the result is tested against zero.
+;; When either destination is a register that one of the other operands'
+;; addresses uses, the copies are sequenced through the stack so the
+;; addressing registers are read before being overwritten; otherwise the
+;; cheaper straight-line byte copies are emitted.
+(define_peephole
+ [(parallel [(set (match_operand:HI 0 "ip2k_gen_operand" "=uS")
+ (match_operand:HI 1 "ip2k_gen_operand" "uS"))
+ (set (match_operand:HI 2 "ip2k_gen_operand" "=uS")
+ (match_dup 1))])
+ (set (cc0)
+ (match_dup 0))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[3]) == EQ || GET_CODE (operands[3]) == NE)"
+ "*{
+ if ((REG_P (operands[0])
+ && !(ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[0]), 2)
+ && ip2k_xexp_not_uses_reg_p (operands[2],
+ REGNO (operands[0]), 2)))
+ || (REG_P (operands[2])
+ && !(ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[2]), 2)
+ && ip2k_xexp_not_uses_reg_p (operands[1],
+ REGNO (operands[2]), 2))))
+ {
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS1 (push, %H1%<);
+ OUT_AS1 (push, %H1%<);
+ OUT_AS1 (pop, %H0%>);
+ OUT_AS2 (mov, %L0, w);
+ OUT_AS1 (pop, %H2%>);
+ OUT_AS2 (mov, %L2, w);
+ OUT_AS2 (or, w, %H2);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS2 (mov, %L0, w);
+ OUT_AS2 (mov, %L2, w);
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS2 (mov, %H0, w);
+ OUT_AS2 (mov, %H2, w);
+ OUT_AS2 (or, w, %L2);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }
+ }")
+
+;; As above, but cc0 is set from the second copy (op2) instead of the
+;; first; after the double move both copies hold the same value, so the
+;; emitted code is identical.
+(define_peephole
+ [(parallel [(set (match_operand:HI 0 "ip2k_gen_operand" "=uS")
+ (match_operand:HI 1 "ip2k_gen_operand" "uS"))
+ (set (match_operand:HI 2 "ip2k_gen_operand" "=uS")
+ (match_dup 1))])
+ (set (cc0)
+ (match_dup 2))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "(GET_CODE (operands[3]) == EQ || GET_CODE (operands[3]) == NE)"
+ "*{
+ if ((REG_P (operands[0])
+ && !(ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[0]), 2)
+ && ip2k_xexp_not_uses_reg_p (operands[2],
+ REGNO (operands[0]), 2)))
+ || (REG_P (operands[2])
+ && !(ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[2]), 2)
+ && ip2k_xexp_not_uses_reg_p (operands[1],
+ REGNO (operands[2]), 2))))
+ {
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS1 (push, %H1%<);
+ OUT_AS1 (push, %H1%<);
+ OUT_AS1 (pop, %H0%>);
+ OUT_AS2 (mov, %L0, w);
+ OUT_AS1 (pop, %H2%>);
+ OUT_AS2 (mov, %L2, w);
+ OUT_AS2 (or, w, %H2);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }
+ else
+ {
+ OUT_AS2 (mov, w, %L1);
+ OUT_AS2 (mov, %L0, w);
+ OUT_AS2 (mov, %L2, w);
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS2 (mov, %H0, w);
+ OUT_AS2 (mov, %H2, w);
+ OUT_AS2 (or, w, %L2);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }
+ }")
+
+;; Handle move and compare-with-zero operations - we can reuse w across
+;; the two operations.
+;;
+;; HImode copy whose destination or source is the high half of the SImode
+;; value subsequently tested against zero.
+;; NOTE(review): the OR chain reads %B0/%C0/%D0, but operand 0 is HImode
+;; and the value under test is the SImode operand 2 - confirm the intended
+;; byte selectors (%B2/%C2/%D2?) against ip2k's print_operand handling.
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "+uo")
+ (match_operand:HI 1 "nonimmediate_operand" "uo"))
+ (set (cc0)
+ (match_operand:SI 2 "nonimmediate_operand" "uo"))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "((GET_CODE (operands[3]) == EQ || GET_CODE (operands[3]) == NE)
+ && (rtx_equal_p (operands[0], ip2k_get_high_half (operands[2], HImode))
+ || rtx_equal_p (operands[1],
+ ip2k_get_high_half (operands[2], HImode))))"
+ "*{
+ OUT_AS2 (mov, w, %H1);
+ OUT_AS1 (push, %L1%<);
+ OUT_AS1 (pop, %L0%>);
+ OUT_AS2 (mov, %H0, w);
+ OUT_AS2 (or, w, %B0);
+ OUT_AS2 (or, w, %C0);
+ OUT_AS2 (or, w, %D0);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }")
+
+;; Handle bitwise-and and compare-with-zero operations on bytes.
+;;
+;; When the AND result register dies at the compare, the store into op0
+;; can be dropped entirely: the AND itself leaves the zero flag usable for
+;; the EQ/NE branch.
+(define_peephole
+ [(set (reg:QI 10)
+ (match_operand:QI 2 "general_operand" " g"))
+ (set (reg:QI 10)
+ (and:QI (match_operand:QI 1 "general_operand" "g")
+ (reg:QI 10)))
+ (set (match_operand:QI 0 "register_operand" "+r")
+ (reg:QI 10))
+ (set (cc0)
+ (match_dup 0))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "(find_regno_note (PREV_INSN (insn), REG_DEAD, REGNO (operands[0]))
+ && (GET_CODE (operands[3]) == EQ || GET_CODE (operands[3]) == NE))"
+ "*{
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (and, w, %2);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }")
+
+;; Handle bitwise-xor and compare-with-zero operations on bytes.
+;;
+;; As with the AND pattern above: if the XOR result dies at the compare,
+;; skip the store and branch straight off the flags from the XOR.
+(define_peephole
+ [(set (match_operand:QI 0 "register_operand" "+r")
+ (xor:QI (match_operand:QI 1 "general_operand" "g")
+ (match_operand:QI 2 "general_operand" "g")))
+ (set (cc0)
+ (match_dup 0))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "(find_regno_note (PREV_INSN (insn), REG_DEAD, REGNO (operands[0]))
+ && (GET_CODE (operands[3]) == EQ || GET_CODE (operands[3]) == NE))"
+ "*{
+ OUT_AS2 (mov, w, %1);
+ OUT_AS2 (xor, w, %2);
+ if (GET_CODE (operands[3]) == EQ)
+ OUT_AS1 (snz, );
+ else
+ OUT_AS1 (sz, );
+ return AS1 (page, %4) CR_TAB
+ AS1 (jmp, %4);
+ }")
+
+;; Cope with reload's vagaries.
+;;
+
+;; Bundle a QI move out of a DP-relative location, a reload of DP (reg 12),
+;; and a QI move back through the new DP into a single insn: the value is
+;; parked on the stack across the DP change.  Alternative 0 reloads DP from
+;; an immediate (loadh/loadl), alternative 1 from a register/memory pair.
+(define_insn "*pushqi_reload_popqi"
+ [(set (match_operand:QI 0 "ip2k_nonsp_reg_operand" "=u, u")
+ (match_operand:QI 1 "ip2k_short_operand" "S, S"))
+ (set (reg:HI 12)
+ (match_operand:HI 2 "general_operand" "i,ro"))
+ (set (match_operand:QI 3 "ip2k_short_operand" "=S, S")
+ (match_dup 0))]
+ ""
+ "@
+ push\\t%1%<\;loadh\\t%x2\;loadl\\t%x2\;pop\\t%3%>
+ push\\t%1%<\;mov\\tw,%L2\;push\\t%H2\;pop\\tdph\;mov\\tdpl,w\;pop\\t%3%>"
+)
+
+;; Form the insn above out of the three separate insns reload emits, when
+;; the intermediate register dies, both short operands are DP-relative,
+;; the new DP value does not depend on the intermediate register, and any
+;; SP-relative source stays within push/pop displacement range.
+(define_peephole2
+ [(set (match_operand:QI 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand:QI 1 "ip2k_short_operand" ""))
+ (set (reg:HI 12)
+ (match_operand:HI 2 "general_operand" ""))
+ (set (match_operand:QI 3 "ip2k_short_operand" "")
+ (match_dup 0))]
+ "(ip2k_reorg_split_himode
+ && peep2_reg_dead_p (3, operands[0])
+ && ip2k_address_uses_reg_p (operands[1], REG_DP)
+ && ip2k_address_uses_reg_p (operands[3], REG_DP)
+ && !(ip2k_address_uses_reg_p (operands[2], REG_SP)
+ && (GET_CODE (XEXP (operands[2], 0)) == PLUS)
+ && (INTVAL (XEXP (XEXP (operands[2], 0), 1)) >= 126))
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (reg:HI 12)
+ (match_dup 2))
+ (set (match_dup 3)
+ (match_dup 0))])]
+ "")
+
+;; HImode version of *pushqi_reload_popqi: both bytes are pushed before
+;; the DP reload and popped afterwards (high byte first on pop).
+(define_insn "*pushhi_reload_pophi"
+ [(set (match_operand:HI 0 "ip2k_nonsp_reg_operand" "=u, u")
+ (match_operand:HI 1 "ip2k_short_operand" "S, S"))
+ (set (reg:HI 12)
+ (match_operand:HI 2 "general_operand" "i,ro"))
+ (set (match_operand:HI 3 "ip2k_short_operand" "=S, S")
+ (match_dup 0))]
+ ""
+ "@
+ push\\t%L1%<\;push\\t%H1%<\;loadh\\t%x2\;loadl\\t%x2\;pop\\t%H3%>\;pop\\t%L3%>
+ push\\t%L1%<\;push\\t%H1%<\;mov\\tw,%L2\;push\\t%H2\;pop\\tdph\;mov\\tdpl,w\;pop\\t%H3%>\;pop\\t%L3%>"
+)
+
+;; HImode version of the QI combiner above; the SP displacement limit is
+;; one lower (125) because two bytes travel across the stack.
+(define_peephole2
+ [(set (match_operand:HI 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand:HI 1 "ip2k_short_operand" ""))
+ (set (reg:HI 12)
+ (match_operand:HI 2 "general_operand" ""))
+ (set (match_operand:HI 3 "ip2k_short_operand" "")
+ (match_dup 0))]
+ "(ip2k_reorg_split_simode
+ && peep2_reg_dead_p (3, operands[0])
+ && ip2k_address_uses_reg_p (operands[1], REG_DP)
+ && ip2k_address_uses_reg_p (operands[3], REG_DP)
+ && !(ip2k_address_uses_reg_p (operands[2], REG_SP)
+ && (GET_CODE (XEXP (operands[2], 0)) == PLUS)
+ && (INTVAL (XEXP (XEXP (operands[2], 0), 1)) >= 125))
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (reg:HI 12)
+ (match_dup 2))
+ (set (match_dup 3)
+ (match_dup 0))])]
+ "")
+
+;; SImode version: all four bytes cross the stack around the DP reload.
+(define_insn "*pushsi_reload_popsi"
+ [(set (match_operand:SI 0 "ip2k_nonsp_reg_operand" "=u, u")
+ (match_operand:SI 1 "ip2k_short_operand" "S, S"))
+ (set (reg:HI 12)
+ (match_operand:HI 2 "general_operand" "i,ro"))
+ (set (match_operand:SI 3 "ip2k_short_operand" "=S, S")
+ (match_dup 0))]
+ ""
+ "@
+ push\\t%D1%<\;push\\t%C1%<\;push\\t%B1%<\;push\\t%A1%<\;loadh\\t%x2\;loadl\\t%x2\;pop\\t%A3%>\;pop\\t%B3%>\;pop\\t%C3%>\;pop\\t%D3%>
+ push\\t%D1%<\;push\\t%C1%<\;push\\t%B1%<\;push\\t%A1%<\;mov\\tw,%L2\;push\\t%H2\;pop\\tdph\;mov\\tdpl,w\;pop\\t%A3%>\;pop\\t%B3%>\;pop\\t%C3%>\;pop\\t%D3%>"
+)
+
+;; SImode combiner; SP displacement limit drops to 123 (four bytes on the
+;; stack).  Note there is no operands[0]-not-in-operands[2] check here,
+;; unlike the QI/HI variants.
+(define_peephole2
+ [(set (match_operand:SI 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand:SI 1 "ip2k_short_operand" ""))
+ (set (reg:HI 12)
+ (match_operand:HI 2 "general_operand" ""))
+ (set (match_operand:SI 3 "ip2k_short_operand" "")
+ (match_dup 0))]
+ "(ip2k_reorg_split_dimode
+ && peep2_reg_dead_p (3, operands[0])
+ && ip2k_address_uses_reg_p (operands[1], REG_DP)
+ && ip2k_address_uses_reg_p (operands[3], REG_DP)
+ && ! (ip2k_address_uses_reg_p (operands[2], REG_SP)
+ && (GET_CODE (XEXP (operands[2], 0)) == PLUS)
+ && (INTVAL (XEXP (XEXP (operands[2], 0), 1)) >= 123)))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (reg:HI 12)
+ (match_dup 2))
+ (set (match_dup 3)
+ (match_dup 0))])]
+ "")
+
+;; DImode version: all eight bytes cross the stack around the DP reload.
+(define_insn "*pushdi_reload_popdi"
+ [(set (match_operand:DI 0 "ip2k_nonsp_reg_operand" "=u, u")
+ (match_operand:DI 1 "ip2k_short_operand" "S, S"))
+ (set (reg:HI 12)
+ (match_operand:HI 2 "general_operand" "i,ro"))
+ (set (match_operand:DI 3 "ip2k_short_operand" "=S, S")
+ (match_dup 0))]
+ ""
+ "@
+ push\\t%S1%<\;push\\t%T1%<\;push\\t%U1%<\;push\\t%V1%<\;push\\t%W1%<\;push\\t%X1%<\;push\\t%Y1%<\;push\\t%Z1%<\;loadh\\t%x2\;loadl\\t%x2\;pop\\t%Z3%>\;pop\\t%Y3%>\;pop\\t%X3%>\;pop\\t%W3%>\;pop\\t%V3%>\;pop\\t%U3%>\;pop\\t%T3%>\;pop\\t%S3%>
+ push\\t%S1%<\;push\\t%T1%<\;push\\t%U1%<\;push\\t%V1%<\;push\\t%W1%<\;push\\t%X1%<\;push\\t%Y1%<\;push\\t%Z1%<\;mov\\tw,%L2\;push\\t%H2\;pop\\tdph\;mov\\tdpl,w\;pop\\t%Z3%>\;pop\\t%Y3%>\;pop\\t%X3%>\;pop\\t%W3%>\;pop\\t%V3%>\;pop\\t%U3%>\;pop\\t%T3%>\;pop\\t%S3%>"
+)
+
+;; DImode combiner; there is no larger split mode, so the guard is the
+;; generic reorg-in-progress/completed test, and the SP displacement limit
+;; falls to 119 (eight bytes on the stack).
+(define_peephole2
+ [(set (match_operand:DI 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand:DI 1 "ip2k_short_operand" ""))
+ (set (reg:HI 12)
+ (match_operand:HI 2 "general_operand" ""))
+ (set (match_operand:DI 3 "ip2k_short_operand" "")
+ (match_dup 0))]
+ "((ip2k_reorg_in_progress || ip2k_reorg_completed)
+ && peep2_reg_dead_p (3, operands[0])
+ && ip2k_address_uses_reg_p (operands[1], REG_DP)
+ && ip2k_address_uses_reg_p (operands[3], REG_DP)
+ && ! (ip2k_address_uses_reg_p (operands[2], REG_SP)
+ && (GET_CODE (XEXP (operands[2], 0)) == PLUS)
+ && (INTVAL (XEXP (XEXP (operands[2], 0), 1)) >= 119)))"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (set (reg:HI 12)
+ (match_dup 2))
+ (set (match_dup 3)
+ (match_dup 0))])]
+ "")
+
+;; FIXME: Disabled because in lshiftrt:SI op1 must match op0
+;; (the leading "0 &&" in the condition keeps this pattern from ever
+;; firing; it would otherwise forward a binary-op result straight into
+;; its final destination when the intermediate register dies).
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(match_operand 1 "general_operand" "")
+ (match_operand 2 "general_operand" "")]))
+ (set (match_operand 4 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "0 && (peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 4)
+ (match_op_dup 3 [(match_dup 1)
+ (match_dup 2)]))]
+ "")
+
+;; Forward the result of a binary operation with a zero-extended operand
+;; straight into its final destination when the intermediate register
+;; dies and the destination does not overlap it.  Four variants: the
+;; zero_extend on either operand, for HImode and SImode extensions.
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(zero_extend:HI
+ (match_operand 1 "general_operand" ""))
+ (match_operand 2 "general_operand" "")]))
+ (set (match_operand 4 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 4)
+ (match_op_dup 3 [(zero_extend:HI (match_dup 1))
+ (match_dup 2)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(match_operand 1 "general_operand" "")
+ (zero_extend:HI
+ (match_operand 2 "general_operand" ""))]))
+ (set (match_operand 4 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 4)
+ (match_op_dup 3 [(match_dup 1)
+ (zero_extend:HI (match_dup 2))]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(zero_extend:SI
+ (match_operand 1 "general_operand" ""))
+ (match_operand 2 "general_operand" "")]))
+ (set (match_operand 4 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 4)
+ (match_op_dup 3 [(zero_extend:SI (match_dup 1))
+ (match_dup 2)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(match_operand 1 "general_operand" "")
+ (zero_extend:SI
+ (match_operand 2 "general_operand" ""))]))
+ (set (match_operand 4 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 4)
+ (match_op_dup 3 [(match_dup 1)
+ (zero_extend:SI (match_dup 2))]))]
+ "")
+
+;; Feed a freshly-moved value directly into a following binary operation
+;; as one of its operands, eliminating the intermediate register when it
+;; dies (or is itself the final destination).  The two fully general
+;; variants are disabled with "0 &&"; only the zero_extend-restricted
+;; forms (HI and SI, on either operand side) are active.
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(match_operand 4 "general_operand" "")
+ (match_dup 0)]))]
+ "0 && ((peep2_reg_dead_p (2, operands[0])
+ || rtx_equal_p (operands[0], operands[2]))
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_op_dup 3 [(match_dup 4)
+ (match_dup 1)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(zero_extend:HI
+ (match_operand 4 "general_operand" ""))
+ (match_dup 0)]))]
+ "((peep2_reg_dead_p (2, operands[0])
+ || rtx_equal_p (operands[0], operands[2]))
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_op_dup 3 [(zero_extend:HI (match_dup 4))
+ (match_dup 1)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(zero_extend:SI
+ (match_operand 4 "general_operand" ""))
+ (match_dup 0)]))]
+ "((peep2_reg_dead_p (2, operands[0])
+ || rtx_equal_p (operands[0], operands[2]))
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_op_dup 3 [(zero_extend:SI (match_dup 4))
+ (match_dup 1)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(match_dup 0)
+ (match_operand 4 "general_operand" "")]))]
+ "0 && ((peep2_reg_dead_p (2, operands[0])
+ || rtx_equal_p (operands[0], operands[2]))
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_op_dup 3 [(match_dup 1)
+ (match_dup 4)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(match_dup 0)
+ (zero_extend:HI
+ (match_operand 4 "general_operand" ""))]))]
+ "((peep2_reg_dead_p (2, operands[0])
+ || rtx_equal_p (operands[0], operands[2]))
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_op_dup 3 [(match_dup 1)
+ (zero_extend:HI (match_dup 4))]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (match_operator 3 "ip2k_binary_operator"
+ [(match_dup 0)
+ (zero_extend:SI
+ (match_operand 4 "general_operand" ""))]))]
+ "((peep2_reg_dead_p (2, operands[0])
+ || rtx_equal_p (operands[0], operands[2]))
+ && ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_op_dup 3 [(match_dup 1)
+ (zero_extend:SI (match_dup 4))]))]
+ "")
+
+;; Same forwarding idea, but the consumer is a cc0-setting compare rather
+;; than a stored result: substitute the move's source directly into the
+;; compare when the moved-to register dies there.  The first (fully
+;; general, operand-1 side) variant is disabled with "0 &&".
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (cc0)
+ (match_operator 2 "ip2k_binary_operator"
+ [(match_operand 3 "general_operand" "")
+ (match_dup 0)]))]
+ "0 && (peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[3], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (cc0)
+ (match_op_dup 2 [(match_dup 3)
+ (match_dup 1)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (cc0)
+ (match_operator 2 "ip2k_binary_operator"
+ [(zero_extend:HI
+ (match_operand 3 "general_operand" ""))
+ (match_dup 0)]))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[3], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (cc0)
+ (match_op_dup 2 [(zero_extend:HI (match_dup 3))
+ (match_dup 1)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (cc0)
+ (match_operator 2 "ip2k_binary_operator"
+ [(zero_extend:SI
+ (match_operand 3 "general_operand" ""))
+ (match_dup 0)]))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[3], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (cc0)
+ (match_op_dup 2 [(zero_extend:SI (match_dup 3))
+ (match_dup 1)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (cc0)
+ (match_operator 2 "ip2k_binary_operator"
+ [(match_dup 0)
+ (match_operand 3 "general_operand" "")]))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[3], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (cc0)
+ (match_op_dup 2 [(match_dup 1)
+ (match_dup 3)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (cc0)
+ (match_operator 2 "ip2k_binary_operator"
+ [(match_dup 0)
+ (zero_extend:HI
+ (match_operand 3 "general_operand" ""))]))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[3], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (cc0)
+ (match_op_dup 2 [(match_dup 1)
+ (zero_extend:HI (match_dup 3))]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (cc0)
+ (match_operator 2 "ip2k_binary_operator"
+ [(match_dup 0)
+ (zero_extend:SI
+ (match_operand 3 "general_operand" ""))]))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[3], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (cc0)
+ (match_op_dup 2 [(match_dup 1)
+ (zero_extend:SI (match_dup 3))]))]
+ "")
+
+;; Unary-operator forwarding: write a unary result straight to its final
+;; home (or apply the operator directly to a just-moved value), and fold
+;; a move followed by a plain cc0 test into a test of the move's source,
+;; whenever the intermediate register dies.
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operator 3 "ip2k_unary_operator"
+ [(match_operand 1 "general_operand" "")]))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (match_dup 0))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_op_dup 3 [(match_dup 1)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (match_operator 3 "ip2k_unary_operator" [(match_dup 0)]))]
+ "(peep2_reg_dead_p (2, operands[0])
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0]))))"
+ [(set (match_dup 2)
+ (match_op_dup 3 [(match_dup 1)]))]
+ "")
+
+(define_peephole2
+ [(set (match_operand 0 "ip2k_nonsp_reg_operand" "")
+ (match_operand 1 "nonimmediate_operand" ""))
+ (set (cc0)
+ (match_dup 0))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(set (cc0)
+ (match_dup 1))]
+ "")
+
+;; Look for places where we can shorten a compare operation.
+;;
+;; If the high byte of a HImode value has just been cleared, a test of the
+;; whole word against zero only needs to look at the low byte; operand 4
+;; is substituted with that low half.
+(define_peephole2
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (const_int 0))
+ (set (cc0)
+ (match_operand:HI 1 "nonimmediate_operand" ""))
+ (set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "(rtx_equal_p (ip2k_get_high_half (operands[1], QImode), operands[0]))"
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (cc0)
+ (match_dup 4))
+ (set (pc)
+ (if_then_else (match_op_dup 2
+ [(cc0) (const_int 0)])
+ (label_ref (match_dup 3))
+ (pc)))]
+ "{
+ operands[4] = ip2k_get_low_half (operands[1], QImode);
+ }")
+
+;; Look for places where we can shorten a compare operation.
+;;
+;; When the high byte of a HImode value has just been cleared, a compare
+;; of that value against a small constant only needs to examine the low
+;; byte.  Operand 5 is the low half of the compared value and operand 6
+;; the constant reduced to QImode.
+;;
+;; Bug fix: the range guard previously read
+;;   (abs (INTVAL (operands[2]) <= 127))
+;; which takes abs() of the boolean comparison result, so arbitrarily
+;; large negative constants slipped through.  The intended test is
+;; |constant| <= 127.
+(define_peephole2
+ [(set (match_operand:QI 0 "nonimmediate_operand" "")
+ (const_int 0))
+ (set (cc0)
+ (compare (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (set (pc)
+ (if_then_else (match_operator 3 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 4 "" ""))
+ (pc)))]
+ "(rtx_equal_p (ip2k_get_high_half (operands[1], QImode), operands[0])
+ && (abs (INTVAL (operands[2])) <= 127))"
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (cc0)
+ (compare (match_dup 5)
+ (match_dup 6)))
+ (set (pc)
+ (if_then_else (match_op_dup 3
+ [(cc0) (const_int 0)])
+ (label_ref (match_dup 4))
+ (pc)))]
+ "{
+ operands[5] = ip2k_get_low_half (operands[1], QImode);
+ operands[6] = gen_int_mode (INTVAL (operands[2]) & 0xff, QImode);
+ }")
+
+;; This is one of those cases where gcc just can't untangle our wishes. We
+;; want to add some values but get two copies of the result. In this instance
+;; however, the second copy can be made more cheaply by combining things.
+;;
+;; First variant: the add's destination is a non-SP register and the
+;; duplicate goes to a general (register or DP-relative) location; each
+;; result byte is stored to both destinations while still in w.
+(define_peephole
+ [(set (match_operand:HI 0 "ip2k_nonsp_reg_operand" "+&u")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "rS")
+ (match_operand:HI 2 "general_operand" "rSi")))
+ (set (match_operand:HI 3 "ip2k_gen_operand" "=&uS")
+ (match_dup 0))]
+ "(ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0])))
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0])))
+ && ip2k_xexp_not_uses_reg_p (operands[3], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0])))
+ && (!REG_P (operands[3])
+ || (ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3]))))))"
+ "mov\\tw,%L2\;add\\tw,%L1\;mov\\t%L0,w\;mov\\t%L3,w\;mov\\tw,%H2\;addc\\tw,%H1\;mov\\t%H0,w\;mov\\t%H3,w")
+
+;; Second variant: the add's destination is DP-relative and the duplicate
+;; goes to a non-SP register; same byte-at-a-time double store.
+(define_peephole
+ [(set (match_operand:HI 0 "ip2k_short_operand" "+&S")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "rS")
+ (match_operand:HI 2 "general_operand" "rSi")))
+ (set (match_operand:HI 3 "ip2k_nonsp_reg_operand" "=&u")
+ (match_dup 0))]
+ "(ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ! rtx_equal_p (operands[0], operands[1])
+ && ! rtx_equal_p (operands[0], operands[2]))"
+ "mov\\tw,%L2\;add\\tw,%L1\;mov\\t%L0,w\;mov\\t%L3,w\;mov\\tw,%H2\;addc\\tw,%H1\;mov\\t%H0,w\;mov\\t%H3,w")
+
+;; Some splits zero the MSByte of a word that we then use for shifting. We
+;; can therefore replace full shifts with zero-extended ones. These are
+;; cheaper for us.
+;;
+;; First variant drops the clear entirely (the cleared byte is dead
+;; afterwards); the second keeps the clear but still narrows the shift.
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand" "")
+ (const_int 0))
+ (set (match_operand:HI 1 "nonimmediate_operand" "")
+ (ashift:HI (match_operand:HI 2 "register_operand" "")
+ (match_operand 3 "const_int_operand" "")))]
+ "(rtx_equal_p (ip2k_get_high_half (operands[2], QImode), operands[0])
+ && peep2_reg_dead_p (2, operands[0]))"
+ [(set (match_dup 1)
+ (ashift:HI (zero_extend:HI (match_dup 4))
+ (match_dup 3)))]
+ "{
+ operands[4] = ip2k_get_low_half (operands[2], QImode);
+ }")
+
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand" "")
+ (const_int 0))
+ (set (match_operand:HI 1 "nonimmediate_operand" "")
+ (ashift:HI (match_operand:HI 2 "register_operand" "")
+ (match_operand 3 "const_int_operand" "")))]
+ "(rtx_equal_p (ip2k_get_high_half (operands[2], QImode), operands[0]))"
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (match_dup 1)
+ (ashift:HI (zero_extend:HI (match_dup 4))
+ (match_dup 3)))]
+ "{
+ operands[4] = ip2k_get_low_half (operands[2], QImode);
+ }")
+
+;; Some splits zero the MSByte of a word that we then use for multiplying. We
+;; can therefore replace the full multiplies with zero-extended ones.
+;; These are cheaper for us.
+;;
+;; As with the shift patterns above: the first variant drops the dead
+;; clear (or tolerates op1 == op2), the second keeps the clear.
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand" "")
+ (const_int 0))
+ (set (match_operand:HI 1 "nonimmediate_operand" "")
+ (mult:HI (match_operand:HI 2 "register_operand" "")
+ (zero_extend:HI
+ (match_operand:QI 3 "const_int_operand" ""))))]
+ "(rtx_equal_p (ip2k_get_high_half (operands[2], QImode), operands[0])
+ && (peep2_reg_dead_p (2, operands[0])
+ || rtx_equal_p (operands[1], operands[2])))"
+ [(set (match_dup 1)
+ (mult:HI (zero_extend:HI (match_dup 4))
+ (zero_extend:HI (match_dup 3))))]
+ "{
+ operands[4] = ip2k_get_low_half (operands[2], QImode);
+ }")
+
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand" "")
+ (const_int 0))
+ (set (match_operand:HI 1 "nonimmediate_operand" "")
+ (mult:HI (match_operand:HI 2 "register_operand" "")
+ (zero_extend:HI
+ (match_operand:QI 3 "const_int_operand" ""))))]
+ "(rtx_equal_p (ip2k_get_high_half (operands[2], QImode), operands[0]))"
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (match_dup 1)
+ (mult:HI (zero_extend:HI (match_dup 4))
+ (zero_extend:HI (match_dup 3))))]
+ "{
+ operands[4] = ip2k_get_low_half (operands[2], QImode);
+ }")
+
+;; Merge in a redundant move before a zero-extended multiply.
+;;
+;; The moved-to register either dies at the multiply or is overwritten by
+;; its result, so the multiply can take the move's source directly.
+(define_peephole2
+ [(set (match_operand:QI 0 "register_operand" "")
+ (match_operand:QI 1 "general_operand" ""))
+ (set (match_operand:HI 2 "nonimmediate_operand" "")
+ (mult:HI (zero_extend:HI (match_dup 0))
+ (zero_extend:HI
+ (match_operand:QI 3 "const_int_operand" ""))))]
+ "(peep2_reg_dead_p (2, operands[0])
+ || rtx_equal_p (ip2k_get_high_half (operands[2], QImode), operands[0])
+ || rtx_equal_p (ip2k_get_low_half (operands[2], QImode), operands[0]))"
+ [(set (match_dup 2)
+ (mult:HI (zero_extend:HI (match_dup 1))
+ (zero_extend:HI (match_dup 3))))]
+ "")
+
+;; Pick up redundant clears followed by adds - these can just become moves.
+;;
+;; Variant 1: the cleared register feeds the add and then dies, so
+;; "r = 0; d = r + x" collapses to "d = x".
+(define_peephole2
+ [(set (match_operand 0 "register_operand" "")
+ (const_int 0))
+ (set (match_operand 2 "nonimmediate_operand" "")
+ (plus (match_dup 0)
+ (match_operand 1 "general_operand" "")))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(set (match_dup 2)
+ (match_dup 1))]
+ "")
+
+;; Variant 2: the cleared register is also the add destination, so
+;; "r = 0; r = r + x" is "r = x" with no liveness condition needed.
+(define_peephole2
+ [(set (match_operand 0 "register_operand" "")
+ (const_int 0))
+ (set (match_dup 0)
+ (plus (match_dup 0)
+ (match_operand 1 "general_operand" "")))]
+ ""
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "")
+
+;; Clear up an add followed by a push of the result. The fact that this
+;; isn't picked up consistently within the combiner suggests a bug somewhere.
+;;
+;; NOTE(review): hard register 6 appears to be the stack pointer here (the
+;; post_dec store is a push) -- confirm against ip2k.h's register layout.
+(define_peephole2
+ [(set (match_operand:HI 0 "register_operand" "")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (set (mem:HI (post_dec:HI (reg:HI 6)))
+ (match_dup 0))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(set (mem:HI (post_dec:HI (reg:HI 6)))
+ (plus:HI (match_dup 1)
+ (match_dup 2)))]
+ "")
+
+;; Tidy up stack slot addressing where we've eliminated some registers.
+;; This looks like something strange going on though as gcc-2.97 didn't
+;; exhibit this behaviour, whereas gcc-3.0.4 does.
+;;
+;; Folds the two constant displacements into one (operand 4) so the
+;; intermediate register (dead after the push) can be dropped.
+(define_peephole2
+ [(set (match_operand:HI 0 "register_operand" "")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand 2 "const_int_operand" "")))
+ (set (mem:HI (post_dec:HI (reg:HI 6)))
+ (plus:HI (match_dup 0)
+ (match_operand 3 "const_int_operand" "")))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(set (mem:HI (post_dec:HI (reg:HI 6)))
+ (plus:HI (match_dup 1)
+ (match_dup 4)))]
+ "{
+ operands[4] = gen_int_mode (INTVAL (operands[2]) + INTVAL (operands[3]),
+ HImode);
+ }")
+
+;; Match duplicate loads of a symbol ref. This isn't something that we want to
+;; do at the peephole2 stage because more often than not we'll make one of the
+;; two loads redundant after we run peephole2. We catch the remaining cases
+;; here though
+;;
+;; Both destinations must avoid DP (the template below does not keep the
+;; data pointer's special update protocol).
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "+uS")
+ (match_operand 1 "ip2k_symbol_ref_operand" "i"))
+ (set (match_operand:HI 2 "nonimmediate_operand" "=uS")
+ (match_dup 1))]
+ "((!REG_P (operands[0]) || (REGNO (operands[0]) != REG_DP))
+ && (!REG_P (operands[2]) || (REGNO (operands[2]) != REG_DP)))"
+ "mov\\tw,%L1\;mov\\t%L0,w\;mov\\t%L2,w\;mov\\tw,%H1\;mov\\t%H0,w\;mov\\t%H2,w")
+
+;; Symbol-ref load followed by a copy of the first destination. When one of
+;; the destination registers overlaps the other operands' address expressions
+;; we must stage the high bytes through the stack (first branch); otherwise a
+;; plain six-move sequence suffices (second branch).
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "+&uS")
+ (match_operand 1 "ip2k_symbol_ref_operand" "i"))
+ (set (match_operand:HI 2 "nonimmediate_operand" "=&uS")
+ (match_dup 0))]
+ ""
+ "*{
+ if ((REG_P (operands[0])
+ && !(ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[0]), 2)
+ && ip2k_xexp_not_uses_reg_p (operands[2],
+ REGNO (operands[0]), 2)))
+ || (REG_P (operands[2])
+ && !(ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[2]), 2)
+ && ip2k_xexp_not_uses_reg_p (operands[1],
+ REGNO (operands[2]), 2))))
+ {
+ /* Push %H1 twice so it can be popped into both %H0 and %H2
+ before either destination register is clobbered. */
+ return AS2 (mov, w, %L1) CR_TAB
+ AS1 (push, %H1%<) CR_TAB
+ AS1 (push, %H1%<) CR_TAB
+ AS1 (pop, %H0%>) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS1 (pop, %H2%>) CR_TAB
+ AS2 (mov, %L2, w);
+ }
+ else
+ {
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (mov, %L2, w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (mov, %H2, w);
+ }
+ }")
+
+;; Handle the common array indexing pattern.
+;; This is of the form A = X + (Y * C).
+;; We use splits earlier in this file to get our interesting cases into the
+;; same form (i.e. zero-extended multiply and add).
+;;
+;; Special case: when the destination is DP, the addend is an immediate and
+;; the scale is 2, use loadl/loadh plus two shifted adds; otherwise fall back
+;; to mulu with the high byte picked up from MULH via addc.
+(define_insn "*mulacchi"
+ [(set (match_operand:HI 3 "nonimmediate_operand" "=rS")
+ (plus:HI (mult:HI (zero_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" "rS"))
+ (zero_extend:HI
+ (match_operand:QI 2 "const_int_operand" "n")))
+ (match_operand:HI 0 "general_operand" "rSi")))]
+ ""
+ "*{
+ if (immediate_operand (operands[0], HImode)
+ && REG_P (operands[3])
+ && (REGNO (operands[3]) == REG_DP)
+ && (INTVAL (operands[2]) == 2))
+ return AS2 (mov, w, %1) CR_TAB
+ AS1 (loadl, %x0) CR_TAB
+ AS1 (loadh, %x0) CR_TAB
+ AS2 (add, dpl, w) CR_TAB
+ AS2 (add, dpl, w);
+ else
+ return AS2 (mov, w, %1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (add, w, %L0) CR_TAB
+ AS2 (mov, %L3, w) CR_TAB
+ AS2 (mov, w, %H0) CR_TAB
+ AS2 (addc, w, MULH) CR_TAB
+ AS2 (mov, %H3, w);
+ }")
+
+;; Combine a zero-extended multiply and the following add into the *mulacchi
+;; pattern above. Requires the intermediate product register to die, and the
+;; destination must not feed back into the addend or product expressions.
+(define_peephole2
+ [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (zero_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" ""))
+ (zero_extend:HI
+ (match_operand 2 "const_int_operand" ""))))
+ (set (match_operand:HI 3 "nonimmediate_operand" "")
+ (plus:HI (match_dup 0)
+ (match_operand:HI 4 "general_operand" "")))]
+ "(((! REG_P (operands[3]))
+ || (ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))))
+ && peep2_reg_dead_p (2, operands[0]))"
+ [(set (match_dup 3)
+ (plus:HI (mult:HI (zero_extend:HI
+ (match_dup 1))
+ (zero_extend:HI
+ (match_dup 2)))
+ (match_dup 4)))]
+ "")
+
+;; Multiply-and-accumulate that also keeps the raw product: a parallel that
+;; sets operand 0 to the zero-extended product and operand 3 to product plus
+;; operand 4. Used when the intermediate result stays live.
+(define_insn "*mulhi_and_accumulate"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rS")
+ (mult:HI (zero_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" "rS"))
+ (zero_extend:HI
+ (match_operand:QI 2 "const_int_operand" "n"))))
+ (set (match_operand:HI 3 "nonimmediate_operand" "=rS")
+ (plus:HI (match_dup 0)
+ (match_operand:HI 4 "general_operand" "%rSi")))]
+ "((! REG_P (operands[3]))
+ || (ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))))"
+ "*{
+ /* MULH supplies the high byte of the 8x8 mulu product; it is read twice,
+ once for the accumulated sum and once for the raw product. */
+ return AS2 (mov, w, %1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (add, w, %L4) CR_TAB
+ AS2 (mov, %L3, w) CR_TAB
+ AS2 (mov, w, %H4) CR_TAB
+ AS2 (addc, w, MULH) CR_TAB
+ AS2 (mov, %H3, w) CR_TAB
+ AS2 (mov, w, MULH) CR_TAB
+ AS2 (mov, %H0, w);
+ }")
+
+;; Fuse a multiply and the dependent add into the *mulhi_and_accumulate
+;; parallel above (used when, unlike the earlier peephole2, the product
+;; register does NOT die and must still be produced).
+(define_peephole2
+ [(set (match_operand:HI 0 "nonimmediate_operand" "")
+ (mult:HI (zero_extend:HI
+ (match_operand:QI 1 "nonimmediate_operand" ""))
+ (zero_extend:HI
+ (match_operand 2 "const_int_operand" ""))))
+ (set (match_operand:HI 3 "nonimmediate_operand" "")
+ (plus:HI (match_dup 0)
+ (match_operand:HI 4 "general_operand" "")))]
+ "((! REG_P (operands[3]))
+ || (ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))))"
+ [(parallel [(set (match_dup 0)
+ (mult:HI (zero_extend:HI
+ (match_dup 1))
+ (zero_extend:HI
+ (match_dup 2))))
+ (set (match_dup 3)
+ (plus:HI (match_dup 0)
+ (match_dup 4)))])]
+ "")
+
+;; Handle the common array indexing pattern.
+;; This is of the form A = X + (Y * C).
+;; We use splits earlier in this file to get our interesting cases into the
+;; same form (i.e. multiply and add).
+;;
+;; Old-style peephole (runs after reload); requires the product register to
+;; be dead. Three output strategies: a DP-targeted scale-by-2 special case,
+;; a direct sequence when the destination doesn't alias the sources, and a
+;; stack-staged sequence when it does.
+(define_peephole
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (match_operand:HI 1 "nonimmediate_operand" "rS")
+ (zero_extend:HI
+ (match_operand:QI 2 "const_int_operand" "n"))))
+ (set (match_operand:HI 3 "nonimmediate_operand" "=rS")
+ (plus:HI (match_dup 0)
+ (match_operand:HI 4 "general_operand" "%rSi")))]
+ "((!REG_P (operands[3])
+ || (ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))))
+ && find_regno_note (insn, REG_DEAD, REGNO (operands[0])))"
+ "*{
+ if (immediate_operand (operands[4], HImode)
+ && REG_P (operands[3])
+ && (REGNO (operands[3]) == REG_DP)
+ && (INTVAL (operands[2]) == 2)
+ && ip2k_xexp_not_uses_reg_p (operands[1], REG_DP,
+ GET_MODE_SIZE (HImode)))
+ /* DP = imm + 2*op1: load the immediate, then add op1 shifted left
+ by one via rl (carry cleared first). */
+ return AS2 (clrb, STATUS, 0) CR_TAB
+ AS1 (loadl, %x4) CR_TAB
+ AS1 (loadh, %x4) CR_TAB
+ AS2 (rl, w, %L1) CR_TAB
+ AS2 (add, dpl, w) CR_TAB
+ AS2 (rl, w, %H1) CR_TAB
+ AS2 (add, dph, w);
+ else if (!REG_P (operands[3])
+ || (ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))))
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (add, w, %L4) CR_TAB
+ AS2 (mov, %L3, w) CR_TAB
+ AS2 (mov, w, %H4) CR_TAB
+ AS2 (addc, w, MULH) CR_TAB
+ AS2 (mov, %H3, w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (add, %H3, w);
+ else
+ /* Destination overlaps a source: stage the partial sums on the
+ stack and pop them into place at the end. */
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (add, w, %L4) CR_TAB
+ AS1 (push, wreg%<) CR_TAB
+ AS2 (mov, w, %H4) CR_TAB
+ AS2 (addc, w, MULH) CR_TAB
+ AS1 (push, wreg%<) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS1 (pop, %H3%>) CR_TAB
+ AS1 (pop, %L3%>) CR_TAB
+ AS2 (add, %H3, w);
+ }")
+
+;; Handle the more complex variant of the preceding multiply and accumulate
+;; variant of the preceding multiply-and-add operation. This one would
+;; otherwise fail to match because the result never goes dead.
+;;
+;; Here the product register IS the final destination; the condition only
+;; requires the destination not to appear in any source expression.
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rS")
+ (mult:HI (match_operand:HI 1 "nonimmediate_operand" "rS")
+ (zero_extend:HI
+ (match_operand:QI 2 "const_int_operand" "n"))))
+ (set (match_dup 0)
+ (plus:HI (match_dup 0)
+ (match_operand:HI 3 "general_operand" "%rSi")))]
+ "(!REG_P (operands[0])
+ || (ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0])))
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0])))
+ && ip2k_xexp_not_uses_reg_p (operands[3], REGNO (operands[0]),
+ GET_MODE_SIZE (GET_MODE (operands[0])))))"
+ "*{
+ if (immediate_operand (operands[3], HImode)
+ && REG_P (operands[0])
+ && (REGNO (operands[0]) == REG_DP)
+ && (INTVAL (operands[2]) == 2))
+ return AS2 (clrb, STATUS, 0) CR_TAB
+ AS1 (loadl, %x3) CR_TAB
+ AS1 (loadh, %x3) CR_TAB
+ AS2 (rl, w, %L1) CR_TAB
+ AS2 (add, dpl, w) CR_TAB
+ AS2 (rl, w, %H1) CR_TAB
+ AS2 (add, dph, w);
+ else
+ return AS2 (mov, w, %L1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (add, w, %L3) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (mov, w, %H3) CR_TAB
+ AS2 (addc, w, MULH) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (add, %H0, w);
+ }")
+
+;; Handle the a complex variant of the preceding multiply and add
+;; operations where the intermediate result is also required.
+;;
+;; INTVAL != 2 excludes the case handled by the DP special path above.
+(define_peephole
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rS")
+ (mult:HI (match_operand:HI 1 "nonimmediate_operand" "rS")
+ (zero_extend:HI
+ (match_operand:QI 2 "const_int_operand" "n"))))
+ (set (match_operand:HI 3 "nonimmediate_operand" "=rS")
+ (plus:HI (match_dup 0)
+ (match_operand:HI 4 "general_operand" "%rSi")))]
+ "((!REG_P (operands[3])
+ || (ip2k_xexp_not_uses_reg_p (operands[4], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[0], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[1], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))
+ && ip2k_xexp_not_uses_reg_p (operands[2], REGNO (operands[3]),
+ GET_MODE_SIZE (GET_MODE (operands[3])))))
+ && (INTVAL (operands[2]) != 2))"
+ "* return AS2 (mov, w, %H4) CR_TAB
+ AS2 (mov, %H3, w) CR_TAB
+ AS2 (mov, w, %L1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (mov, %L0, w) CR_TAB
+ AS2 (add, w, %L4) CR_TAB
+ AS2 (mov, %L3, w) CR_TAB
+ AS2 (mov, w, MULH) CR_TAB
+ AS2 (mov, %H0, w) CR_TAB
+ AS2 (addc, %H3, w) CR_TAB
+ AS2 (mov, w, %H1) CR_TAB
+ AS2 (mulu, w, %2) CR_TAB
+ AS2 (add, %H3, w) CR_TAB
+ AS2 (add, %H0, w);")
+
+;; Byte swapping!
+;;
+;; Recognizes a three-move swap of two bytes through a scratch register and
+;; replaces it with a push/push/pop/pop exchange. NOTE(review): (reg:QI 10)
+;; appears to be the W accumulator -- confirm against ip2k.h.
+(define_peephole
+ [(set (reg:QI 10)
+ (match_operand:QI 1 "nonimmediate_operand" "rS"))
+ (set (match_operand:QI 0 "register_operand" "=r")
+ (reg:QI 10))
+ (set (reg:QI 10)
+ (match_operand:QI 2 "nonimmediate_operand" "rS"))
+ (set (match_dup 1)
+ (reg:QI 10))
+ (set (reg:QI 10)
+ (match_dup 0))
+ (set (match_dup 2)
+ (reg:QI 10))]
+ "find_regno_note (PREV_INSN (insn), REG_DEAD, REGNO (operands[0]))"
+ "push\\t%1%<\;push\\t%2%<\;pop\\t%1%>\;pop\\t%2%>")
+
+
--- /dev/null
+; libgcc.S for the Ubicom IP2k architecture.
+;
+; Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+; Contributed by Red Hat, Inc and Ubicom, Inc.
+;
+; This file is part of GNU CC.
+;
+; GNU CC is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 2, or (at your option)
+; any later version.
+;
+; In addition to the permissions in the GNU General Public License, the
+; Free Software Foundation gives you unlimited permission to link the
+; compiled version of this file with other programs, and to distribute
+; those programs without any restriction coming from the use of this
+; file. (The General Public License restrictions do apply in other
+; respects; for example, they cover modification of the file, and
+; distribution when not linked into another program.)
+;
+; GNU CC is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GNU CC; see the file COPYING. If not, write to
+; the Free Software Foundation, 59 Temple Place - Suite 330,
+; Boston, MA 02111-1307, USA. */
+
+/*******************************************************
+ movb to, from -- copy one byte from FROM to TO via W
+ (clobbers W).
+ NOTE(review): the previous banner here described a
+ "load byte from arbitrary memory" helper that does not
+ exist in this file; verify nothing depended on it.
+*******************************************************/
+ .macro movb to, from
+ mov w, \from
+ mov \to, w
+ .endm
+
+\f
+#if defined (L_indcall)
+/* __indcall - given register containing an address, call the function
+ * at that address.
+ *
+ * The caller arranges the target address on the stack; the inner
+ * page/call pair then lets us pop that target into CALLH/CALLL so the
+ * final RET transfers control to it. NOTE(review): confirm the exact
+ * call-stack layout against the callers emitted by ip2k.c.
+ */
+
+ .sect .pram.libgcc,"ax"
+ .global __indcall
+ .func _indcall,__indcall
+
+__indcall:
+ page 1f
+ call 1f
+1: pop callh ; Get the call target
+ pop calll
+ ret ; Transfer to new function
+
+ .endfunc
+#endif
+\f
+
+#if defined (L_mulhi3)
+; __mulhi3 -- 16x16 -> 16 multiply.
+; Stacked args: first operand in 1..2(SP), second in 3..4(SP) (high byte at
+; the lower offset). Three 8x8 mulu partial products are combined; MULH
+; supplies the high byte of each product. The 16-bit result is left in what
+; becomes 1..2(SP) after the final stack adjustment, for the caller to pop.
+ .sect .pram.libgcc,"ax"
+ .global __mulhi3
+ .func _mulhi3, __mulhi3
+
+__mulhi3:
+ mov w, 2(SP) ; First upper half partial product
+ mulu w, 3(SP)
+ mov 3(SP), w
+ mov w, 1(SP) ; Second upper half partial product
+ mulu w, 4(SP)
+ add 3(SP), w
+ mov w, 2(SP) ; Lower half partial product
+ mulu w, 4(SP)
+ mov 4(SP), w
+ mov w, MULH
+ add 3(SP), w
+
+ mov w, #2 ; Adjust the stack leaving the result to
+ add spl, w ; be popped off later.
+ ret
+
+ .endfunc
+
+#endif /* defined (L_mulhi3) */
+
+#if defined (L_mulsi3)
+/*******************************************************
+ Multiplication 32 x 32
+
+ Shift-and-add: multiplier in 1..4(sp), multiplicand in
+ 5..8(sp) (big-endian byte order); result accumulates in
+ $80..$83. Each pass adds the multiplicand when the
+ multiplier LSB is set, shifts the multiplier right and
+ the multiplicand left, and stops when the multiplier is
+ exhausted. The 8 argument bytes are popped on exit.
+*******************************************************/
+
+ .sect .text.libgcc,"ax"
+ .global __mulsi3
+ .func _mulsi3, __mulsi3
+
+__mulsi3:
+ clr $80 ; Assume zero result
+ clr $81
+ clr $82
+ clr $83
+
+2: mov w, 1(sp)
+ or w, 2(sp)
+ or w, 3(sp)
+ or w, 4(sp)
+ snz ; Any more significant bits to multiply?
+ page 3f
+ jmp 3f
+
+ sb 4(sp), 0 ; Check LSB of multiplier
+ page 1f ; zero => scale multiplicand & multiplier
+ jmp 1f
+
+ mov w, 8(sp) ; Accumulate product
+ add $83, w
+ mov w, 7(sp)
+ addc $82, w
+ mov w, 6(sp)
+ addc $81, w
+ mov w, 5(sp)
+ addc $80, w
+1: clrb status, 0 ; scale multiplier down
+ rr 1(sp)
+ rr 2(sp)
+ rr 3(sp)
+ rr 4(sp)
+ clrb status, 0
+ rl 8(sp)
+ rl 7(sp)
+ rl 6(sp)
+ rl 5(sp)
+ page 2b
+ jmp 2b
+
+3: mov w, #8
+ add spl ,w ; pop both 4-byte arguments
+ ret
+
+ .endfunc
+
+#endif /* defined (L_mulsi3) */
+
+#if defined (L_muldi3)
+/*******************************************************
+ Multiplication 64 x 64
+
+ Same shift-and-add scheme as __mulsi3, widened to 8
+ bytes: multiplier in 1..8(sp), multiplicand in
+ 9..16(sp), result in $80..$87. Pops the 16 argument
+ bytes on exit.
+*******************************************************/
+
+ .sect .text.libgcc,"ax"
+ .global __muldi3
+ .func _muldi3, __muldi3
+
+__muldi3:
+ clr $80 ; Assume zero result
+ clr $81
+ clr $82
+ clr $83
+ clr $84
+ clr $85
+ clr $86
+ clr $87
+
+2: mov w, 1(sp)
+ or w, 2(sp)
+ or w, 3(sp)
+ or w, 4(sp)
+ or w, 5(sp)
+ or w, 6(sp)
+ or w, 7(sp)
+ or w, 8(sp)
+ snz ; Any more significant bits to multiply?
+ page 3f
+ jmp 3f
+
+ sb 8(sp), 0 ; Check LSB of multiplier
+ page 1f ; zero => scale multiplicand & multiplier
+ jmp 1f
+
+ mov w, 16(sp) ; Accumulate product
+ add $87, w
+ mov w, 15(sp)
+ addc $86, w
+ mov w, 14(sp)
+ addc $85, w
+ mov w, 13(sp)
+ addc $84, w
+ mov w, 12(sp)
+ addc $83, w
+ mov w, 11(sp)
+ addc $82, w
+ mov w, 10(sp)
+ addc $81, w
+ mov w, 9(sp)
+ addc $80, w
+
+1: clrb status, 0 ; scale multiplier down
+ rr 1(sp)
+ rr 2(sp)
+ rr 3(sp)
+ rr 4(sp)
+ rr 5(sp)
+ rr 6(sp)
+ rr 7(sp)
+ rr 8(sp)
+ clrb status, 0
+ rl 16(sp)
+ rl 15(sp)
+ rl 14(sp)
+ rl 13(sp)
+ rl 12(sp)
+ rl 11(sp)
+ rl 10(sp)
+ rl 9(sp)
+ page 2b
+ jmp 2b
+
+3: mov w, #16
+ add spl, w ; pop both 8-byte arguments
+ ret
+
+ .endfunc
+
+#endif /* defined (L_muldi3) */
+
+#if defined (L_divmodhi4)
+; __divmodhi4 -- signed 16-bit divide/modulo, layered on __udivmodhi4.
+; Records the dividend sign (arg1_sign) and the XOR of both operand signs
+; (res_sign), two's-complement-negates any negative input, performs the
+; unsigned divide, then negates the remainder to match the dividend's sign
+; and the quotient when the operand signs differed.
+; Results: quotient in resh/resl ($80/$81), remainder in remh/reml ($82/$83).
+#define arg1h 1(SP)
+#define arg1l 2(SP)
+#define arg2h 3(SP)
+#define arg2l 4(SP)
+#define resl $81
+#define resh $80
+#define reml $83
+#define remh $82
+#define tmp_var $84
+#define cnt $85
+#define arg1_sign $86
+#define res_sign $87
+
+ .sect .text.libgcc,"ax"
+ .global __divmodhi4
+ .func _divmodhi4, __divmodhi4
+
+__divmodhi4:
+ mov w,arg2h
+ mov res_sign,w
+ mov w,arg1h
+ mov arg1_sign,w
+ xor res_sign,w ; bit 7 = quotient sign
+
+ sb arg1h,7 ; negate arg1 if negative
+ page 1f
+ jmp 1f
+
+ not arg1h
+ not arg1l
+ incsnz arg1l
+ inc arg1h
+
+1: sb arg2h, 7 ; negate arg2 if negative
+ page 1f
+ jmp 1f
+
+ not arg2h
+ not arg2l
+ incsnz arg2l
+ inc arg2h
+
+1: page __udivmodhi4 ; Do the unsigned div/mod
+ call __udivmodhi4
+
+ sb arg1_sign, 7 ; remainder takes the dividend's sign
+ page 1f
+ jmp 1f
+
+ not reml
+ not remh
+ incsnz reml
+ inc remh
+
+1: sb res_sign, 7 ; negate quotient if operand signs differed
+ ret
+
+ not resl
+ not resh
+ incsnz resl
+ inc resh
+ ret
+
+ .endfunc
+
+#undef arg1h
+#undef arg1l
+#undef arg2h
+#undef arg2l
+#undef resl
+#undef resh
+#undef reml
+#undef remh
+#undef tmp_var
+#undef cnt
+#undef arg1_sign
+#undef res_sign
+
+#endif /* defined (L_divmodhi4) */
+
+#if defined (L_udivmodhi4)
+; __udivmodhi4 -- unsigned 16-bit divide/modulo using a restoring
+; shift-subtract loop (17 iterations). Dividend in 1..2(SP), divisor in
+; 3..4(SP). Quotient bits shift into the dividend slots (via the rl at
+; label 1) and are finally popped into resh/resl ($80/$81); the remainder
+; ends in remh/reml ($82/$83). All 4 argument bytes are consumed.
+
+#define arg1h 1(SP)
+#define arg1l 2(SP)
+#define arg2h 3(SP)
+#define arg2l 4(SP)
+#define resl $81
+#define resh $80
+#define reml $83
+#define remh $82
+#define tmp_var $84
+#define cnt $85
+
+ .sect .text.libgcc,"ax"
+ .global __udivmodhi4
+ .func _udivmodhi4, __udivmodhi4
+
+__udivmodhi4:
+ clr reml
+ clr remh
+ mov w, #17 ; 16 bits + the initial entry pass
+ mov cnt,w
+ clrb status, 0
+ page 1f
+ jmp 1f
+
+2: rl reml ; shift next dividend bit into remainder
+ rl remh
+ mov w, arg2l ; trial-subtract divisor from remainder
+ sub w, reml
+ mov tmp_var, w
+ mov w, arg2h
+ subc w, remh
+ sc ; keep old remainder if it would underflow
+ page 1f
+ jmp 1f
+ mov remh, w
+ mov w, tmp_var
+ mov reml, w
+
+1: rl arg1l ; shift dividend left; quotient bit enters
+ rl arg1h
+ decsz cnt
+ page 2b
+ jmp 2b
+
+ pop resh ; dividend slots now hold the quotient
+ pop resl
+ mov w, #2 ; discard the divisor
+ add spl, w
+ ret
+
+ .endfunc
+
+#undef arg1h
+#undef arg1l
+#undef arg2h
+#undef arg2l
+#undef resl
+#undef resh
+#undef reml
+#undef remh
+#undef tmp_var
+#undef cnt
+
+#endif /* defined (L_udivmodhi4) */
+
+#if defined (L_divmodsi4)
+; __divmodsi4 -- signed 32-bit divide/modulo, layered on __udivmodsi4.
+; Same scheme as __divmodhi4: record the dividend sign and the XOR of the
+; operand signs, negate negative inputs, divide unsigned, then fix up the
+; remainder (dividend's sign) and quotient (sign XOR) afterwards.
+; Quotient in $80..$83, remainder in $84..$87.
+
+#define arg1a 1(SP)
+#define arg1b 2(SP)
+#define arg1c 3(SP)
+#define arg1d 4(SP)
+
+#define arg2a 5(SP)
+#define arg2b 6(SP)
+#define arg2c 7(SP)
+#define arg2d 8(SP)
+
+#define resa $80
+#define resb $81
+#define resc $82
+#define resd $83
+
+#define rema $84
+#define remb $85
+#define remc $86
+#define remd $87
+
+#define tmp_var $88
+#define tmp_var1 $89
+#define tmp_var2 $8a
+#define cnt $8b
+#define arg1_sign $8c
+#define res_sign $8d
+
+ .sect .text.libgcc,"ax"
+ .global __divmodsi4
+ .func _divmodsi4, __divmodsi4
+
+__divmodsi4:
+ mov w, arg2a
+ mov res_sign, w
+ mov w, arg1a
+ mov arg1_sign, w
+ xor res_sign, w ; bit 7 = quotient sign
+
+ sb arg1a, 7 ; negate arg1 if negative
+ page 1f
+ jmp 1f
+
+ not arg1d ; 32-bit two's complement with early-out
+ not arg1c ; once no more carries can propagate
+ not arg1b
+ not arg1a
+ incsnz arg1d
+ incsz arg1c
+ page 1f
+ jmp 1f
+ incsnz arg1b
+ inc arg1a
+
+1: sb arg2a, 7 ; negate arg2 if negative
+ page 1f
+ jmp 1f
+
+ not arg2d
+ not arg2c
+ not arg2b
+ not arg2a
+ incsnz arg2d
+ incsz arg2c
+ page 1f
+ jmp 1f
+ incsnz arg2b
+ inc arg2a
+
+1: page __udivmodsi4 ; Do the unsigned div/mod.
+ call __udivmodsi4
+
+ sb arg1_sign, 7 ; remainder takes the dividend's sign
+ page 1f
+ jmp 1f
+
+ not remd
+ not remc
+ not remb
+ not rema
+ incsnz remd
+ incsz remc
+ page 1f
+ jmp 1f
+ incsnz remb
+ inc rema
+
+1: sb res_sign, 7 ; negate quotient if operand signs differed
+ ret
+
+ not resd
+ not resc
+ not resb
+ not resa
+ incsnz resd
+ incsz resc
+ ret
+ incsnz resb
+ inc resa
+ ret
+
+ .endfunc
+
+#undef arg1a
+#undef arg1b
+#undef arg1c
+#undef arg1d
+
+#undef arg2a
+#undef arg2b
+#undef arg2c
+#undef arg2d
+
+#undef resa
+#undef resb
+#undef resc
+#undef resd
+
+#undef rema
+#undef remb
+#undef remc
+#undef remd
+
+#undef tmp_var
+#undef tmp_var1
+#undef tmp_var2
+#undef cnt
+#undef arg1_sign
+#undef res_sign
+
+#endif /* defined (L_divmodsi4) */
+
+#if defined (L_udivmodsi4)
+; __udivmodsi4 -- unsigned 32-bit divide/modulo: restoring shift-subtract,
+; 33 iterations, widened from __udivmodhi4. Dividend 1..4(SP), divisor
+; 5..8(SP). Quotient bits shift into the dividend slots and are popped
+; into $80..$83; remainder ends in $84..$87. All 8 arg bytes consumed.
+#define arg1a 1(SP)
+#define arg1b 2(SP)
+#define arg1c 3(SP)
+#define arg1d 4(SP)
+
+#define arg2a 5(SP)
+#define arg2b 6(SP)
+#define arg2c 7(SP)
+#define arg2d 8(SP)
+
+#define resa $80
+#define resb $81
+#define resc $82
+#define resd $83
+
+#define rema $84
+#define remb $85
+#define remc $86
+#define remd $87
+
+#define tmp_var $88
+#define tmp_var1 $89
+#define tmp_var2 $8a
+#define cnt $8b
+
+ .sect .text.libgcc,"ax"
+ .global __udivmodsi4
+ .func _udivmodsi4, __udivmodsi4
+
+__udivmodsi4:
+ clr remd
+ clr remc
+ clr remb
+ clr rema
+ mov w, #33 ; 32 bits + the initial entry pass
+ mov cnt, w
+ clrb status, 0
+ page 1f
+ jmp 1f
+
+2: rl remd ; shift next dividend bit into remainder
+ rl remc
+ rl remb
+ rl rema
+ mov w, arg2d ; trial-subtract divisor from remainder
+ sub w, remd
+ mov tmp_var, w
+ mov w, arg2c
+ subc w, remc
+ mov tmp_var1, w
+ mov w, arg2b
+ subc w, remb
+ mov tmp_var2, w
+ mov w, arg2a
+ subc w, rema
+ sc ; keep old remainder on underflow
+ page 1f
+ jmp 1f
+
+ mov rema, w
+ mov w, tmp_var2
+ mov remb, w
+ mov w, tmp_var1
+ mov remc, w
+ mov w, tmp_var
+ mov remd, w
+
+1: rl arg1d ; shift dividend left; quotient bit enters
+ rl arg1c
+ rl arg1b
+ rl arg1a
+ decsz cnt
+ page 2b
+ jmp 2b
+
+ pop resa ; dividend slots now hold the quotient
+ pop resb
+ pop resc
+ pop resd
+ mov w, #4 ; discard the divisor
+ add spl, w
+ ret
+
+ .endfunc
+
+#undef arg1a
+#undef arg1b
+#undef arg1c
+#undef arg1d
+
+#undef arg2a
+#undef arg2b
+#undef arg2c
+#undef arg2d
+
+#undef resa
+#undef resb
+#undef resc
+#undef resd
+
+#undef rema
+#undef remb
+#undef remc
+#undef remd
+
+#undef tmp_var
+#undef tmp_var1
+#undef tmp_var2
+#undef cnt
+
+#endif /* defined (L_udivmodsi4) */
+
+#if defined (L_divmoddi4)
+; __divmoddi4 -- signed 64-bit divide/modulo, layered on __udivmoddi4.
+; All of $80..$97 are taken by results/temporaries, so the two sign flags
+; are packed into MULH instead: bit 0 is tested for the remainder fix-up
+; (dividend sign), bit 1 for the quotient fix-up (sign XOR). Otherwise the
+; structure mirrors __divmodsi4. Quotient in $80..$87, remainder $88..$8f.
+
+#define arg1s 1(SP)
+#define arg1t 2(SP)
+#define arg1u 3(SP)
+#define arg1v 4(SP)
+#define arg1w 5(SP)
+#define arg1x 6(SP)
+#define arg1y 7(SP)
+#define arg1z 8(SP)
+
+#define arg2s 9(SP)
+#define arg2t 10(SP)
+#define arg2u 11(SP)
+#define arg2v 12(SP)
+#define arg2w 13(SP)
+#define arg2x 14(SP)
+#define arg2y 15(SP)
+#define arg2z 16(SP)
+
+#define ress $80
+#define rest $81
+#define resu $82
+#define resv $83
+#define resw $84
+#define resx $85
+#define resy $86
+#define resz $87
+
+#define rems $88
+#define remt $89
+#define remu $8a
+#define remv $8b
+#define remw $8c
+#define remx $8d
+#define remy $8e
+#define remz $8f
+
+#define tmp_var $90
+#define tmp_var1 $91
+#define tmp_var2 $92
+#define tmp_var3 $93
+#define tmp_var4 $94
+#define tmp_var5 $95
+#define tmp_var6 $96
+#define cnt $97
+
+ .sect .text.libgcc,"ax"
+ .global __divmoddi4
+ .func _divmoddi4, __divmoddi4
+
+__divmoddi4:
+ rl w, arg2s ; Use MULH to track sign bits.
+ rl MULH
+ rl w, arg1s
+ rl WREG
+ xor MULH, w
+ rl w, arg1s
+ rl MULH
+
+ sb arg1s, 7 ; negate arg1 if negative
+ page 1f
+ jmp 1f
+
+ not arg1s ; 64-bit two's complement, with early-outs
+ not arg1t ; once no further carry can propagate
+ not arg1u
+ not arg1v
+ not arg1w
+ not arg1x
+ not arg1y
+ not arg1z
+ incsnz arg1z
+ incsz arg1y
+ page 1f
+ jmp 1f
+ incsnz arg1x
+ incsz arg1w
+ page 1f
+ jmp 1f
+ incsnz arg1v
+ incsz arg1u
+ page 1f
+ jmp 1f
+ incsnz arg1t
+ inc arg1s
+
+1: sb arg2s, 7 ; negate arg2 if negative
+ page 1f
+ jmp 1f
+
+ not arg2s
+ not arg2t
+ not arg2u
+ not arg2v
+ not arg2w
+ not arg2x
+ not arg2y
+ not arg2z
+ incsnz arg2z
+ incsz arg2y
+ page 1f
+ jmp 1f
+ incsnz arg2x
+ incsz arg2w
+ page 1f
+ jmp 1f
+ incsnz arg2v
+ incsz arg2u
+ page 1f
+ jmp 1f
+ incsnz arg2t
+ inc arg2s
+
+1: page __udivmoddi4 ; Do the unsigned div/mod.
+ call __udivmoddi4
+
+ sb MULH, 0 ; Look at the save sign bit for arg 1.
+ page 1f
+ jmp 1f
+
+ not rems ; remainder takes the dividend's sign
+ not remt
+ not remu
+ not remv
+ not remw
+ not remx
+ not remy
+ not remz
+ incsnz remz
+ incsz remy
+ page 1f
+ jmp 1f
+ incsnz remx
+ incsz remw
+ page 1f
+ jmp 1f
+ incsnz remv
+ incsz remu
+ page 1f
+ jmp 1f
+ incsnz remt
+ inc rems
+
+1: sb MULH, 1 ; negate quotient if operand signs differed
+ ret
+
+ not ress
+ not rest
+ not resu
+ not resv
+ not resw
+ not resx
+ not resy
+ not resz
+ incsnz resz
+ incsz resy
+ ret
+ incsnz resx
+ incsz resw
+ ret
+ incsnz resv
+ incsz resu
+ ret
+ incsnz rest
+ inc ress
+ ret
+
+ .endfunc
+
+#undef arg1s
+#undef arg1t
+#undef arg1u
+#undef arg1v
+#undef arg1w
+#undef arg1x
+#undef arg1y
+#undef arg1z
+
+#undef arg2s
+#undef arg2t
+#undef arg2u
+#undef arg2v
+#undef arg2w
+#undef arg2x
+#undef arg2y
+#undef arg2z
+
+#undef ress
+#undef rest
+#undef resu
+#undef resv
+#undef resw
+#undef resx
+#undef resy
+#undef resz
+
+#undef rems
+#undef remt
+#undef remu
+#undef remv
+#undef remw
+#undef remx
+#undef remy
+#undef remz
+
+#undef tmp_var
+#undef tmp_var1
+#undef tmp_var2
+#undef tmp_var3
+#undef tmp_var4
+#undef tmp_var5
+#undef tmp_var6
+#undef cnt
+
+#endif /* defined (L_divmoddi4) */
+
+#if defined (L_udivmoddi4)
+; __udivmoddi4 -- unsigned 64-bit divide/modulo: restoring shift-subtract,
+; 65 iterations, widened from __udivmodsi4. Dividend 1..8(SP), divisor
+; 9..16(SP). Quotient bits shift into the dividend slots and are popped
+; into $80..$87; remainder ends in $88..$8f. All 16 arg bytes consumed.
+#define arg1s 1(SP)
+#define arg1t 2(SP)
+#define arg1u 3(SP)
+#define arg1v 4(SP)
+#define arg1w 5(SP)
+#define arg1x 6(SP)
+#define arg1y 7(SP)
+#define arg1z 8(SP)
+
+#define arg2s 9(SP)
+#define arg2t 10(SP)
+#define arg2u 11(SP)
+#define arg2v 12(SP)
+#define arg2w 13(SP)
+#define arg2x 14(SP)
+#define arg2y 15(SP)
+#define arg2z 16(SP)
+
+#define ress $80
+#define rest $81
+#define resu $82
+#define resv $83
+#define resw $84
+#define resx $85
+#define resy $86
+#define resz $87
+
+#define rems $88
+#define remt $89
+#define remu $8a
+#define remv $8b
+#define remw $8c
+#define remx $8d
+#define remy $8e
+#define remz $8f
+
+#define tmp_var $90
+#define tmp_var1 $91
+#define tmp_var2 $92
+#define tmp_var3 $93
+#define tmp_var4 $94
+#define tmp_var5 $95
+#define tmp_var6 $96
+#define cnt $97
+
+ .sect .text.libgcc,"ax"
+ .global __udivmoddi4
+ .func _udivmoddi4, __udivmoddi4
+
+__udivmoddi4:
+ clr rems
+ clr remt
+ clr remu
+ clr remv
+ clr remw
+ clr remx
+ clr remy
+ clr remz
+ mov w, #65 ; 64 bits + the initial entry pass
+ mov cnt, w
+ clrb status, 0
+ page 1f
+ jmp 1f
+
+2: rl remz ; shift next dividend bit into remainder
+ rl remy
+ rl remx
+ rl remw
+ rl remv
+ rl remu
+ rl remt
+ rl rems
+ mov w, arg2z ; trial-subtract divisor from remainder
+ sub w, remz
+ mov tmp_var, w
+ mov w, arg2y
+ subc w, remy
+ mov tmp_var1, w
+ mov w, arg2x
+ subc w, remx
+ mov tmp_var2, w
+ mov w, arg2w
+ subc w, remw
+ mov tmp_var3, w
+ mov w, arg2v
+ subc w, remv
+ mov tmp_var4, w
+ mov w, arg2u
+ subc w, remu
+ mov tmp_var5, w
+ mov w, arg2t
+ subc w, remt
+ mov tmp_var6, w
+ mov w, arg2s
+ subc w, rems
+ sc ; keep old remainder on underflow
+ page 1f
+ jmp 1f
+
+ mov rems, w
+ mov w, tmp_var6
+ mov remt, w
+ mov w, tmp_var5
+ mov remu, w
+ mov w, tmp_var4
+ mov remv, w
+ mov w, tmp_var3
+ mov remw, w
+ mov w, tmp_var2
+ mov remx, w
+ mov w, tmp_var1
+ mov remy, w
+ mov w, tmp_var
+ mov remz, w
+
+1: rl arg1z ; shift dividend left; quotient bit enters
+ rl arg1y
+ rl arg1x
+ rl arg1w
+ rl arg1v
+ rl arg1u
+ rl arg1t
+ rl arg1s
+ decsz cnt
+ page 2b
+ jmp 2b
+
+ pop ress ; dividend slots now hold the quotient
+ pop rest
+ pop resu
+ pop resv
+ pop resw
+ pop resx
+ pop resy
+ pop resz
+ mov w, #8 ; discard the divisor
+ add spl, w
+ ret
+
+ .endfunc
+
+#undef arg1s
+#undef arg1t
+#undef arg1u
+#undef arg1v
+#undef arg1w
+#undef arg1x
+#undef arg1y
+#undef arg1z
+
+#undef arg2s
+#undef arg2t
+#undef arg2u
+#undef arg2v
+#undef arg2w
+#undef arg2x
+#undef arg2y
+#undef arg2z
+
+#undef ress
+#undef rest
+#undef resu
+#undef resv
+#undef resw
+#undef resx
+#undef resy
+#undef resz
+
+#undef rems
+#undef remt
+#undef remu
+#undef remv
+#undef remw
+#undef remx
+#undef remy
+#undef remz
+
+#undef tmp_var
+#undef tmp_var1
+#undef tmp_var2
+#undef tmp_var3
+#undef tmp_var4
+#undef tmp_var5
+#undef tmp_var6
+#undef cnt
+
+#endif /* defined (L_udivmoddi4) */
+
+; Three-way comparison result codes returned (via retw) by the __cmp*
+; helpers below; the leading '#' makes each an immediate operand.
+#define LT #0
+#define EQ #1
+#define GT #2
+
+#if defined(L_cmphi2)
+; __cmphi2 -- three-way signed 16-bit compare; returns LT/EQ/GT in W.
+; Builds a 3-bit index in arg2l -- [carry of arg1-arg2 | sign(arg1) |
+; sign(arg2)] -- leaves it as the last stacked byte, and dispatches
+; through the retw table at __cmpqi_ret, which is shared with
+; __cmpqi2/__cmpsi2/__cmpdi2 (callers enter __cmp_ret with W holding the
+; number of argument bytes to discard before the index byte).
+#define arg1l 2(sp)
+#define arg1h 1(sp)
+#define arg2l 4(sp)
+#define arg2h 3(sp)
+
+ .sect .text.libgcc,"ax"
+ .global __cmphi2
+ .global __cmp_ret
+ .global __cmpqi_ret
+ .func _cmphi2, __cmphi2
+
+__cmphi2:
+ mov w,arg1l
+ sub w,arg2l
+ snz ; low bytes equal => compare high bytes
+ page 2f
+ jmp 2f
+ mov w,arg1h
+1:
+ subc w,arg2h
+ clr arg2l ; build the 3-bit dispatch index in arg2l
+ rl arg2l ; bit 2 = borrow of arg1 - arg2
+ snb arg1h,7
+ setb arg2l,2 ; bit 1 = sign of arg1
+ snb arg2h,7
+ setb arg2l,1 ; bit 0 = sign of arg2
+ mov w,#3
+
+__cmp_ret:
+ add spl,w ; discard args, leaving the index byte
+ pop wreg ; w = [borrow | sign1 | sign2] index
+
+__cmpqi_ret:
+ add pcl,w ; computed jump into the retw table
+ retw GT ; [000] arg1 > arg2
+ retw LT ; [001] arg1 < arg2
+ retw GT ; [010] arg1 > arg2
+ retw GT ; [011] arg1 > arg2
+ retw LT ; [100] arg1 < arg2
+ retw LT ; [101] arg1 < arg2
+ retw GT ; [110] arg1 > arg2
+ retw LT ; [111] arg1 < arg2
+2:
+ mov w,arg1h
+ cse w,arg2h ; skip unless high bytes also equal
+ page 1b
+ jmp 1b
+ mov w,#4 ; fully equal: pop args, return EQ
+ add spl,w
+ retw EQ
+
+ .endfunc
+#undef arg1l
+#undef arg1h
+#undef arg2l
+#undef arg2h
+#endif /* L_cmphi2 */
+
+#if defined(L_cmpqi2)
+; __cmpqi2 -- three-way signed 8-bit compare; returns LT/EQ/GT in W.
+; Builds the same 3-bit [borrow | sign1 | sign2] index used by __cmphi2
+; (here in WREG directly), pops its two 1-byte args, and dispatches
+; through the shared retw table at __cmpqi_ret.
+#define arg1 1(sp)
+#define arg2 2(sp)
+
+ .sect .text.libgcc,"ax"
+ .global __cmpqi2
+ .func _cmpqi2, __cmpqi2
+
+__cmpqi2:
+ mov w, arg1
+ sub w, arg2
+ snz ; equal => return EQ
+ page 2f
+ jmp 2f
+
+ clr wreg ; build dispatch index in WREG
+ rl wreg ; bit 2 = borrow of arg1 - arg2
+ snb arg1, 7
+ setb wreg, 2 ; bit 1 = sign of arg1
+ snb arg2, 7
+ setb wreg, 1 ; bit 0 = sign of arg2
+ inc spl ; discard both argument bytes
+ inc spl
+ page __cmpqi_ret
+ jmp __cmpqi_ret
+
+2: mov w, #2
+ add spl, w
+ retw EQ
+
+ .endfunc
+; Fix: these previously read "#undef arg1l"/"#undef arg2l" (copied from the
+; L_cmphi2 section), leaving arg1/arg2 defined past this #endif.
+#undef arg1
+#undef arg2
+#endif /* L_cmpqi2 */
+
+#if defined(L_cmpsi2)
+; __cmpsi2 -- three-way signed 32-bit compare; returns LT/EQ/GT in W.
+; Same scheme as __cmphi2: byte-wise equality scan from the low end, a
+; multi-byte subtract to recover the borrow, then dispatch via __cmp_ret
+; with W = 7 (arg bytes to discard before the index byte in arg2d).
+#define arg1d 4(sp)
+#define arg1c 3(sp)
+#define arg1b 2(sp)
+#define arg1a 1(sp)
+#define arg2d 8(sp)
+#define arg2c 7(sp)
+#define arg2b 6(sp)
+#define arg2a 5(sp)
+
+ .sect .text.libgcc,"ax"
+ .global __cmpsi2
+ .func _cmpsi2, __cmpsi2
+
+__cmpsi2:
+ mov w, arg1d
+ sub w, arg2d
+ snz ; low bytes equal => scan remaining bytes
+ page 2f
+ jmp 2f
+
+1: mov w, arg1c ; propagate the borrow up to the top byte
+ subc w, arg2c
+ mov w, arg1b
+ subc w, arg2b
+ mov w, arg1a
+ subc w, arg2a
+
+ clr arg2d ; build the 3-bit dispatch index in arg2d
+ rl arg2d
+ snb arg1a, 7
+ setb arg2d, 2
+ snb arg2a, 7
+ setb arg2d, 1
+ mov w, #7
+ page __cmp_ret
+ jmp __cmp_ret
+
+2: mov w, arg1c
+ cse w, arg2c
+ page 1b
+ jmp 1b
+
+ mov w, arg1b
+ cse w, arg2b
+ page 1b
+ jmp 1b
+
+ mov w, arg1a
+ cse w, arg2a
+ page 1b
+ jmp 1b
+
+ mov w, #8 ; fully equal: pop args, return EQ
+ add spl, w
+ retw EQ
+
+ .endfunc
+
+#undef arg1d
+#undef arg1c
+#undef arg1b
+#undef arg1a
+#undef arg2d
+#undef arg2c
+#undef arg2b
+#undef arg2a
+#endif /* L_cmpsi2 */
+
+#if defined(L_cmpdi2)
+; __cmpdi2 -- three-way signed 64-bit compare; returns LT/EQ/GT in W.
+; Widened __cmpsi2: byte-wise equality scan, borrow-propagating subtract,
+; then dispatch via __cmp_ret with W = 15 (arg bytes to discard before
+; the index byte in arg2z).
+#define arg1z 8(sp)
+#define arg1y 7(sp)
+#define arg1x 6(sp)
+#define arg1w 5(sp)
+#define arg1v 4(sp)
+#define arg1u 3(sp)
+#define arg1t 2(sp)
+#define arg1s 1(sp)
+
+#define arg2z 16(sp)
+#define arg2y 15(sp)
+#define arg2x 14(sp)
+#define arg2w 13(sp)
+#define arg2v 12(sp)
+#define arg2u 11(sp)
+#define arg2t 10(sp)
+#define arg2s 9(sp)
+
+ .sect .text.libgcc,"ax"
+ .global __cmpdi2
+ .func _cmpdi2, __cmpdi2
+
+__cmpdi2:
+ mov w, arg1z
+ sub w, arg2z
+ snz ; low bytes equal => scan remaining bytes
+ page 2f
+ jmp 2f
+
+1: mov w, arg1y ; propagate the borrow up to the top byte
+ subc w, arg2y
+ mov w, arg1x
+ subc w, arg2x
+ mov w, arg1w
+ subc w, arg2w
+ mov w, arg1v
+ subc w, arg2v
+ mov w, arg1u
+ subc w, arg2u
+ mov w, arg1t
+ subc w, arg2t
+ mov w, arg1s
+ subc w, arg2s
+ clr arg2z ; build the 3-bit dispatch index in arg2z
+ rl arg2z
+ snb arg1s, 7
+ setb arg2z, 2
+ snb arg2s, 7
+ setb arg2z, 1
+ mov w, #15
+ page __cmp_ret
+ jmp __cmp_ret
+
+2: mov w, arg1y
+ cse w, arg2y
+ page 1b
+ jmp 1b
+
+ mov w, arg1x
+ cse w, arg2x
+ page 1b
+ jmp 1b
+
+ mov w, arg1w
+ cse w, arg2w
+ page 1b
+ jmp 1b
+
+ mov w, arg1v
+ cse w, arg2v
+ page 1b
+ jmp 1b
+
+ mov w, arg1u
+ cse w, arg2u
+ page 1b
+ jmp 1b
+
+ mov w, arg1t
+ cse w, arg2t
+ page 1b
+ jmp 1b
+
+ mov w, arg1s
+ cse w, arg2s
+ page 1b
+ jmp 1b
+
+ mov w, #16 ; fully equal: pop args, return EQ
+ add spl, w
+ retw EQ
+
+ .endfunc
+
+#undef arg1z
+#undef arg1y
+#undef arg1x
+#undef arg1w
+#undef arg1v
+#undef arg1u
+#undef arg1t
+#undef arg1s
+#undef arg2z
+#undef arg2y
+#undef arg2x
+#undef arg2w
+#undef arg2v
+#undef arg2u
+#undef arg2t
+#undef arg2s
+
+#endif /* L_cmpdi2 */
+
+#if defined(L_cmpdi2_dp)
+; __cmpdi2_dp -- compare the 8 bytes at (dp)..7(dp) against a stacked
+; 8-byte value: pushes the DP-relative operand (high byte last) so the
+; stack matches __cmpdi2's layout, then tail-jumps to __cmpdi2, which
+; pops everything and returns LT/EQ/GT in W.
+ .sect .text.libgcc,"ax"
+ .global __cmpdi2_dp
+ .func _cmpdi2_dp, __cmpdi2_dp
+
+__cmpdi2_dp:
+ push 7(dp)
+ push 6(dp)
+ push 5(dp)
+ push 4(dp)
+ push 3(dp)
+ push 2(dp)
+ push 1(dp)
+ push (dp)
+ page __cmpdi2
+ jmp __cmpdi2
+
+ .endfunc
+#endif /* L_cmpdi2_dp */
+
+#if defined(L_fp_pop_args_ret)
+ .sect .pram.libgcc,"ax"
+ .global __fp_pop_args_ret
+ .global __pop_args_ret
+ .func __fp_pop_args_ret, __fp_pop_args_ret
+
+__fp_pop_args_ret:
+ pop 0xfd
+ pop 0xfe
+__pop_args_ret:
+ pop callh
+ pop calll
+ add spl, w
+ ret
+
+ .endfunc
+#endif /* L_fp_pop_args_ret */
+
+#if defined(L_leaf_fp_pop_args_ret)
+;
+; Epilogue helper for leaf functions using a frame pointer.  Same as
+; __fp_pop_args_ret but without reloading callh/calll - a leaf function
+; apparently never moved its return address onto the data stack.
+; On entry w holds the number of argument bytes to discard.
+;
+ .sect .pram.libgcc,"ax"
+ .global __leaf_fp_pop_args_ret
+ .func __leaf_fp_pop_args_ret, __leaf_fp_pop_args_ret
+
+__leaf_fp_pop_args_ret:
+ pop 0xfd		; restore caller's frame pointer (high, low)
+ pop 0xfe
+ add spl, w		; discard w bytes of stacked arguments
+ ret
+
+ .endfunc
+#endif /* L_leaf_fp_pop_args_ret */
+
+#if defined(L_fp_pop2_args_ret)
+;
+; Variant of __fp_pop_args_ret for the common case of exactly two
+; argument bytes: w is loaded with #2 here instead of by the caller.
+; Restores the frame pointer (0xfd/0xfe - presumably the FP pair) and
+; the return address, pops two argument bytes, and returns.
+;
+ .sect .pram.libgcc,"ax"
+ .global __fp_pop2_args_ret
+ .global __pop2_args_ret
+ .func __fp_pop2_args_ret, __fp_pop2_args_ret
+
+__fp_pop2_args_ret:
+ pop 0xfd		; restore caller's frame pointer (high, low)
+ pop 0xfe
+__pop2_args_ret:
+ mov w, #2		; fixed two-byte argument area
+ pop callh		; restore the return address
+ pop calll
+ add spl, w
+ ret
+
+ .endfunc
+#endif /* L_fp_pop2_args_ret */
+
+#if defined(L_leaf_fp_pop2_args_ret)
+;
+; Leaf-function variant of __fp_pop2_args_ret: restores the frame
+; pointer (0xfd/0xfe) and pops exactly two argument bytes, without
+; touching callh/calll (leaf functions apparently keep their return
+; address off the data stack).
+;
+ .sect .pram.libgcc,"ax"
+ .global __leaf_fp_pop2_args_ret
+ .func __leaf_fp_pop2_args_ret, __leaf_fp_pop2_args_ret
+
+__leaf_fp_pop2_args_ret:
+ pop 0xfd		; restore caller's frame pointer (high, low)
+ pop 0xfe
+ mov w, #2		; fixed two-byte argument area
+ add spl, w
+ ret
+
+ .endfunc
+#endif /* L_leaf_fp_pop2_args_ret */
+
+#if defined(L_movstrhi_countqi)
+;
+; __movstrhi_countqi - memcpy-style block copy with an 8-bit count.
+; Stacked arguments (offsets measured after the four pushes below):
+;   5(SP)/6(SP) = destination pointer (high, low)
+;   7(SP)/8(SP) = source pointer (high, low)
+;   9(SP)      = byte count (a count of 0 would wrap via decsz and copy
+;                256 bytes - presumably callers guarantee non-zero)
+; NOTE(review): only ipl/dpl are incremented in the loop, so a copy
+; apparently must not carry out of the low address byte - confirm
+; against the code the compiler emits for callers.
+;
+ .sect .pram.libgcc,"ax"
+ .global __movstrhi_countqi
+ .func _movstrhi_countqi, __movstrhi_countqi
+
+__movstrhi_countqi:
+ push dph ; Save our pointer regs
+ push dpl
+ push iph
+ push ipl
+
+ mov w, 5(SP) ; Get our dest pointer
+ mov dph, w
+ mov w, 6(SP)
+ mov dpl, w
+ mov w, 7(SP) ; And our source pointer
+ mov iph, w
+ mov w, 8(SP)
+ mov ipl, w
+
+1: push (IP) ; *dest++ = *src++
+ pop 0(DP)
+ inc ipl
+ inc dpl
+ decsz 9(SP) ; Loop until completed
+ page 1b
+ jmp 1b
+
+ pop ipl ; Restore our pointer regs
+ pop iph
+ pop dpl
+ pop dph
+
+ mov w, #5 ; Tidy up our stack args
+ add spl, w
+ ret
+
+ .endfunc
+#endif
+
+#if defined(L_movstrhi_counthi)
+;
+; __movstrhi_counthi - memcpy-style block copy with a 16-bit count.
+; Stacked arguments (offsets measured after the four pushes below):
+;   5(SP)/6(SP)  = destination pointer (high, low)
+;   7(SP)/8(SP)  = source pointer (high, low)
+;   9(SP)/10(SP) = byte count (high, low)
+; The count MSB is pre-incremented when the LSB is non-zero so the
+; inner loop can chain decsnz (LSB) into decsz (MSB) skip tricks.
+; NOTE(review): only ipl/dpl are incremented in the loop, so a copy
+; apparently must not carry out of the low address byte - confirm
+; against the code the compiler emits for callers.
+;
+ .sect .text.libgcc,"ax"
+ .global __movstrhi_counthi
+ .func _movstrhi_counthi, __movstrhi_counthi
+
+__movstrhi_counthi:
+ push dph ; Save our pointer regs
+ push dpl
+ push iph
+ push ipl
+
+ mov w, 5(SP) ; Get our dest pointer
+ mov dph, w
+ mov w, 6(SP)
+ mov dpl, w
+ mov w, 7(SP) ; And our source pointer
+ mov iph, w
+ mov w, 8(SP)
+ mov ipl, w
+
+ test 10(SP) ; If we have a non-zero LSB then adjust the
+ sz ; MSB of the loop count to allow us to use
+ inc 9(SP) ; skip tricks!
+
+1: push (IP) ; *dest++ = *src++
+ pop 0(DP)
+ inc ipl
+ inc dpl
+ decsnz 10(SP) ; Loop until completed - note the skip trick
+ decsz 9(SP) ; on the MSB!
+ page 1b
+ jmp 1b
+
+ pop ipl ; Restore our pointer regs
+ pop iph
+ pop dpl
+ pop dph
+
+ mov w, #6 ; Tidy up our stacked args.
+ add spl, w
+ ret
+
+ .endfunc
+#endif
+
+#if defined(L_exit)
+;
+; _exit/__exit - terminate the program via the "system" trap.
+; The two stacked status bytes are OR-ed together (through scratch
+; location 0x88 - presumably a free direct-memory byte) so that any
+; non-zero exit status reports as non-zero, then pushed together with
+; what appears to be the trap service number (#1).  Declared weak so a
+; runtime can override it.  Loops forever, as exit must never return.
+;
+ .sect .text.libgcc,"ax"
+ .global __exit
+ .global _exit
+ .func _exit, __exit
+ .weak __exit
+ .weak _exit
+
+_exit:
+__exit:
+ pop $88		; first status byte -> scratch
+ pop wreg		; second status byte -> w
+ or w, $88		; merge: non-zero if either byte was non-zero
+ push wreg
+ push #0
+ push #1
+ system ; Exit wreg
+ page __exit ; Never return
+ jmp __exit
+
+ .endfunc
+#endif
+
+#if defined(Labort)
+;
+; abort - terminate immediately with status 1 via the same "system"
+; trap used by _exit (service #1, apparently exit).  The trailing ret
+; is unreachable if the trap terminates, and is presumably there only
+; as a safety net.
+;
+ .sect .text.libgcc,"ax"
+ .global _abort
+ .func abort, _abort
+
+_abort:
+ push #1		; exit status 1
+ push #0
+ push #1		; trap service number
+ system ; Exit 1
+ ret
+
+ .endfunc
+#endif
+
+#if defined(Lwrite)
+ /* Dummy entrypoint to suppress problems with glue code. */
+ .sect .text.libgcc,"ax"
+ .global _write
+ .func write, _write
+;;
+;; write (fil,buf,len) - say that write succeeds....
+;;
+;; Stub: copies the stacked length argument (5(SP)/6(SP)) into
+;; locations 0x80/0x81 - presumably the return-value registers, so the
+;; caller sees the full length reported as written - then discards the
+;; six argument bytes (fil, buf, len: three two-byte values).
+;;
+_write:
+ movb $80, 5(SP)
+ movb $81, 6(SP) ; Return length written
+ mov w, #6
+ add spl, w
+ ret
+
+ .endfunc
+#endif
+
--- /dev/null
+# t-ip2k: target makefile fragment for the Ubicom IP2022 port.
+# Build libgcc1 from the hand-written assembler routines in libgcc.S.
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = ip2k/libgcc.S
+
+# Each name below selects one "#if defined(L<name>)" section of
+# libgcc.S (e.g. _exit -> L_exit, abort -> Labort).
+LIB1ASMFUNCS = \
+	_indcall \
+	_mulhi3 \
+	_mulsi3 \
+	_muldi3 \
+	_udivmodhi4 \
+	_divmodhi4 \
+	_udivmodsi4 \
+	_divmodsi4 \
+	_udivmoddi4 \
+	_divmoddi4 \
+	_cmpqi2 \
+	_cmphi2 \
+	_cmpsi2 \
+	_cmpdi2 \
+	_cmpdi2_dp \
+	_fp_pop_args_ret \
+	_leaf_fp_pop_args_ret \
+	_fp_pop2_args_ret \
+	_leaf_fp_pop2_args_ret \
+	_movstrhi_countqi \
+	_movstrhi_counthi \
+	abort \
+	_exit
+
+# libgcc...
+LIBGCC1_TEST =
+
+# libgcc2.h thinks that nobody would have SI mode when
+# MIN_UNITS_PER_WORD == 1, so lie to keep from major compiler errors.
+
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -DDF=SF -g -DMIN_UNITS_PER_WORD=2
+
+# Generate a single-precision-only fp-bit.c: DF and DI are remapped to
+# SF/SI to match the MIN_UNITS_PER_WORD lie above.
+# NOTE(review): the crt0.o prerequisite looks unrelated to generating
+# fp-bit.c - confirm whether it is intentional.
+fp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/ip2k/t-ip2k crt0.o
+	echo '#define FLOAT' > fp-bit.c
+	echo '#define FLOAT_ONLY' >> fp-bit.c
+	echo '#define DF SF' >> fp-bit.c
+	echo '#define DI SI' >> fp-bit.c
+	echo '#define CMPtype QItype' >> fp-bit.c
+	echo '#define SMALL_MACHINE' >> fp-bit.c
+	echo 'typedef int QItype __attribute__ ((mode (QI)));' >> fp-bit.c
+	cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+
+FPBIT = fp-bit.c
+
+# Our crt0 is written in assembler and we don't really support profiling.
+
+CRT0_S = $(srcdir)/config/ip2k/crt0.S
+MCRT0_S = $(CRT0_S)
+
@item naked
@cindex function without a prologue/epilogue code
-Use this attribute on the ARM or AVR ports to indicate that the specified
-function do not need prologue/epilogue sequences generated by the
-compiler. It is up to the programmer to provide these sequences.
+Use this attribute on the ARM, AVR and IP2K ports to indicate that the
+specified function does not need prologue/epilogue sequences generated
+by the compiler. It is up to the programmer to provide these sequences.
@item model (@var{model-name})
@cindex function addressability on the M32R/D
-@c Copyright (C) 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+@c Copyright (C) 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
@c This is part of the GCC manual.
@c For copying conditions, see the file install.texi.
@quotation
@c gmicro, fx80, spur and tahoe omitted since they don't work.
1750a, a29k, alpha, arm, avr, c@var{n}, clipper, dsp16xx, elxsi, fr30, h8300,
-hppa1.0, hppa1.1, i370, i386, i486, i586, i686, i786, i860, i960, m32r,
+hppa1.0, hppa1.1, i370, i386, i486, i586, i686, i786, i860, i960, ip2k, m32r,
m68000, m68k, m6811, m6812, m88k, mcore, mips, mipsel, mips64, mips64el,
mn10200, mn10300, ns32k, pdp11, powerpc, powerpcle, romp, rs6000, sh, sparc,
sparclite, sparc64, v850, vax, we32k.
@item
@uref{#*-ibm-aix*,,*-ibm-aix*}
@item
+@uref{#ip2k-*-elf,,ip2k-*-elf}
+@item
@uref{#m32r-*-elf,,m32r-*-elf}
@item
@uref{#m68000-hp-bsd,,m68000-hp-bsd}
</p>
<hr>
@end html
+@heading @anchor{ip2k-*-elf}ip2k-*-elf
+Ubicom IP2022 microcontroller.
+This configuration is intended for embedded systems.
+There are no standard Unix configurations.
+
+Use @samp{configure --target=ip2k-elf --enable-languages=c} to configure GCC@.
+
+@html
+</p>
+<hr>
+@end html
@heading @anchor{m32r-*-elf}m32r-*-elf
Mitsubishi M32R processor.
This configuration is intended for embedded systems.
(@samp{m} is preferable for @code{asm} statements)
@end table
+@item IP2K---@file{ip2k.h}
+@table @code
+@item a
+@samp{DP} or @samp{IP} registers (general address)
+
+@item f
+@samp{IP} register
+
+@item j
+@samp{IPL} register
+
+@item k
+@samp{IPH} register
+
+@item b
+@samp{DP} register
+
+@item y
+@samp{DPH} register
+
+@item z
+@samp{DPL} register
+
+@item q
+@samp{SP} register
+
+@item c
+@samp{DP} or @samp{SP} registers (offsettable address)
+
+@item d
+Non-pointer registers (not @samp{SP}, @samp{DP}, @samp{IP})
+
+@item u
+Non-SP registers (everything except @samp{SP})
+
+@item R
+Indirect through @samp{IP} - Avoid this except for @code{QImode}, since we
+can't access extra bytes
+
+@item S
+Indirect through @samp{SP} or @samp{DP} with short displacement (0..127)
+
+@item T
+Data-section immediate value
+
+@item I
+Integers from @minus{}255 to @minus{}1
+
+@item J
+Integers from 0 to 7---valid bit number in a register
+
+@item K
+Integers from 0 to 127---valid displacement for addressing mode
+
+@item L
+Integers from 1 to 127
+
+@item M
+Integer @minus{}1
+
+@item N
+Integer 1
+
+@item O
+Zero
+
+@item P
+Integers from 0 to 255
+@end table
+
@item Motorola 680x0---@file{m68k.h}
@table @code
@item a