/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "target-def.h"
/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
const char * mcore_stack_increment_string = 0;
int mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;
/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};
/* Provide reg_class from a letter such as appears in the machine
   description.  */
const enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS,  /* b */ ONLYR1_REGS,  /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS,   /* f */ NO_REGS,      /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS,   /* j */ NO_REGS,      /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS,   /* n */ NO_REGS,      /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS,   /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS,   /* v */ NO_REGS,      /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS,   /* z */ NO_REGS
};
struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;

  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};
static void       output_stack_adjust (int, int);
static int        calc_live_regs (int *);
static int        const_ok_for_mcore (int);
static int        try_constant_tricks (long, int *, int *);
static const char * output_inline_const (enum machine_mode, rtx *);
static void       block_move_sequence (rtx, rtx, rtx, rtx, int, int, int);
static void       layout_mcore_frame (struct mcore_frame *);
static void       mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static cond_type  is_cond_candidate (rtx);
static rtx        emit_new_cond_insn (rtx, int);
static rtx        conditionalize_block (rtx);
static void       conditionalize_optimization (void);
static void       mcore_reorg (void);
static rtx        handle_structs_in_regs (enum machine_mode, tree, int);
static void       mcore_mark_dllexport (tree);
static void       mcore_mark_dllimport (tree);
static int        mcore_dllexport_p (tree);
static int        mcore_dllimport_p (tree);
const struct attribute_spec mcore_attribute_table[];
static tree       mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section (const char *,
                                           unsigned int, tree);
#endif
static void       mcore_unique_section (tree, int);
static void       mcore_encode_section_info (tree, rtx, int);
static const char *mcore_strip_name_encoding (const char *);
static int        mcore_const_costs (rtx, RTX_CODE);
static int        mcore_and_cost (rtx);
static int        mcore_ior_cost (rtx);
static bool       mcore_rtx_costs (rtx, int, int, int *);
static void       mcore_external_libcall (rtx);
static bool       mcore_return_in_memory (tree, tree);
/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL	mcore_external_libcall
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES	merge_dllimport_decl_attributes
#endif
#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif
#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE 		mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION 	mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO 	mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING	mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS 		mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST 		hook_int_rtx_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG	mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS	hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN	hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES	hook_bool_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY		mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK	must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE	hook_pass_by_reference_must_pass_in_stack

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS	mcore_setup_incoming_varargs

struct gcc_target targetm = TARGET_INITIALIZER;
/* Adjust the stack and return the number of bytes taken to do it.  */
output_stack_adjust (int direction, int size)
  /* If extending stack a lot, we do it incrementally.  */
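  /* A sketch of the effect (not checked against real output): with
     mcore_stack_increment == 128, extending the stack by 300 bytes
     would run the loop below twice, each time buying 128 bytes and
     probing the newly exposed stack word with a volatile store, then
     fall through to a final 44-byte adjustment that needs no probe.  */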
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
      rtx tmp = gen_rtx_REG (SImode, 1);

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
          emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
          memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
          MEM_VOLATILE_P (memref) = 1;
          emit_insn (gen_movsi (memref, stack_pointer_rtx));
          size -= mcore_stack_increment;
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
         which doesn't require a probe.  */

      rtx val = GEN_INT (size);
          rtx nval = gen_rtx_REG (SImode, 1);
          emit_insn (gen_movsi (nval, val));
    insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
    insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
/* Work out the registers which need to be saved,
   both as a mask and a count.  */
calc_live_regs (int * count)
  int live_regs_mask = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
      if (regs_ever_live[reg] && !call_used_regs[reg])
          live_regs_mask |= (1 << reg);

  return live_regs_mask;
/* Print the operand address in x to the stream.  */
mcore_print_operand_address (FILE * stream, rtx x)
  switch (GET_CODE (x))
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
        rtx base  = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        if (GET_CODE (base) != REG)
          /* Ensure that BASE is a register (one of them must be).  */

        switch (GET_CODE (index))
            fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
                     reg_names[REGNO (base)], INTVAL (index));
      output_addr_const (stream, x);
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R'  print the next register or memory location along, i.e. the lsw in
        a regpair
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */
mcore_print_operand (FILE * stream, rtx x, int code)
      fprintf (asm_out_file, "32");
        fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
          fputs (reg_names[REGNO (x) + 1], (stream));
          mcore_print_operand_address
            (stream, XEXP (adjust_address (x, SImode, 4), 0));
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
               reg_names[REGNO (x) + 3]);
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      switch (GET_CODE (x))
          fputs (reg_names[REGNO (x)], (stream));
          output_address (XEXP (x, 0));
          output_addr_const (stream, x);
/* What does a constant cost ?  */
mcore_const_costs (rtx exp, enum rtx_code code)
  int val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
           && (   CONST_OK_FOR_M (~val)
               || CONST_OK_FOR_N (~val)))
  else if (code == PLUS
           && (   CONST_OK_FOR_I (-val)
               || CONST_OK_FOR_M (-val)
               || CONST_OK_FOR_N (-val)))
/* What does an AND instruction cost?  We compute this because immediates
   may have been relaxed, and we want CSE to common up relaxed immediates.
   Otherwise we'll get bad code (multiple reloads of the same constant).  */
mcore_and_cost (rtx x)
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
  /* Takes a lrw to load.  */

/* What does an or cost - see and_cost().  */
mcore_ior_cost (rtx x)
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bclri.  */
  if (CONST_OK_FOR_M (val))
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
  /* Takes a lrw to load.  */
mcore_rtx_costs (rtx x, int code, int outer_code, int * total)
      *total = mcore_const_costs (x, outer_code);
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      *total = COSTS_N_INSNS (100);
/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */
mcore_modify_comparison (enum rtx_code code)
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
      int val = INTVAL (op1);

      if (CONST_OK_FOR_J (val + 1))
          arch_compare_op1 = GEN_INT (val + 1);
/* Prepare the operands for a comparison.  */
mcore_gen_compare_reg (enum rtx_code code)
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
    case EQ:	/* Use inverted condition, cmpne.  */
    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
    case LE:	/* Use inverted condition, reversed cmplt.  */
    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
    case GE:	/* Use inverted condition, cmplt.  */
    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
          /* covered by btsti x,31.  */
          ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
    case GTU:	/* Use inverted condition, cmple.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
          /* Unsigned > 0 is the same as != 0, but we need
             to invert the condition, so we want to set
             code = EQ.  This cannot be done however, as the
             mcore does not support such a test.  Instead we
             cope with this case in the "bgtu" pattern itself
             so we should never reach this point.  */
    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
    case LTU:	/* Use inverted condition, cmphs.  */
    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);

  emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));
mcore_symbolic_address_p (rtx x)
  switch (GET_CODE (x))
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);

mcore_call_address_operand (rtx x, enum machine_mode mode)
  return register_operand (x, mode) || CONSTANT_P (x);
/* Functions to output assembly code for a function call.  */
mcore_output_call (rtx operands[], int index)
  static char buffer[20];
  rtx addr = operands [index];

      if (mcore_current_function_name == 0)
      ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
      sprintf (buffer, "jsr\t%%%d", index);
      if (mcore_current_function_name == 0)
      if (GET_CODE (addr) != SYMBOL_REF)
      ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, XSTR (addr, 0), 0);
      sprintf (buffer, "jbsr\t%%%d", index);
/* Can we load a constant with a single instruction ?  */
const_ok_for_mcore (int value)
  if (value >= 0 && value <= 127)

  /* Try exact power of two.  */
  if ((value & (value - 1)) == 0)

  /* Try exact power of two - 1.  */
  if ((value & (value + 1)) == 0)

/* Can we load a constant inline with up to 2 instructions ?  */
mcore_const_ok_for_inline (long value)
  return try_constant_tricks (value, & x, & y) > 0;

/* Are we loading the constant using a not ?  */
mcore_const_trick_uses_not (long value)
  return try_constant_tricks (value, & x, & y) == 2;
/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by a subi
   4: single insn followed by an addi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */
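/* Two worked examples of the tricks above (sketches, not cross-checked
   against the compiler's actual choices):
   - 2000 (0x7d0) is 125 << 4, so it can be built from the loadable 125
     with trick 8 or 9, e.g. "movi rd,125" then "lsli rd,4";
   - 0xffffff00 is not directly loadable, but its complement 0xff
     (2^8 - 1) is, so trick 2 gives "bmaski rd,8" then "not rd".  */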
try_constant_tricks (long value, int * x, int * y)
  unsigned bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (const_ok_for_mcore (~value))

  for (i = 1; i <= 32; i++)
      if (const_ok_for_mcore (value - i))
      if (const_ok_for_mcore (value + i))

  for (i = 0; i <= 31; i++)
      if (const_ok_for_mcore (i - value))
      if (const_ok_for_mcore (value & ~bit))
      if (const_ok_for_mcore (value | bit))

  for (i = 1; i < 31; i++)
      /* MCore has rotate left.  */
      rot |= c;			/* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
        shf = 0;	/* Can't use logical shift, low order bit is one.  */

      if (shf != 0 && const_ok_for_mcore (shf))

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */
mcore_is_dead (rtx first, rtx reg)
  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) == JUMP_INSN)
        return 0;	/* We lose track, assume it is alive.  */
      else if (GET_CODE (insn) == CALL_INSN)
          /* Calls might use it for target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
          else if (dead_or_set_p (insn, reg))
      else if (GET_CODE (insn) == INSN)
          if (reg_referenced_p (reg, PATTERN (insn)))
          else if (dead_or_set_p (insn, reg))

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
/* Count the number of ones in mask.  */
mcore_num_ones (int mask)
  /* A trick to count set bits recently posted on comp.compilers.  */
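  /* A trace of the halving steps for mask = 0xf0 (a sketch): the first
     line leaves per-2-bit counts (0xa0, i.e. pairs 2,2,0,0), the second
     per-nibble counts (0x40, nibbles 4 and 0), the third per-byte
     counts (0x04), and the final adds fold the byte counts together,
     returning 4.  */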
  mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;

/* Count the number of zeros in mask.  */
mcore_num_zeros (int mask)
  return 32 - mcore_num_ones (mask);

/* Determine byte being masked.  */
mcore_byte_offset (unsigned int mask)
  if (mask == 0x00ffffffL)
  else if (mask == 0xff00ffffL)
  else if (mask == 0xffff00ffL)
  else if (mask == 0xffffff00L)

/* Determine halfword being masked.  */
mcore_halfword_offset (unsigned int mask)
  if (mask == 0x0000ffffL)
  else if (mask == 0xffff0000L)
/* Output a series of bseti's corresponding to mask.  */
mcore_output_bseti (rtx dst, int mask)
  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
      if ((mask & 0x1) == 0x1)
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);

/* Output a series of bclri's corresponding to mask.  */
mcore_output_bclri (rtx dst, int mask)
  rtx out_operands[2];

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
      if ((mask & 0x1) == 0x0)
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.  I'm not sure this is
   really worth the effort.  */
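/* As a sketch (assuming the test has already set the C bit): producing
   10 when the condition is true and 11 when it is false can be emitted
   as "movi %0,10" followed by "incf %0", i.e. load the loadable
   constant and conditionally adjust it by one instead of branching.  */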
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
  else if (const_ok_for_mcore (INTVAL (operands[2])))
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);

  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if ((load_value & (load_value - 1)) == 0)
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if ((load_value & (load_value + 1)) == 0)
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
        output_asm_insn ("decf\t%0", out_operands);
        output_asm_insn ("dect\t%0", out_operands);
        output_asm_insn ("incf\t%0", out_operands);
        output_asm_insn ("inct\t%0", out_operands);
/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */
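/* A sketch of the saving: AND-ing r3 with 0xfffffeff would naively be
   "bgeni r2,8; not r2; and r3,r2" (trick 2 above loads the complement
   0x100 and inverts it); the peephole keeps the complement and emits
   "bgeni r2,8; andn r3,r2" instead, dropping the not (r2/r3 here are
   hypothetical registers).  */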
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
  rtx out_operands[3];
  const char * load_op;

  if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    load_op = "bmaski\t%0,%N1";
    load_op = "BADMOVI\t%0,%1";

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);
/* Output an inline constant.  */
output_inline_const (enum machine_mode mode, rtx operands[])
  rtx out_operands[3];
  const char *dst_fmt;

  value = INTVAL (operands[1]);

  if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
      /* lrw's are handled separately: Large inlinable constants
         never get turned into lrw's.  Our caller uses try_constant_tricks
         to back off to an lrw rather than calling this routine.  */

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if ((x & (x - 1)) == 0)
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if ((x & (x + 1)) == 0)
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
    sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);

      strcpy (buf, load_op);
      sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);

  output_asm_insn (buf, out_operands);
/* Output a move of a word or less value.  */
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   enum machine_mode mode ATTRIBUTE_UNUSED)
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
      if (GET_CODE (src) == REG)
          if (REGNO (src) == CC_REG)		/* r-c */
          return "mov\t%0,%1";			/* r-r */
      else if (GET_CODE (src) == MEM)
          if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
            return "lrw\t%0,[%1]";		/* a-R */
          switch (GET_MODE (src))		/* r-m */
              return "ldw\t%0,%1";
              return "ld.h\t%0,%1";
              return "ld.b\t%0,%1";
      else if (GET_CODE (src) == CONST_INT)
          if (CONST_OK_FOR_I (INTVAL (src)))		/* r-I */
            return "movi\t%0,%1";
          else if (CONST_OK_FOR_M (INTVAL (src)))	/* r-M */
            return "bgeni\t%0,%P1\t// %1 %x1";
          else if (CONST_OK_FOR_N (INTVAL (src)))	/* r-N */
            return "bmaski\t%0,%N1\t// %1 %x1";
          else if (try_constant_tricks (INTVAL (src), &x, &y))	/* R-P */
            return output_inline_const (SImode, operands);	/* 1-2 insns */
            return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
        return "lrw\t%0, %1";			/* Into the literal pool.  */
  else if (GET_CODE (dst) == MEM)		/* m-r */
    switch (GET_MODE (dst))
        return "stw\t%1,%0";
        return "st.h\t%1,%0";
        return "st.b\t%1,%0";
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
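/* A sketch of the overlap hazard: with source pair r2/r3 and destination
   pair r3/r4, copying "mov r3,r2" first would clobber r3 before it is
   read, so when srcreg + 1 == dstreg the code below copies the second
   word first: "mov r4,r3" then "mov r3,r2".  */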
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
      if (GET_CODE (src) == REG)
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source is not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov	%R0,%R1\n\tmov	%0,%1";
            return "mov	%0,%1\n\tmov	%R0,%R1";
      else if (GET_CODE (src) == MEM)
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
      else if (GET_CODE (src) == CONST_INT)
          if (TARGET_LITTLE_END)
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi	%0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni	%0,%P1", operands);
              else if (INTVAL (src) == -1)
                output_asm_insn ("bmaski	%0,32", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski	%0,%N1", operands);

              if (INTVAL (src) < 0)
                return "bmaski	%R0,32";
                return "movi	%R0,0";
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi	%R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni	%R0,%P1", operands);
              else if (INTVAL (src) == -1)
                output_asm_insn ("bmaski	%R0,32", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski	%R0,%N1", operands);

              if (INTVAL (src) < 0)
                return "bmaski	%0,32";
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
/* Predicates used by the templates.  */

/* Nonzero if OP can be source of a simple move operation.  */
mcore_general_movsrc_operand (rtx op, enum machine_mode mode)
  /* Any (MEM LABEL_REF) is OK.  That is a pc-relative load.  */
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF)

  return general_operand (op, mode);

/* Nonzero if OP can be destination of a simple move operation.  */
mcore_general_movdst_operand (rtx op, enum machine_mode mode)
  if (GET_CODE (op) == REG && REGNO (op) == CC_REG)

  return general_operand (op, mode);

/* Nonzero if OP is a normal arithmetic register.  */
mcore_arith_reg_operand (rtx op, enum machine_mode mode)
  if (! register_operand (op, mode))

  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (GET_CODE (op) == REG)
    return REGNO (op) != CC_REG;

/* Nonzero if OP should be recognized during reload for an ixh/ixw
   operand.  See the ixh/ixw patterns.  */
mcore_reload_operand (rtx op, enum machine_mode mode)
  if (mcore_arith_reg_operand (op, mode))

  if (! reload_in_progress)

  return GET_CODE (op) == MEM;

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */
mcore_arith_J_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))

/* Nonzero if OP is a valid source operand for an arithmetic insn.  */
mcore_arith_K_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))

/* Nonzero if OP is a valid source operand for a shift or rotate insn.  */
mcore_arith_K_operand_not_0 (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (   GET_CODE (op) == CONST_INT
      && CONST_OK_FOR_K (INTVAL (op))
      && INTVAL (op) != 0)

mcore_arith_K_S_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT)
      if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op)))

mcore_arith_S_operand (rtx op)
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))

mcore_arith_M_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))

/* Nonzero if OP is a valid source operand for loading.  */
mcore_arith_imm_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op)))

mcore_arith_any_imm_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT)

/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1.  */
mcore_arith_O_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))

/* Nonzero if OP is a valid source operand for a btsti.  */
mcore_literal_K_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))

/* Nonzero if OP is a valid source operand for an add/sub insn.  */
mcore_addsub_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT)
      /* The following is removed because it precludes large constants from
         being returned as valid source operands for an add/sub insn.  While
         large constants may not directly be used in an add/sub, they may if
         first loaded into a register.  Thus, this predicate should indicate
         that they are valid, and the constraint in mcore.md should control
         whether an additional load to a register is needed.  (See mcore.md,
         addsi.)  -- DAC 4/2/1998  */
      if (CONST_OK_FOR_J (INTVAL (op)) || CONST_OK_FOR_L (INTVAL (op)))
/* Nonzero if OP is a valid source operand for a compare operation.  */
mcore_compare_operand (rtx op, enum machine_mode mode)
  if (register_operand (op, mode))

  if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
/* Expand insert bit field.  BRC  */
mcore_expand_insv (rtx operands[])
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
          mask = ~(1 << posn);
          emit_insn (gen_rtx_SET (SImode, operands[0],
                                  gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
          emit_insn (gen_rtx_SET (SImode, operands[0],
                                  gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT &&
      INTVAL (operands[3]) == ((1 << width) - 1))
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
                              gen_rtx_IOR (SImode, operands[0], mreg)));

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
                          gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits.  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
                              gen_rtx_AND (SImode, sreg, ereg)));

  /* Insert source value in dest.  */
    emit_insn (gen_rtx_SET (SImode, sreg,
                            gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));
  emit_insn (gen_rtx_SET (SImode, operands[0],
                          gen_rtx_IOR (SImode, operands[0], sreg)));
/* Return 1 if OP is a load multiple operation.  It is known to be a
   PARALLEL and the first section will be tested.  */
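/* The shape being checked is, roughly (a sketch):

     (parallel [(set (reg:SI n)   (mem:SI (reg:SI base)))
                (set (reg:SI n+1) (mem:SI (plus:SI (reg:SI base)
                                                   (const_int 4))))
                ...])

   i.e. element i must load register n+i from the word at base + 4*i.  */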
mcore_load_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
  int count = XVECLEN (op, 0);

  /* Perform a quick check so we don't blow up below.  */
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)

  dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
  src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
          || GET_CODE (SET_DEST (elt)) != REG
          || GET_MODE (SET_DEST (elt)) != SImode
          || REGNO (SET_DEST (elt)) != (unsigned) (dest_regno + i)
          || GET_CODE (SET_SRC (elt)) != MEM
          || GET_MODE (SET_SRC (elt)) != SImode
          || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
          || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
          || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
          || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)

/* Similar, but tests for store multiple.  */
mcore_store_multiple_operation (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
  int count = XVECLEN (op, 0);

  /* Perform a quick check so we don't blow up below.  */
      || GET_CODE (XVECEXP (op, 0, 0)) != SET
      || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
      || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)

  src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
  dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);

  for (i = 1; i < count; i++)
      rtx elt = XVECEXP (op, 0, i);

      if (GET_CODE (elt) != SET
          || GET_CODE (SET_SRC (elt)) != REG
          || GET_MODE (SET_SRC (elt)) != SImode
          || REGNO (SET_SRC (elt)) != (unsigned) (src_regno + i)
          || GET_CODE (SET_DEST (elt)) != MEM
          || GET_MODE (SET_DEST (elt)) != SImode
          || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
          || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
          || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
          || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */
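/* A sketch of the interleaving for an 8-byte, word-aligned copy
   (rA and rB are hypothetical temporaries):

     ldw  rA,(src,0)
     ldw  rB,(src,4)
     stw  rA,(dst,0)
     stw  rB,(dst,4)

   The two temporaries alternate so that each store can be paired with
   the load of the next chunk.  */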
static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
  VOIDmode, VOIDmode, VOIDmode, DImode
};
block_move_sequence (rtx dest, rtx dst_mem, rtx src, rtx src_mem,
                     int size, int align, int offset)
  enum machine_mode mode[2];
  int offset_ld = offset;
  int offset_st = offset;

  active[0] = active[1] = FALSE;

  /* Establish parameters for the first load and for the second load if
     it is known to be the same mode as the first.  */
  amount[0] = amount[1] = align;
  mode[0] = mode_from_align[align];
  temp[0] = gen_reg_rtx (mode[0]);

  if (size >= 2 * align)
      temp[1] = gen_reg_rtx (mode[1]);

      /* Change modes as the sequence tails off.  */
      if (size < amount[next])
          amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
          mode[next] = mode_from_align[amount[next]];
          temp[next] = gen_reg_rtx (mode[next]);

      size -= amount[next];
      srcp = gen_rtx_MEM (
        MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
        gen_rtx_PLUS (Pmode, src, GEN_INT (offset_ld)));

      MEM_READONLY_P (srcp) = MEM_READONLY_P (src_mem);
      MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
      MEM_IN_STRUCT_P (srcp) = 1;
      emit_insn (gen_rtx_SET (VOIDmode, temp[next], srcp));
      offset_ld += amount[next];
      active[next] = TRUE;

          active[phase] = FALSE;

          dstp = gen_rtx_MEM (
            MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
            gen_rtx_PLUS (Pmode, dest, GEN_INT (offset_st)));

          MEM_READONLY_P (dstp) = MEM_READONLY_P (dst_mem);
          MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
          MEM_IN_STRUCT_P (dstp) = 1;
          emit_insn (gen_rtx_SET (VOIDmode, dstp, temp[phase]));
          offset_st += amount[phase];
  while (active[next]);
mcore_expand_block_move (rtx dst_mem, rtx src_mem, rtx * operands)
  int align = INTVAL (operands[3]);

  if (GET_CODE (operands[2]) == CONST_INT)
      bytes = INTVAL (operands[2]);

      /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
         we give up and go to memcpy.  */
      if ((align == 4 && (bytes <= 4*4
                          || ((bytes & 01) == 0 && bytes <= 8*4)
                          || ((bytes & 03) == 0 && bytes <= 16*4)))
          || (align == 2 && bytes <= 4*2)
          || (align == 1 && bytes <= 4*1))
          block_move_sequence (operands[0], dst_mem, operands[1], src_mem,

  /* If we get here, just use the library routine.  */
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "memcpy"), 0, VOIDmode, 3,
                     operands[0], Pmode, operands[1], Pmode, operands[2],
1952 static int number_of_regs_before_varargs;
1954 /* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
1955 for a varargs function. */
1956 static int current_function_anonymous_args;
1958 #define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
1959 #define STORE_REACH (64) /* Maximum displace of word store + 4. */
1960 #define ADDI_REACH (32) /* Maximum addi operand. */
layout_mcore_frame (struct mcore_frame * infp)
  unsigned int growths;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = current_function_pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = current_function_outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;

  /* XXX: Consider one where we consider localregarg + outbound too!  */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     Use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
          infp->reg_offset += outbounds;

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
        infp->growth[growths++] = outbounds;

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)

      /* Get the rest of the locals in place.  */
      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* Finish off if we need to do so.  */
        infp->growth[growths++] = outbounds;

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
        infp->growth[growths++] = outbounds;

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
        use some of localsize so that regarg is aligned and then
        save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
      if (infp->local_size % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;

      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;

  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
  assert (infp->reg_offset >= 0);
  assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
      if (infp->growth[i] % STACK_BYTES)
          fprintf (stderr, "stack growth of %d is not %d aligned\n",
                   infp->growth[i], STACK_BYTES);
/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */
mcore_initial_elimination_offset (int from, int to)
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
/* Keep track of some information about varargs for the prolog.  */
mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
                              enum machine_mode mode, tree type,
                              int * ptr_pretend_size ATTRIBUTE_UNUSED,
                              int second_time ATTRIBUTE_UNUSED)
  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
mcore_expand_prolog (void)
  struct mcore_frame fi;
  int space_allocated = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
                    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

      /* Emit a symbol for this routine's frame size.  */
      x = DECL_RTL (current_function_decl);

      if (GET_CODE (x) != MEM)

      if (GET_CODE (x) != SYMBOL_REF)

      if (mcore_current_function_name)
        free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (current_function_calls_alloca)
        ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* We're looking at how the 8-byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
               "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
               mcore_current_function_name,
               fi.arg_size, fi.reg_size, fi.reg_mask,
               fi.local_size, fi.outbound_size,
               frame_pointer_needed);

  if (mcore_naked_function_p ())

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
          emit_insn (gen_movsi
                     (gen_rtx_MEM (SImode,
                                   plus_constant (stack_pointer_rtx, offset)),
                      gen_rtx_REG (SImode, rn)));

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  if (fi.reg_size != 0)
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
              while (fi.reg_mask & (1 << first_reg))

              emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
                                             gen_rtx_REG (SImode, first_reg),
                                             GEN_INT (16 - first_reg)));

              i -= (15 - first_reg);
              offs += (16 - first_reg) * 4;
          else if (fi.reg_mask & (1 << i))
              emit_insn (gen_movsi
                         (gen_rtx_MEM (SImode,
                                       plus_constant (stack_pointer_rtx, offs)),
                          gen_rtx_REG (SImode, i)));

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);

      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
mcore_expand_epilog (void)
  struct mcore_frame fi;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  if (mcore_naked_function_p ())

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;

  /* XXX: while loop should accumulate and do a single sell.  */
  while (growth >= fi.local_growth)
      if (fi.growth[growth] != 0)
        output_stack_adjust (1, fi.growth[growth]);

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust ( 1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
          /* Find the starting register.  */
          while (fi.reg_mask & (1 << first_reg))

          emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
                                        gen_rtx_MEM (SImode, stack_pointer_rtx),
                                        GEN_INT (16 - first_reg)));

          i -= (15 - first_reg);
          offs += (16 - first_reg) * 4;
      else if (fi.reg_mask & (1 << i))
          emit_insn (gen_movsi
                     (gen_rtx_REG (SImode, i),
                      gen_rtx_MEM (SImode,
                                   plus_constant (stack_pointer_rtx, offs))));

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
    output_stack_adjust ( 1, fi.growth[growth--]);
/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register, constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:
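   (a sketch of the shape; each load drags in its own branched-around
   literal)

	lrw	L1,r0
	br	L2
	align
   L1:	.long	value
   L2:
	..

	lrw	L3,r0
	br	L4
	align
   L3:	.long	value
   L4:
	..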
   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:
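   (again a sketch)

	lrw	L1,r0
	..
	lrw	L3,r0
	br	L4
	align
   L1:	.long	value
   L3:	.long	value
   L4: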
   Then the second move becomes the target for the shortening process.  */
typedef struct
{
  rtx value;			/* Value in table.  */
  rtx label;			/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */
#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;

/* Dump out any constants accumulated in the final pass.  These
   will only be labels.  */
mcore_output_jump_label_table (void)
      fprintf (asm_out_file, "\t.align 2\n");

      for (i = 0; i < pool_size; i++)
          pool_node * p = pool_vector + i;

          (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));

          output_asm_insn (".long %0", &p->value);
2545 /* Check whether insn is a candidate for a conditional. */
2548 is_cond_candidate (rtx insn)
2550 /* The only things we conditionalize are those that can be directly
2551 changed into a conditional. Only bother with SImode items. If
2552 we wanted to be a little more aggressive, we could also do other
2553 modes such as DImode with reg-reg move or load 0. */
2554 if (GET_CODE (insn) == INSN)
2556 rtx pat = PATTERN (insn);
2559 if (GET_CODE (pat) != SET)
2562 dst = XEXP (pat, 0);
2564 if ((GET_CODE (dst) != REG &&
2565 GET_CODE (dst) != SUBREG) ||
2566 GET_MODE (dst) != SImode)
2569 src = XEXP (pat, 1);
2571 if ((GET_CODE (src) == REG ||
2572 (GET_CODE (src) == SUBREG &&
2573 GET_CODE (SUBREG_REG (src)) == REG)) &&
2574 GET_MODE (src) == SImode)
2575 return COND_MOV_INSN;
2576 else if (GET_CODE (src) == CONST_INT &&
2577 INTVAL (src) == 0)
2578 return COND_CLR_INSN;
2579 else if (GET_CODE (src) == PLUS &&
2580 (GET_CODE (XEXP (src, 0)) == REG ||
2581 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2582 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2583 GET_MODE (XEXP (src, 0)) == SImode &&
2584 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2585 INTVAL (XEXP (src, 1)) == 1)
2586 return COND_INC_INSN;
2587 else if (((GET_CODE (src) == MINUS &&
2588 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2589 INTVAL (XEXP (src, 1)) == 1) ||
2590 (GET_CODE (src) == PLUS &&
2591 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2592 INTVAL (XEXP (src, 1)) == -1)) &&
2593 (GET_CODE (XEXP (src, 0)) == REG ||
2594 (GET_CODE (XEXP (src, 0)) == SUBREG &&
2595 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2596 GET_MODE (XEXP (src, 0)) == SImode)
2597 return COND_DEC_INSN;
2599 /* Some insns that we don't bother with:
2600 (set (rx:DI) (ry:DI))
2601 (set (rx:DI) (const_int 0))
2605 else if (GET_CODE (insn) == JUMP_INSN &&
2606 GET_CODE (PATTERN (insn)) == SET &&
2607 GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2608 return COND_BRANCH_INSN;
2610 return COND_NO;
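/* Illustrative classifications (not from the original sources; register
   numbers are arbitrary):

     (set (reg:SI 2) (reg:SI 3))                           COND_MOV_INSN
     (set (reg:SI 2) (const_int 0))                        COND_CLR_INSN
     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))   COND_INC_INSN
     (set (reg:SI 2) (minus:SI (reg:SI 2) (const_int 1)))  COND_DEC_INSN

   Anything else, e.g. a DImode move or a store to memory, yields COND_NO
   and prevents the containing block from being conditionalized. */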
2613 /* Emit a conditional version of insn and replace the old insn with the
2614 new one. Return the new insn if emitted. */
2617 emit_new_cond_insn (rtx insn, int cond)
2623 if ((num = is_cond_candidate (insn)) == COND_NO)
2626 pat = PATTERN (insn);
2628 if (GET_CODE (insn) == INSN)
2630 dst = SET_DEST (pat);
2631 src = SET_SRC (pat);
2635 dst = JUMP_LABEL (insn);
2644 c_insn = gen_movt0 (dst, src, dst);
2646 c_insn = gen_movt0 (dst, dst, src);
2651 c_insn = gen_incscc (dst, dst);
2653 c_insn = gen_incscc_false (dst, dst);
2658 c_insn = gen_decscc (dst, dst);
2660 c_insn = gen_decscc_false (dst, dst);
2663 case COND_BRANCH_INSN:
2665 c_insn = gen_branch_true (dst);
2667 c_insn = gen_branch_false (dst);
2674 /* Only copy the notes if they exist. */
2675 if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2677 /* We really don't need to bother with the notes and links at this
2678 point, but go ahead and save the notes. This will help is_dead()
2679 when applying peepholes (links don't matter since they are not
2680 used any more beyond this point for the mcore). */
2681 REG_NOTES (c_insn) = REG_NOTES (insn);
2684 if (num == COND_BRANCH_INSN)
2686 /* For jumps, we need to be a little bit careful and emit the new jump
2687 before the old one and to update the use count for the target label.
2688 This way, the barrier following the old (uncond) jump will get
2689 deleted, but the label won't. */
2690 c_insn = emit_jump_insn_before (c_insn, insn);
2692 ++ LABEL_NUSES (dst);
2694 JUMP_LABEL (c_insn) = dst;
2697 c_insn = emit_insn_after (c_insn, insn);
2699 delete_insn (insn);
2701 return c_insn;
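/* Example (an illustration, not from the original sources): with COND
   nonzero, a candidate copy (set (reg:SI 7) (reg:SI 8)) is re-emitted
   through gen_movt0 so that it takes effect only when the condition bit
   is set, i.e. as the mcore "movt r7,r8"; with COND zero the swapped
   operand order selects the complementary "movf" form. */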
2704 /* Attempt to change a basic block into a series of conditional insns. This
2705 works by taking the branch at the end of the 1st block and scanning for the
2706 end of the 2nd block. If all instructions in the 2nd block have cond.
2707 versions and the label at the start of block 3 is the same as the target
2708 from the branch at block 1, then conditionalize all insns in block 2 using
2709 the inverse condition of the branch at block 1. (Note I'm bending the
2710 definition of basic block here.)
2712 e.g., change:
2714 bt L2 <-- end of block 1 (delete)
2715 mov r7,r8
2716 addu r7,1
2717 br L3 <-- end of block 2
2719 L2: ... <-- start of block 3 (NUSES==1)
2720 L3: ...
2722 to:
2724 movf r7,r8
2725 incf r7
2726 bf L3
2728 L3: ...
2730 we can delete the L2 label if NUSES==1 and re-apply the optimization
2731 starting at the last instruction of block 2. This may allow an entire
2732 if-then-else statement to be conditionalized. BRC */
2734 conditionalize_block (rtx first)
2738 rtx end_blk_1_br = 0;
2739 rtx end_blk_2_insn = 0;
2740 rtx start_blk_3_lab = 0;
2746 /* Check that the first insn is a candidate conditional jump. This is
2747 the one that we'll eliminate. If not, advance to the next insn to
2748 try. */
2749 if (GET_CODE (first) != JUMP_INSN ||
2750 GET_CODE (PATTERN (first)) != SET ||
2751 GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2752 return NEXT_INSN (first);
2754 /* Extract some information we need. */
2755 end_blk_1_br = first;
2756 br_pat = PATTERN (end_blk_1_br);
2758 /* Complement the condition since we use the reverse cond. for the insns. */
2759 cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2761 /* Determine what kind of branch we have. */
2762 if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2764 /* A normal branch, so extract label out of first arm. */
2765 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2769 /* An inverse branch, so extract the label out of the 2nd arm
2770 and complement the condition. */
2772 br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2775 /* Scan forward for the start of block 2: it must start with a
2776 label and that label must be the same as the branch target
2777 label from block 1. We don't care about whether block 2 actually
2778 ends with a branch or a label (an uncond. branch is
2779 conditionalizable). */
2780 for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2784 code = GET_CODE (insn);
2786 /* Look for the label at the start of block 3. */
2787 if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2788 break;
2790 /* Skip barriers, notes, and conditionalizable insns. If the
2791 insn is not conditionalizable or makes this optimization fail,
2792 just return the next insn so we can start over from that point. */
2793 if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2794 return NEXT_INSN (insn);
2796 /* Remember the last real insn before the label (i.e. end of block 2). */
2797 if (code == JUMP_INSN || code == INSN)
2798 {
2799 blk_size ++;
2800 end_blk_2_insn = insn;
2807 /* It is possible for this optimization to slow performance if the blocks
2808 are long. This really depends upon whether the branch is likely taken
2809 or not. If the branch is taken, we slow performance in many cases. But,
2810 if the branch is not taken, we always help performance (for a single
2811 block, but for a double block (i.e. when the optimization is re-applied)
2812 this is not true since the 'right thing' depends on the overall length of
2813 the collapsed block). As a compromise, don't apply this optimization on
2814 blocks larger than size 2 (unlikely for the mcore) when speed is important.
2815 The best threshold depends on the latencies of the instructions (i.e.,
2816 the branch penalty). */
2817 if (optimize > 1 && blk_size > 2)
2818 return insn;
2820 /* At this point, we've found the start of block 3 and we know that
2821 it is the destination of the branch from block 1. Also, all
2822 instructions in the block 2 are conditionalizable. So, apply the
2823 conditionalization and delete the branch. */
2824 start_blk_3_lab = insn;
2826 for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2827 insn = NEXT_INSN (insn))
2831 if (INSN_DELETED_P (insn))
2832 continue;
2834 /* Try to form a conditional variant of the instruction and emit it. */
2835 if ((newinsn = emit_new_cond_insn (insn, cond)))
2837 if (end_blk_2_insn == insn)
2838 end_blk_2_insn = newinsn;
2840 insn = newinsn;
2844 /* Note whether we will delete the label starting blk 3 when the jump
2845 gets deleted. If so, we want to re-apply this optimization at the
2846 last real instruction right before the label. */
2847 if (LABEL_NUSES (start_blk_3_lab) == 1)
2849 start_blk_3_lab = 0;
2852 /* ??? We probably should redistribute the death notes for this insn, esp.
2853 the death of cc, but it doesn't really matter this late in the game.
2854 The peepholes all use is_dead() which will find the correct death
2855 regardless of whether there is a note. */
2856 delete_insn (end_blk_1_br);
2858 if (! start_blk_3_lab)
2859 return end_blk_2_insn;
2861 /* Return the insn right after the label at the start of block 3. */
2862 return NEXT_INSN (start_blk_3_lab);
2865 /* Apply the conditionalization of blocks optimization. This is the
2866 outer loop that traverses through the insns scanning for a branch
2867 that signifies an opportunity to apply the optimization. Note that
2868 this optimization is applied late. If we could apply it earlier,
2869 say before cse 2, it may expose more optimization opportunities.
2870 but, the pay back probably isn't really worth the effort (we'd have
2871 to update all reg/flow/notes/links/etc to make it work - and stick it
2872 in before cse 2). */
2875 conditionalize_optimization (void)
2879 for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2883 static int saved_warn_return_type = -1;
2884 static int saved_warn_return_type_count = 0;
2886 /* This is to handle loads from the constant pool. */
2891 /* Reset this variable. */
2892 current_function_anonymous_args = 0;
2894 /* Restore the warn_return_type if it has been altered. */
2895 if (saved_warn_return_type != -1)
2897 /* Only restore the value if we have reached another function.
2898 The test of warn_return_type occurs in finish_function () in
2899 c-decl.c a long time after the code for the function is generated,
2900 so we need a counter to tell us when we have finished parsing that
2901 function and can restore the flag. */
2902 if (--saved_warn_return_type_count == 0)
2904 warn_return_type = saved_warn_return_type;
2905 saved_warn_return_type = -1;
2912 /* Conditionalize blocks where we can. */
2913 conditionalize_optimization ();
2915 /* Literal pool generation is now pushed off until the assembler. */
2919 /* Return true if X is something that can be moved directly into r15. */
2922 mcore_r15_operand_p (rtx x)
2924 switch (GET_CODE (x))
2927 return mcore_const_ok_for_inline (INTVAL (x));
2939 /* Implement SECONDARY_RELOAD_CLASS. If CLASS contains r15, and we can't
2940 directly move X into it, use r1-r14 as a temporary. */
2943 mcore_secondary_reload_class (enum reg_class class,
2944 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2946 if (TEST_HARD_REG_BIT (reg_class_contents[class], 15)
2947 && !mcore_r15_operand_p (x))
2948 return LRW_REGS;
2950 return NO_REGS;
2952 /* Return the reg_class to use when reloading the rtx X into the class
2953 CLASS. If X is too complex to move directly into r15, prefer to
2954 use LRW_REGS instead. */
2957 mcore_reload_class (rtx x, enum reg_class class)
2959 if (reg_class_subset_p (LRW_REGS, class) && !mcore_r15_operand_p (x))
2960 return LRW_REGS;
2962 return class;
2965 /* Tell me if a pair of reg/subreg rtx's actually refer to the same
2966 register. Note that the current version doesn't worry about whether
2967 they are the same mode or not (e.g., a QImode in r2 matches an HImode
2968 in r2, which matches an SImode in r2). Might think in the future about whether
2969 we want to be able to say something about modes. */
2972 mcore_is_same_reg (rtx x, rtx y)
2974 /* Strip any and all of the subreg wrappers. */
2975 while (GET_CODE (x) == SUBREG)
2978 while (GET_CODE (y) == SUBREG)
2981 if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2982 return 1;
2984 return 0;
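/* Examples (illustration only): (subreg:HI (reg:SI 2) 0) and (reg:SI 2)
   both strip down to hard register 2 and therefore match, while
   (reg:SI 2) and (reg:SI 3) do not; anything that is not a REG after
   stripping, e.g. a MEM, never matches. */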
2988 mcore_override_options (void)
2990 if (mcore_stack_increment_string)
2992 mcore_stack_increment = atoi (mcore_stack_increment_string);
2994 if (mcore_stack_increment < 0
2995 || (mcore_stack_increment == 0
2996 && (mcore_stack_increment_string[0] != '0'
2997 || mcore_stack_increment_string[1] != 0)))
2998 error ("invalid option `-mstack-increment=%s'",
2999 mcore_stack_increment_string);
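/* Worked examples of the check above (illustration only):
   "-mstack-increment=256" parses to 256 and is accepted;
   "-mstack-increment=abc" makes atoi() return 0 while the string is not
   literally "0", so the error fires; "-mstack-increment=-4" is negative
   and is likewise rejected. */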
3002 /* Only the m340 supports little endian code. */
3003 if (TARGET_LITTLE_END && ! TARGET_M340)
3004 target_flags |= M340_BIT;
3007 /* Compute the number of word sized registers needed to
3008 hold a function argument of mode MODE and type TYPE. */
3011 mcore_num_arg_regs (enum machine_mode mode, tree type)
3015 if (targetm.calls.must_pass_in_stack (mode, type))
3018 if (type && mode == BLKmode)
3019 size = int_size_in_bytes (type);
3021 size = GET_MODE_SIZE (mode);
3023 return ROUND_ADVANCE (size);
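/* A worked example (illustration only), assuming the mcore's 4-byte
   UNITS_PER_WORD and a ROUND_ADVANCE() that rounds a byte count up to
   whole words: a 6-byte BLKmode structure gives int_size_in_bytes == 6
   and hence ROUND_ADVANCE (6) == 2 argument words, while an SImode
   scalar needs just 1. */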
3027 handle_structs_in_regs (enum machine_mode mode, tree type, int reg)
3031 /* The MCore ABI defines that a structure whose size is not a whole multiple
3032 of bytes is passed packed into registers (or spilled onto the stack if
3033 not enough registers are available) with the last few bytes of the
3034 structure being packed, left-justified, into the last register/stack slot.
3035 GCC handles this correctly if the last word is in a stack slot, but we
3036 have to generate a special, PARALLEL RTX if the last word is in an
3037 argument register. */
3039 && TYPE_MODE (type) == BLKmode
3040 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
3041 && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
3042 && (size % UNITS_PER_WORD != 0)
3043 && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
3045 rtx arg_regs [NPARM_REGS];
3050 for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
3051 {
3052 arg_regs [nregs] =
3053 gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
3054 GEN_INT (nregs * UNITS_PER_WORD));
3055 ++ nregs;
3056 }
3058 /* We assume here that NPARM_REGS == 6. The assert checks this. */
3059 assert (ARRAY_SIZE (arg_regs) == 6);
3060 rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
3061 arg_regs[3], arg_regs[4], arg_regs[5]);
3063 result = gen_rtx_PARALLEL (mode, rtvec);
3067 return gen_rtx_REG (mode, reg);
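/* For illustration (not from the original sources): a 6-byte structure
   starting in hardware register 2 takes the PARALLEL path above and is
   described as two SImode pieces, roughly

     (parallel [(expr_list (reg:SI 2) (const_int 0))
                (expr_list (reg:SI 3) (const_int 4))])

   so the second register carries only the two remaining bytes,
   left-justified as the ABI note above requires. */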
3071 mcore_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3073 enum machine_mode mode;
3074 int unsigned_p;
3076 mode = TYPE_MODE (valtype);
3078 PROMOTE_MODE (mode, unsigned_p, NULL);
3080 return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
3083 /* Define where to put the arguments to a function.
3084 Value is zero to push the argument on the stack,
3085 or a hard register in which to store the argument.
3087 MODE is the argument's machine mode.
3088 TYPE is the data type of the argument (as a tree).
3089 This is null for libcalls where that information may
3090 not be available.
3091 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3092 the preceding args and about the function being called.
3093 NAMED is nonzero if this argument is a named parameter
3094 (otherwise it is an extra parameter matching an ellipsis).
3096 On MCore the first args are normally in registers
3097 and the rest are pushed. Any arg that starts within the first
3098 NPARM_REGS words is at least partially passed in a register unless
3099 its data type forbids. */
3102 mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode,
3103 tree type, int named)
3110 if (targetm.calls.must_pass_in_stack (mode, type))
3113 arg_reg = ROUND_REG (cum, mode);
3115 if (arg_reg < NPARM_REGS)
3116 return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
3118 return 0;
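/* Worked example (illustration only): for a call f (1, 2) with two int
   arguments, the first argument sees ROUND_REG (cum, mode) == 0 and is
   assigned FIRST_PARM_REG + 0 (hardware register 2 on the mcore), the
   second lands in hardware register 3; once all NPARM_REGS argument
   words are taken, 0 is returned and the argument goes on the stack. */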
3121 /* Implements the FUNCTION_ARG_PARTIAL_NREGS macro.
3122 Returns the number of argument registers required to hold *part* of
3123 a parameter of machine mode MODE and type TYPE (which may be NULL if
3124 the type is not known). If the argument fits entirely in the argument
3125 registers, or entirely on the stack, then 0 is returned. CUM is the
3126 number of argument registers already used by earlier parameters to
3127 the function. */
3130 mcore_function_arg_partial_nregs (CUMULATIVE_ARGS cum, enum machine_mode mode,
3131 tree type, int named)
3133 int reg = ROUND_REG (cum, mode);
3138 if (targetm.calls.must_pass_in_stack (mode, type))
3141 /* REG is not the *hardware* register number of the register that holds
3142 the argument, it is the *argument* register number. So for example,
3143 the first argument to a function goes in argument register 0, which
3144 translates (for the MCore) into hardware register 2. The second
3145 argument goes into argument register 1, which translates into hardware
3146 register 3, and so on. NPARM_REGS is the number of argument registers
3147 supported by the target, not the maximum hardware register number of
3148 the target. */
3149 if (reg >= NPARM_REGS)
3152 /* If the argument fits entirely in registers, return 0. */
3153 if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
3156 /* The argument overflows the number of available argument registers.
3157 Compute how many argument registers have not yet been assigned to
3158 hold an argument. */
3159 reg = NPARM_REGS - reg;
3161 /* Return partially in registers and partially on the stack. */
3162 return reg;
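/* Worked example (illustration only), relying on the NPARM_REGS == 6
   assumption noted earlier: with five argument words already used
   (reg == 5), a two-word parameter fits neither entirely in registers
   (5 + 2 > 6) nor entirely on the stack (5 < 6), so the code above
   returns 6 - 5 == 1: one word travels in the last argument register
   and the remainder is passed on the stack. */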
3165 /* Return nonzero if SYMBOL is marked as being dllexport'd. */
3168 mcore_dllexport_name_p (const char * symbol)
3170 return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
3173 /* Return nonzero if SYMBOL is marked as being dllimport'd. */
3176 mcore_dllimport_name_p (const char * symbol)
3178 return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
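/* Examples (illustration only): the marking routines below rewrite a
   symbol "foo" to "@e.foo" when it is dllexport'd and to "@i.__imp_foo"
   when it is dllimport'd; the two predicates above test exactly those
   three-character prefixes. */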
3181 /* Mark a DECL as being dllexport'd. */
3184 mcore_mark_dllexport (tree decl)
3186 const char * oldname;
3191 rtlname = XEXP (DECL_RTL (decl), 0);
3193 if (GET_CODE (rtlname) == SYMBOL_REF)
3194 oldname = XSTR (rtlname, 0);
3195 else if ( GET_CODE (rtlname) == MEM
3196 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3197 oldname = XSTR (XEXP (rtlname, 0), 0);
3198 else
3199 abort ();
3201 if (mcore_dllexport_name_p (oldname))
3202 return; /* Already done. */
3204 newname = alloca (strlen (oldname) + 4);
3205 sprintf (newname, "@e.%s", oldname);
3207 /* We pass newname through get_identifier to ensure it has a unique
3208 address. RTL processing can sometimes peek inside the symbol ref
3209 and compare the string's addresses to see if two symbols are
3210 identical. */
3211 /* ??? At least I think that's why we do this. */
3212 idp = get_identifier (newname);
3214 XEXP (DECL_RTL (decl), 0) =
3215 gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3218 /* Mark a DECL as being dllimport'd. */
3221 mcore_mark_dllimport (tree decl)
3223 const char * oldname;
3229 rtlname = XEXP (DECL_RTL (decl), 0);
3231 if (GET_CODE (rtlname) == SYMBOL_REF)
3232 oldname = XSTR (rtlname, 0);
3233 else if ( GET_CODE (rtlname) == MEM
3234 && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
3235 oldname = XSTR (XEXP (rtlname, 0), 0);
3236 else
3237 abort ();
3239 if (mcore_dllexport_name_p (oldname))
3240 abort (); /* This shouldn't happen. */
3241 else if (mcore_dllimport_name_p (oldname))
3242 return; /* Already done. */
3244 /* ??? One can well ask why we're making these checks here,
3245 and that would be a good question. */
3247 /* Imported variables can't be initialized. */
3248 if (TREE_CODE (decl) == VAR_DECL
3249 && !DECL_VIRTUAL_P (decl)
3250 && DECL_INITIAL (decl))
3252 error ("%Jinitialized variable '%D' is marked dllimport", decl, decl);
3256 /* `extern' needn't be specified with dllimport.
3257 Specify `extern' now and hope for the best. Sigh. */
3258 if (TREE_CODE (decl) == VAR_DECL
3259 /* ??? Is this test for vtables needed? */
3260 && !DECL_VIRTUAL_P (decl))
3262 DECL_EXTERNAL (decl) = 1;
3263 TREE_PUBLIC (decl) = 1;
3266 newname = alloca (strlen (oldname) + 11);
3267 sprintf (newname, "@i.__imp_%s", oldname);
3269 /* We pass newname through get_identifier to ensure it has a unique
3270 address. RTL processing can sometimes peek inside the symbol ref
3271 and compare the string's addresses to see if two symbols are
3272 identical. */
3273 /* ??? At least I think that's why we do this. */
3274 idp = get_identifier (newname);
3276 newrtl = gen_rtx_MEM (Pmode,
3277 gen_rtx_SYMBOL_REF (Pmode,
3278 IDENTIFIER_POINTER (idp)));
3279 XEXP (DECL_RTL (decl), 0) = newrtl;
3283 mcore_dllexport_p (tree decl)
3285 if ( TREE_CODE (decl) != VAR_DECL
3286 && TREE_CODE (decl) != FUNCTION_DECL)
3287 return 0;
3289 return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
3293 mcore_dllimport_p (tree decl)
3295 if ( TREE_CODE (decl) != VAR_DECL
3296 && TREE_CODE (decl) != FUNCTION_DECL)
3297 return 0;
3299 return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
3302 /* We must mark dll symbols specially. Definitions of dllexport'd objects
3303 install some info in the .drectve (PE) or .exports (ELF) sections. */
3306 mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3308 /* Mark the decl so we can tell from the rtl whether the object is
3309 dllexport'd or dllimport'd. */
3310 if (mcore_dllexport_p (decl))
3311 mcore_mark_dllexport (decl);
3312 else if (mcore_dllimport_p (decl))
3313 mcore_mark_dllimport (decl);
3315 /* It might be that DECL has already been marked as dllimport, but
3316 a subsequent definition nullified that. The attribute is gone
3317 but DECL_RTL still has @i.__imp_foo. We need to remove that. */
3318 else if ((TREE_CODE (decl) == FUNCTION_DECL
3319 || TREE_CODE (decl) == VAR_DECL)
3320 && DECL_RTL (decl) != NULL_RTX
3321 && GET_CODE (DECL_RTL (decl)) == MEM
3322 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3323 && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3324 && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3326 const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3327 tree idp = get_identifier (oldname + 9);
3328 rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3330 XEXP (DECL_RTL (decl), 0) = newrtl;
3332 /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3333 ??? We leave these alone for now. */
3337 /* Undo the effects of the above. */
3340 mcore_strip_name_encoding (const char * str)
3342 return str + (str[0] == '@' ? 3 : 0);
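/* Examples (illustration only): "@e.foo" strips to "foo", while
   "@i.__imp_foo" strips to "__imp_foo", since only the three-character
   "@i." marker is removed; an unencoded name such as "bar" is returned
   unchanged. */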
3345 /* MCore specific attribute support.
3346 dllexport - for exporting a function/variable that will live in a dll
3347 dllimport - for importing a function/variable from a dll
3348 naked - do not create a function prologue/epilogue. */
3350 const struct attribute_spec mcore_attribute_table[] =
3352 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
3353 { "dllexport", 0, 0, true, false, false, NULL },
3354 { "dllimport", 0, 0, true, false, false, NULL },
3355 { "naked", 0, 0, true, false, false, mcore_handle_naked_attribute },
3356 { NULL, 0, 0, false, false, false, NULL }
3359 /* Handle a "naked" attribute; arguments as in
3360 struct attribute_spec.handler. */
3363 mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3364 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3366 if (TREE_CODE (*node) == FUNCTION_DECL)
3368 /* PR14310 - don't complain about lack of return statement
3369 in naked functions. The solution here is a gross hack
3370 but this is the only way to solve the problem without
3371 adding a new feature to GCC. I did try submitting a patch
3372 that would add such a new feature, but it was (rightfully)
3373 rejected on the grounds that it was creeping featurism,
3374 so hence this code. */
3375 if (warn_return_type)
3377 saved_warn_return_type = warn_return_type;
3378 warn_return_type = 0;
3379 saved_warn_return_type_count = 2;
3381 else if (saved_warn_return_type_count)
3382 saved_warn_return_type_count = 2;
3386 warning ("`%s' attribute only applies to functions",
3387 IDENTIFIER_POINTER (name));
3388 *no_add_attrs = true;
3391 return NULL_TREE;
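/* Typical use of the attribute handled above (an illustration, not from
   the original sources):

     void fast_interrupt (void) __attribute__ ((naked));

   The function is then emitted without prologue or epilogue code, and,
   courtesy of the PR14310 workaround, without a spurious warning about
   a missing return statement. */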
3394 /* ??? It looks like this is PE specific? Oh well, this is what the
3395 old code did as well. */
3398 mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3403 const char * prefix;
3405 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3407 /* Strip off any encoding in name. */
3408 name = (* targetm.strip_name_encoding) (name);
3410 /* The object is put in, for example, section .text$foo.
3411 The linker will then ultimately place it in .text
3412 (everything from the $ on is stripped). */
3413 if (TREE_CODE (decl) == FUNCTION_DECL)
3414 prefix = ".text$";
3415 /* For compatibility with EPOC, we ignore the fact that the
3416 section might have relocs against it. */
3417 else if (decl_readonly_section (decl, 0))
3418 prefix = ".rdata$";
3419 else
3420 prefix = ".data$";
3422 len = strlen (name) + strlen (prefix);
3423 string = alloca (len + 1);
3425 sprintf (string, "%s%s", prefix, name);
3427 DECL_SECTION_NAME (decl) = build_string (len, string);
3431 mcore_naked_function_p (void)
3433 return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3436 #ifdef OBJECT_FORMAT_ELF
3438 mcore_asm_named_section (const char *name, unsigned int flags ATTRIBUTE_UNUSED)
3440 fprintf (asm_out_file, "\t.section %s\n", name);
3442 #endif /* OBJECT_FORMAT_ELF */
3444 /* Worker function for TARGET_ASM_EXTERNAL_LIBCALL. */
3447 mcore_external_libcall (rtx fun)
3449 fprintf (asm_out_file, "\t.import\t");
3450 assemble_name (asm_out_file, XSTR (fun, 0));
3451 fprintf (asm_out_file, "\n");
3454 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3457 mcore_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
3459 HOST_WIDE_INT size = int_size_in_bytes (type);
3460 return (size == -1 || size > 2 * UNITS_PER_WORD);
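/* Worked examples (illustration only), with 4-byte words: an 8-byte
   structure (exactly 2 * UNITS_PER_WORD) is returned in registers, while
   a 12-byte structure, or any type whose size is not a compile-time
   constant (int_size_in_bytes == -1), is returned in memory. */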