1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 59 Temple Place - Suite 330,
24 Boston, MA 02111-1307, USA. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
61 #define UNSPEC_ADDRESS_P(X) \
62 (GET_CODE (X) == UNSPEC \
63 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
64 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66 /* Extract the symbol or label from UNSPEC wrapper X. */
67 #define UNSPEC_ADDRESS(X) \
70 /* Extract the symbol type from UNSPEC wrapper X. */
71 #define UNSPEC_ADDRESS_TYPE(X) \
72 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
74 /* The maximum distance between the top of the stack frame and the
75 value $sp has when we save & restore registers.
77 Use a maximum gap of 0x100 in the mips16 case. We can then use
78 unextended instructions to save and restore registers, and to
79 allocate and deallocate the top part of the frame.
81 The value in the !mips16 case must be a SMALL_OPERAND and must
82 preserve the maximum stack alignment. */
83 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
85 /* True if INSN is a mips.md pattern or asm statement. */
86 #define USEFUL_INSN_P(INSN) \
88 && GET_CODE (PATTERN (INSN)) != USE \
89 && GET_CODE (PATTERN (INSN)) != CLOBBER \
90 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
91 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93 /* If INSN is a delayed branch sequence, return the first instruction
94 in the sequence, otherwise return INSN itself. */
95 #define SEQ_BEGIN(INSN) \
96 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
97 ? XVECEXP (PATTERN (INSN), 0, 0) \
100 /* Likewise for the last instruction in a delayed branch sequence. */
101 #define SEQ_END(INSN) \
102 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
103 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
106 /* Execute the following loop body with SUBINSN set to each instruction
107 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
108 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
109 for ((SUBINSN) = SEQ_BEGIN (INSN); \
110 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
111 (SUBINSN) = NEXT_INSN (SUBINSN))
113 /* Classifies an address.
116 A natural register + offset address. The register satisfies
117 mips_valid_base_register_p and the offset is a const_arith_operand.
120 A LO_SUM rtx. The first operand is a valid base register and
121 the second operand is a symbolic address.
124 A signed 16-bit constant address.
127 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
128 enum mips_address_type {
135 /* A function to save or store a register. The first argument is the
136 register and the second is the stack slot. */
137 typedef void (*mips_save_restore_fn) (rtx, rtx);
139 struct mips16_constant;
140 struct mips_arg_info;
141 struct mips_address_info;
142 struct mips_integer_op;
145 static enum mips_symbol_type mips_classify_symbol (rtx);
146 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
147 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
148 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
149 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
150 static bool mips_classify_address (struct mips_address_info *, rtx,
151 enum machine_mode, int);
152 static int mips_symbol_insns (enum mips_symbol_type);
153 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
154 static rtx mips_force_temporary (rtx, rtx);
155 static rtx mips_split_symbol (rtx, rtx);
156 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
157 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
158 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
159 static unsigned int mips_build_lower (struct mips_integer_op *,
160 unsigned HOST_WIDE_INT);
161 static unsigned int mips_build_integer (struct mips_integer_op *,
162 unsigned HOST_WIDE_INT);
163 static void mips_move_integer (rtx, unsigned HOST_WIDE_INT);
164 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
165 static int m16_check_op (rtx, int, int, int);
166 static bool mips_rtx_costs (rtx, int, int, int *);
167 static int mips_address_cost (rtx);
168 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
169 static void mips_load_call_address (rtx, rtx, int);
170 static bool mips_function_ok_for_sibcall (tree, tree);
171 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
172 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
173 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
174 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
175 tree, int, struct mips_arg_info *);
176 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
177 static void mips_set_architecture (const struct mips_cpu_info *);
178 static void mips_set_tune (const struct mips_cpu_info *);
179 static struct machine_function *mips_init_machine_status (void);
180 static void print_operand_reloc (FILE *, rtx, const char **);
182 static void irix_output_external_libcall (rtx);
184 static void mips_file_start (void);
185 static void mips_file_end (void);
186 static bool mips_rewrite_small_data_p (rtx);
187 static int mips_small_data_pattern_1 (rtx *, void *);
188 static int mips_rewrite_small_data_1 (rtx *, void *);
189 static bool mips_function_has_gp_insn (void);
190 static unsigned int mips_global_pointer (void);
191 static bool mips_save_reg_p (unsigned int);
192 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
193 mips_save_restore_fn);
194 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
195 static void mips_output_cplocal (void);
196 static void mips_emit_loadgp (void);
197 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
198 static void mips_set_frame_expr (rtx);
199 static rtx mips_frame_set (rtx, rtx);
200 static void mips_save_reg (rtx, rtx);
201 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
202 static void mips_restore_reg (rtx, rtx);
203 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
204 HOST_WIDE_INT, tree);
205 static int symbolic_expression_p (rtx);
206 static void mips_select_rtx_section (enum machine_mode, rtx,
207 unsigned HOST_WIDE_INT);
208 static bool mips_in_small_data_p (tree);
209 static int mips_fpr_return_fields (tree, tree *);
210 static bool mips_return_in_msb (tree);
211 static rtx mips_return_fpr_pair (enum machine_mode mode,
212 enum machine_mode mode1, HOST_WIDE_INT,
213 enum machine_mode mode2, HOST_WIDE_INT);
214 static rtx mips16_gp_pseudo_reg (void);
215 static void mips16_fp_args (FILE *, int, int);
216 static void build_mips16_function_stub (FILE *);
217 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
218 static void dump_constants (struct mips16_constant *, rtx);
219 static int mips16_insn_length (rtx);
220 static int mips16_rewrite_pool_refs (rtx *, void *);
221 static void mips16_lay_out_constants (void);
222 static void mips_sim_reset (struct mips_sim *);
223 static void mips_sim_init (struct mips_sim *, state_t);
224 static void mips_sim_next_cycle (struct mips_sim *);
225 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
226 static int mips_sim_wait_regs_2 (rtx *, void *);
227 static void mips_sim_wait_regs_1 (rtx *, void *);
228 static void mips_sim_wait_regs (struct mips_sim *, rtx);
229 static void mips_sim_wait_units (struct mips_sim *, rtx);
230 static void mips_sim_wait_insn (struct mips_sim *, rtx);
231 static void mips_sim_record_set (rtx, rtx, void *);
232 static void mips_sim_issue_insn (struct mips_sim *, rtx);
233 static void mips_sim_issue_nop (struct mips_sim *);
234 static void mips_sim_finish_insn (struct mips_sim *, rtx);
235 static void vr4130_avoid_branch_rt_conflict (rtx);
236 static void vr4130_align_insns (void);
237 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
238 static void mips_avoid_hazards (void);
239 static void mips_reorg (void);
240 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
241 static bool mips_matching_cpu_name_p (const char *, const char *);
242 static const struct mips_cpu_info *mips_parse_cpu (const char *, const char *);
243 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
244 static bool mips_return_in_memory (tree, tree);
245 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
246 static void mips_macc_chains_record (rtx);
247 static void mips_macc_chains_reorder (rtx *, int);
248 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
249 static bool vr4130_true_reg_dependence_p (rtx);
250 static bool vr4130_swap_insns_p (rtx, rtx);
251 static void vr4130_reorder (rtx *, int);
252 static void mips_promote_ready (rtx *, int, int);
253 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
254 static int mips_variable_issue (FILE *, int, rtx, int);
255 static int mips_adjust_cost (rtx, rtx, rtx, int);
256 static int mips_issue_rate (void);
257 static int mips_multipass_dfa_lookahead (void);
258 static void mips_init_libfuncs (void);
259 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
261 static tree mips_build_builtin_va_list (void);
262 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
263 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
266 /* Structure to be filled in by compute_frame_size with register
267 save masks, and offsets for the current function. */
/* NOTE(review): this extract is missing interior lines (the closing brace
   and possibly further fields are not visible).  "vfp" in the field
   comments below presumably refers to the virtual frame pointer used
   during register elimination -- TODO confirm against the full file.  */
269 struct mips_frame_info GTY(())
271 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
272 HOST_WIDE_INT var_size; /* # bytes that variables take up */
273 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
274 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
275 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
276 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
277 unsigned int mask; /* mask of saved gp registers */
278 unsigned int fmask; /* mask of saved fp registers */
279 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
280 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
281 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
282 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
283 bool initialized; /* true if frame size already calculated */
284 int num_gp; /* number of gp registers saved */
285 int num_fp; /* number of fp registers saved */
/* Per-function machine-specific state, created by
   mips_init_machine_status (declared above).
   NOTE(review): this extract truncates the struct -- the closing brace and
   at least one field (for the "$gp needed" flag below) are not visible.  */
288 struct machine_function GTY(()) {
289 /* Pseudo-reg holding the value of $28 in a mips16 function which
290 refers to GP relative global variables. */
291 rtx mips16_gp_pseudo_rtx;
293 /* Current frame information, calculated by compute_frame_size. */
294 struct mips_frame_info frame;
296 /* The register to use as the global pointer within this function. */
297 unsigned int global_pointer;
299 /* True if mips_adjust_insn_length should ignore an instruction's
   hazard length (comment truncated in this extract; wording inferred
   from the field name -- confirm against the full file). */
301 bool ignore_hazard_length_p;
303 /* True if the whole function is suitable for .set noreorder and
   (presumably) .set nomacro -- comment truncated in this extract. */
305 bool all_noreorder_p;
307 /* True if the function is known to have an instruction that needs $gp. */
311 /* Information about a single argument. */
314 /* True if the argument is passed in a floating-point register, or
315 would have been if we hadn't run out of registers. */
318 /* The number of words passed in registers, rounded up. */
319 unsigned int reg_words;
321 /* The offset of the first register from GP_ARG_FIRST or FP_ARG_FIRST,
322 or MAX_ARGS_IN_REGISTERS if the argument is passed entirely
324 unsigned int reg_offset;
326 /* The number of words that must be passed on the stack, rounded up. */
327 unsigned int stack_words;
329 /* The offset from the start of the stack overflow area of the argument's
330 first stack word. Only meaningful when STACK_WORDS is nonzero. */
331 unsigned int stack_offset;
335 /* Information about an address described by mips_address_type.
341 REG is the base register and OFFSET is the constant offset.
344 REG is the register that contains the high part of the address,
345 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
346 is the type of OFFSET's symbol.
349 SYMBOL_TYPE is the type of symbol being referenced. */
351 struct mips_address_info
353 enum mips_address_type type;
356 enum mips_symbol_type symbol_type;
360 /* One stage in a constant building sequence. These sequences have
364 A = A CODE[1] VALUE[1]
365 A = A CODE[2] VALUE[2]
368 where A is an accumulator, each CODE[i] is a binary rtl operation
369 and each VALUE[i] is a constant integer. */
370 struct mips_integer_op {
372 unsigned HOST_WIDE_INT value;
376 /* The largest number of operations needed to load an integer constant.
377 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
378 When the lowest bit is clear, we can try, but reject a sequence with
379 an extra SLL at the end. */
380 #define MIPS_MAX_INTEGER_OPS 7
383 /* Global variables for machine-dependent things. */
385 /* Threshold for data being put into the small data/bss area, instead
386 of the normal data area. */
387 int mips_section_threshold = -1;
389 /* Count the number of .file directives, so that .loc is up to date. */
390 int num_source_filenames = 0;
392 /* Count the number of sdb related labels are generated (to find block
393 start and end boundaries). */
394 int sdb_label_count = 0;
396 /* Next label # for each statement for Silicon Graphics IRIS systems. */
399 /* Linked list of all externals that are to be emitted when optimizing
400 for the global pointer if they haven't been declared by the end of
401 the program with an appropriate .comm or initialization. */
403 struct extern_list GTY (())
405 struct extern_list *next; /* next external */
406 const char *name; /* name of the external */
407 int size; /* size in bytes */
410 static GTY (()) struct extern_list *extern_head = 0;
412 /* Name of the file containing the current function. */
413 const char *current_function_file = "";
415 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
421 /* The next branch instruction is a branch likely, not branch normal. */
422 int mips_branch_likely;
424 /* The operands passed to the last cmpMM expander. */
427 /* The target cpu for code generation. */
428 enum processor_type mips_arch;
429 const struct mips_cpu_info *mips_arch_info;
431 /* The target cpu for optimization and scheduling. */
432 enum processor_type mips_tune;
433 const struct mips_cpu_info *mips_tune_info;
435 /* Which instruction set architecture to use. */
438 /* Which ABI to use. */
441 /* Strings to hold which cpu and instruction set architecture to use. */
442 const char *mips_arch_string; /* for -march=<xxx> */
443 const char *mips_tune_string; /* for -mtune=<xxx> */
444 const char *mips_isa_string; /* for -mips{1,2,3,4} */
445 const char *mips_abi_string; /* for -mabi={32,n32,64,eabi} */
447 /* Whether we are generating mips16 hard float code. In mips16 mode
448 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
449 -msoft-float was not specified by the user, which means that we
450 should arrange to call mips32 hard floating point code. */
451 int mips16_hard_float;
453 const char *mips_cache_flush_func = CACHE_FLUSH_FUNC;
455 /* If TRUE, we split addresses into their high and low parts in the RTL. */
456 int mips_split_addresses;
458 /* Mode used for saving/restoring general purpose registers. */
459 static enum machine_mode gpr_mode;
461 /* Array giving truth value on whether or not a given hard register
462 can support a given mode. */
463 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
465 /* List of all MIPS punctuation characters used by print_operand. */
466 char mips_print_operand_punct[256];
468 /* Map GCC register number to debugger register number. */
469 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
471 /* A copy of the original flag_delayed_branch: see override_options. */
472 static int mips_flag_delayed_branch;
474 static GTY (()) int mips_output_filename_first_time = 1;
476 /* mips_split_p[X] is true if symbols of type X can be split by
477 mips_split_symbol(). */
478 static bool mips_split_p[NUM_SYMBOL_TYPES];
480 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
481 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
482 if they are matched by a special .md file pattern. */
483 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
485 /* Likewise for HIGHs. */
486 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
488 /* Map hard register number to register class */
/* NOTE(review): the array is indexed by hard register number.  The group
   comments below are inferred from the class names on each row; exact
   register numbering should be confirmed against mips.h.  */
489 const enum reg_class mips_regno_to_class[] =
/* General-purpose registers.  M16_REGS/M16_NA_REGS mark mips16
   accessibility; LEA_REGS are the remaining GPRs.  */
491 LEA_REGS, LEA_REGS, M16_NA_REGS, M16_NA_REGS,
492 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
493 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
494 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
495 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
496 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
497 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
498 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
/* Floating-point registers.  */
499 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
500 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
501 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
502 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
503 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
504 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
505 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
506 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
/* hi/lo multiply-divide registers, then status/condition-code
   registers (ST_REGS) and various fake/internal registers.  */
507 HI_REG, LO_REG, NO_REGS, ST_REGS,
508 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
509 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
510 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
/* Coprocessor 0 registers.  */
511 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
512 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
513 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
514 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
515 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
516 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
517 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
518 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
/* Coprocessor 2 registers.  */
519 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
520 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
521 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
522 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
523 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
524 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
525 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
526 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
/* Coprocessor 3 registers.  */
527 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
528 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
529 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
530 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
531 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
532 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
533 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
534 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS
537 /* Map register constraint character to register class. */
538 enum reg_class mips_char_to_class[256];
540 /* A table describing all the processors gcc knows about. Names are
541 matched in the order listed. The first mention of an ISA level is
542 taken as the canonical name for that ISA.
544 To ease comparison, please keep this table in the same order as
545 gas's mips_cpu_info_table[]. */
/* NOTE(review): entries are { name, processor, isa-level }.  The ISA
   group comments below are derived from the third column of each row;
   some original group headers are missing from this extract.  */
546 const struct mips_cpu_info mips_cpu_info_table[] = {
547 /* Entries for generic ISAs */
548 { "mips1", PROCESSOR_R3000, 1 },
549 { "mips2", PROCESSOR_R6000, 2 },
550 { "mips3", PROCESSOR_R4000, 3 },
551 { "mips4", PROCESSOR_R8000, 4 },
552 { "mips32", PROCESSOR_4KC, 32 },
553 { "mips32r2", PROCESSOR_M4K, 33 },
554 { "mips64", PROCESSOR_5KC, 64 },
/* MIPS I processors (ISA level 1).  */
557 { "r3000", PROCESSOR_R3000, 1 },
558 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
559 { "r3900", PROCESSOR_R3900, 1 },
/* MIPS II processors (ISA level 2).  */
562 { "r6000", PROCESSOR_R6000, 2 },
/* MIPS III processors (ISA level 3).  */
565 { "r4000", PROCESSOR_R4000, 3 },
566 { "vr4100", PROCESSOR_R4100, 3 },
567 { "vr4111", PROCESSOR_R4111, 3 },
568 { "vr4120", PROCESSOR_R4120, 3 },
569 { "vr4130", PROCESSOR_R4130, 3 },
570 { "vr4300", PROCESSOR_R4300, 3 },
571 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
572 { "r4600", PROCESSOR_R4600, 3 },
573 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
574 { "r4650", PROCESSOR_R4650, 3 },
/* MIPS IV processors (ISA level 4).  */
577 { "r8000", PROCESSOR_R8000, 4 },
578 { "vr5000", PROCESSOR_R5000, 4 },
579 { "vr5400", PROCESSOR_R5400, 4 },
580 { "vr5500", PROCESSOR_R5500, 4 },
581 { "rm7000", PROCESSOR_R7000, 4 },
582 { "rm9000", PROCESSOR_R9000, 4 },
/* MIPS32 processors (ISA level 32).  */
585 { "4kc", PROCESSOR_4KC, 32 },
586 { "4kp", PROCESSOR_4KC, 32 }, /* = 4kc */
588 /* MIPS32 Release 2 */
589 { "m4k", PROCESSOR_M4K, 33 },
/* MIPS64 processors (ISA level 64).  */
592 { "5kc", PROCESSOR_5KC, 64 },
593 { "20kc", PROCESSOR_20KC, 64 },
594 { "sb1", PROCESSOR_SB1, 64 },
595 { "sr71000", PROCESSOR_SR71000, 64 },
601 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
602 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
603 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
606 /* Initialize the GCC target structure. */
607 #undef TARGET_ASM_ALIGNED_HI_OP
608 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
609 #undef TARGET_ASM_ALIGNED_SI_OP
610 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
611 #undef TARGET_ASM_ALIGNED_DI_OP
612 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
614 #undef TARGET_ASM_FUNCTION_PROLOGUE
615 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
616 #undef TARGET_ASM_FUNCTION_EPILOGUE
617 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
618 #undef TARGET_ASM_SELECT_RTX_SECTION
619 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
621 #undef TARGET_SCHED_REORDER
622 #define TARGET_SCHED_REORDER mips_sched_reorder
623 #undef TARGET_SCHED_VARIABLE_ISSUE
624 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
625 #undef TARGET_SCHED_ADJUST_COST
626 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
627 #undef TARGET_SCHED_ISSUE_RATE
628 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
629 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
630 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
631 mips_multipass_dfa_lookahead
633 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
634 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
636 #undef TARGET_VALID_POINTER_MODE
637 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
638 #undef TARGET_RTX_COSTS
639 #define TARGET_RTX_COSTS mips_rtx_costs
640 #undef TARGET_ADDRESS_COST
641 #define TARGET_ADDRESS_COST mips_address_cost
643 #undef TARGET_IN_SMALL_DATA_P
644 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
646 #undef TARGET_MACHINE_DEPENDENT_REORG
647 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
649 #undef TARGET_ASM_FILE_START
650 #undef TARGET_ASM_FILE_END
651 #define TARGET_ASM_FILE_START mips_file_start
652 #define TARGET_ASM_FILE_END mips_file_end
653 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
654 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
656 #undef TARGET_INIT_LIBFUNCS
657 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
659 #undef TARGET_BUILD_BUILTIN_VA_LIST
660 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
661 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
662 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
664 #undef TARGET_PROMOTE_FUNCTION_ARGS
665 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
666 #undef TARGET_PROMOTE_FUNCTION_RETURN
667 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
668 #undef TARGET_PROMOTE_PROTOTYPES
669 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
671 #undef TARGET_RETURN_IN_MEMORY
672 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
673 #undef TARGET_RETURN_IN_MSB
674 #define TARGET_RETURN_IN_MSB mips_return_in_msb
676 #undef TARGET_ASM_OUTPUT_MI_THUNK
677 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
678 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
679 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
681 #undef TARGET_SETUP_INCOMING_VARARGS
682 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
683 #undef TARGET_STRICT_ARGUMENT_NAMING
684 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
685 #undef TARGET_MUST_PASS_IN_STACK
686 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
687 #undef TARGET_PASS_BY_REFERENCE
688 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
690 struct gcc_target targetm = TARGET_INITIALIZER;
692 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
/* NOTE(review): this extract is missing interior lines -- the conditions
   guarding several of the returns below (and some braces) are not
   visible.  The comments added here describe only the visible code.  */
694 static enum mips_symbol_type
695 mips_classify_symbol (rtx x)
/* Labels: classification depends on conditions whose lines are missing
   here; the visible outcomes are constant-pool, local-GOT or general.  */
697 if (GET_CODE (x) == LABEL_REF)
700 return SYMBOL_CONSTANT_POOL;
702 return SYMBOL_GOT_LOCAL;
703 return SYMBOL_GENERAL;
706 if (GET_CODE (x) != SYMBOL_REF)
/* Constant-pool symbols: small pool entries (by mode size) may be
   placed in the small-data section, governed by mips_section_threshold.  */
709 if (CONSTANT_POOL_ADDRESS_P (x))
712 return SYMBOL_CONSTANT_POOL;
715 return SYMBOL_GOT_LOCAL;
717 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
718 return SYMBOL_SMALL_DATA;
720 return SYMBOL_GENERAL;
723 if (SYMBOL_REF_SMALL_P (x))
724 return SYMBOL_SMALL_DATA;
/* No attached decl: fall back on SYMBOL_REF_LOCAL_P to choose between
   local and global GOT accesses.  */
728 if (SYMBOL_REF_DECL (x) == 0)
729 return SYMBOL_REF_LOCAL_P (x) ? SYMBOL_GOT_LOCAL : SYMBOL_GOT_GLOBAL;
731 /* There are three cases to consider:
733 - o32 PIC (either with or without explicit relocs)
734 - n32/n64 PIC without explicit relocs
735 - n32/n64 PIC with explicit relocs
737 In the first case, both local and global accesses will use an
738 R_MIPS_GOT16 relocation. We must correctly predict which of
739 the two semantics (local or global) the assembler and linker
740 will apply. The choice doesn't depend on the symbol's
741 visibility, so we deliberately ignore decl_visibility and
744 In the second case, the assembler will not use R_MIPS_GOT16
745 relocations, but it chooses between local and global accesses
746 in the same way as for o32 PIC.
748 In the third case we have more freedom since both forms of
749 access will work for any kind of symbol. However, there seems
750 little point in doing things differently. */
/* Public decls get global GOT treatment; everything else is local.  */
751 if (DECL_P (SYMBOL_REF_DECL (x)) && TREE_PUBLIC (SYMBOL_REF_DECL (x)))
752 return SYMBOL_GOT_GLOBAL;
754 return SYMBOL_GOT_LOCAL;
757 return SYMBOL_GENERAL;
761 /* Split X into a base and a constant offset, storing them in *BASE
762 and *OFFSET respectively. */
/* NOTE(review): interior lines are missing from this extract (including
   the initialization of *OFFSET and the final stores).  The visible
   logic strips a CONST wrapper and peels a trailing (plus BASE CONST_INT),
   accumulating the integer part into *OFFSET.  */
765 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
769 if (GET_CODE (x) == CONST)
772 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
774 *offset += INTVAL (XEXP (x, 1));
781 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
782 to the same object as SYMBOL. */
/* NOTE(review): return statements and some condition lines are missing
   from this extract.  The visible checks bound OFFSET by the size of the
   constant-pool entry or of the symbol's underlying decl.  */
785 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
787 if (GET_CODE (symbol) != SYMBOL_REF)
/* Constant-pool entry: valid offsets lie within the entry's mode size.  */
790 if (CONSTANT_POOL_ADDRESS_P (symbol)
792 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
/* Decl-backed symbol: valid offsets lie within the decl's type size.  */
795 if (SYMBOL_REF_DECL (symbol) != 0
797 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
804 /* Return true if X is a symbolic constant that can be calculated in
805 the same way as a bare symbol. If it is, store the type of the
806 symbol in *SYMBOL_TYPE. */
/* NOTE(review): interior lines are missing from this extract (e.g. the
   else-branch after classification, some case labels and returns).  */
809 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
811 HOST_WIDE_INT offset;
/* Separate the symbolic base from any constant offset, then classify
   the base: UNSPEC wrappers carry their type directly, bare
   SYMBOL_REF/LABEL_REFs are classified from scratch.  */
813 mips_split_const (x, &x, &offset);
814 if (UNSPEC_ADDRESS_P (x))
815 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
816 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
817 *symbol_type = mips_classify_symbol (x);
824 /* Check whether a nonzero offset is valid for the underlying
826 switch (*symbol_type)
832 /* If the target has 64-bit pointers and the object file only
833 supports 32-bit symbols, the values of those symbols will be
834 sign-extended. In this case we can't allow an arbitrary offset
835 in case the 32-bit value X + OFFSET has a different sign from X. */
836 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
837 return mips_offset_within_object_p (x, offset);
839 /* In other cases the relocations can handle any offset. */
842 case SYMBOL_CONSTANT_POOL:
843 /* Allow constant pool references to be converted to LABEL+CONSTANT.
844 In this case, we no longer have access to the underlying constant,
845 but the original symbol-based access was known to be valid. */
846 if (GET_CODE (x) == LABEL_REF)
851 case SYMBOL_SMALL_DATA:
852 /* Make sure that the offset refers to something within the
853 underlying object. This should guarantee that the final
854 PC- or GP-relative offset is within the 16-bit limit. */
855 return mips_offset_within_object_p (x, offset);
857 case SYMBOL_GOT_LOCAL:
858 case SYMBOL_GOTOFF_PAGE:
859 /* The linker should provide enough local GOT entries for a
860 16-bit offset. Larger offsets may lead to GOT overflow. */
861 return SMALL_OPERAND (offset);
/* The remaining GOT-relative types: their return value is on a line
   missing from this extract (presumably offsets are rejected).  */
863 case SYMBOL_GOT_GLOBAL:
864 case SYMBOL_GOTOFF_GLOBAL:
865 case SYMBOL_GOTOFF_CALL:
866 case SYMBOL_GOTOFF_LOADGP:
873 /* Return true if X is a symbolic constant whose value is not split
874 into separate relocations. */
/* NOTE(review): the return-type line and braces are missing from this
   extract.  "Not split" means mips_split_p[] (defined above) is false
   for the symbol's type.  */
877 mips_atomic_symbolic_constant_p (rtx x)
879 enum mips_symbol_type type;
880 return mips_symbolic_constant_p (x, &type) && !mips_split_p[type];
884 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
/* NOTE(review): interior lines are missing from this extract (the
   return-type line, braces, and the non-strict handling of pseudo
   registers around the reg_renumber lookup).  */
887 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
/* Pseudo registers: map through reg_renumber to the assigned hard
   register (the guard for the non-strict case is not visible here).  */
889 if (regno >= FIRST_PSEUDO_REGISTER)
893 regno = reg_renumber[regno];
896 /* These fake registers will be eliminated to either the stack or
897 hard frame pointer, both of which are usually valid base registers.
898 Reload deals with the cases where the eliminated form isn't valid. */
899 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
902 /* In mips16 mode, the stack pointer can only address word and doubleword
903 values, nothing smaller. There are two problems here:
905 (a) Instantiating virtual registers can introduce new uses of the
906 stack pointer. If these virtual registers are valid addresses,
907 the stack pointer should be too.
909 (b) Most uses of the stack pointer are not made explicit until
910 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
911 We don't know until that stage whether we'll be eliminating to the
912 stack pointer (which needs the restriction) or the hard frame
913 pointer (which doesn't).
915 All in all, it seems more consistent to only enforce this restriction
916 during and after reload. */
917 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
918 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
/* Ordinary case: mips16 code may only use the M16 register subset as
   bases; otherwise any general-purpose register is acceptable.  */
920 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
924 /* Return true if X is a valid base register for the given mode.
925 Allow only hard registers if STRICT. */
/* NOTE(review): the return-type line, braces, and the body of the SUBREG
   branch are missing from this extract -- presumably it strips the
   SUBREG to its inner register before the REG check below; confirm
   against the full file.  */
928 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
930 if (!strict && GET_CODE (x) == SUBREG)
933 return (GET_CODE (x) == REG
934 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
938 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
939 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
/* NOTE(review): interior lines are missing from this extract, including
   the switch head, several case labels, and most return statements; the
   comments below describe only the visible fragments.  */
942 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
943 enum machine_mode mode)
/* (Missing case label) -- direct addressing allowed except in mips16.  */
948 return !TARGET_MIPS16;
950 case SYMBOL_SMALL_DATA:
953 case SYMBOL_CONSTANT_POOL:
954 /* PC-relative addressing is only available for lw and ld. */
955 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
957 case SYMBOL_GOT_LOCAL:
960 case SYMBOL_GOT_GLOBAL:
961 /* The address will have to be loaded from the GOT first. */
/* GOT-offset types: their return value is on a line missing from this
   extract.  */
964 case SYMBOL_GOTOFF_PAGE:
965 case SYMBOL_GOTOFF_GLOBAL:
966 case SYMBOL_GOTOFF_CALL:
967 case SYMBOL_GOTOFF_LOADGP:
977 /* Return true if X is a valid address for machine mode MODE. If it is,
978 fill in INFO appropriately. STRICT is true if we should only accept
979 hard base registers. */
/* NOTE(review): the case labels of this switch (REG, PLUS, LO_SUM,
   CONST_INT, and the symbolic codes) are not visible in this extract;
   the arms below are annotated with the form each one handles.  */
982 mips_classify_address (struct mips_address_info *info, rtx x,
983 enum machine_mode mode, int strict)
985 switch (GET_CODE (x))
/* Plain base register, implicit zero offset.  */
989 info->type = ADDRESS_REG;
991 info->offset = const0_rtx;
992 return mips_valid_base_register_p (info->reg, mode, strict);
/* Base register plus a constant displacement.  */
995 info->type = ADDRESS_REG;
996 info->reg = XEXP (x, 0);
997 info->offset = XEXP (x, 1);
998 return (mips_valid_base_register_p (info->reg, mode, strict)
999 && const_arith_operand (info->offset, VOIDmode))
;
/* LO_SUM of a base register and a symbolic low part; the symbol must
   have a %lo-style relocation available (mips_lo_relocs).  */
1002 info->type = ADDRESS_LO_SUM;
1003 info->reg = XEXP (x, 0);
1004 info->offset = XEXP (x, 1);
1005 return (mips_valid_base_register_p (info->reg, mode, strict)
1006 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1007 && mips_symbolic_address_p (info->symbol_type, mode)
1008 && mips_lo_relocs[info->symbol_type] != 0);
1011 /* Small-integer addresses don't occur very often, but they
1012 are legitimate if $0 is a valid base register. */
1013 info->type = ADDRESS_CONST_INT;
1014 return !TARGET_MIPS16 && SMALL_INT (x);
/* Bare symbol or label reference that needs no splitting.  */
1019 info->type = ADDRESS_SYMBOLIC;
1020 return (mips_symbolic_constant_p (x, &info->symbol_type)
1021 && mips_symbolic_address_p (info->symbol_type, mode)
1022 && !mips_split_p[info->symbol_type]);
1029 /* Return the number of instructions needed to load a symbol of the
1030 given type into a register. If valid in an address, the same number
1031 of instructions are needed for loads and stores. Treat extended
1032 mips16 instructions as two instructions. */
1035 mips_symbol_insns (enum mips_symbol_type type)
1039 case SYMBOL_GENERAL:
1040 /* In mips16 code, general symbols must be fetched from the
1045 /* When using 64-bit symbols, we need 5 preparatory instructions,
1048 lui $at,%highest(symbol)
1049 daddiu $at,$at,%higher(symbol)
1051 daddiu $at,$at,%hi(symbol)
1054 The final address is then $at + %lo(symbol). With 32-bit
1055 symbols we just need a preparatory lui. */
1056 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1058 case SYMBOL_SMALL_DATA:
1061 case SYMBOL_CONSTANT_POOL:
1062 /* This case is for mips16 only. Assume we'll need an
1063 extended instruction. */
1066 case SYMBOL_GOT_LOCAL:
1067 case SYMBOL_GOT_GLOBAL:
1068 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1069 the local/global classification is accurate. See override_options
1072 The worst cases are:
1074 (1) For local symbols when generating o32 or o64 code. The assembler
1080 ...and the final address will be $at + %lo(symbol).
1082 (2) For global symbols when -mxgot. The assembler will use:
1084 lui $at,%got_hi(symbol)
1087 ...and the final address will be $at + %got_lo(symbol). */
1090 case SYMBOL_GOTOFF_PAGE:
1091 case SYMBOL_GOTOFF_GLOBAL:
1092 case SYMBOL_GOTOFF_CALL:
1093 case SYMBOL_GOTOFF_LOADGP:
1094 case SYMBOL_64_HIGH:
/* These types take one instruction, or two if the address must be
   split into a high part and a low-part offset.  */
1097 /* Check whether the offset is a 16- or 32-bit value. */
1098 return mips_split_p[type] ? 2 : 1;
1103 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1106 mips_stack_address_p (rtx x, enum machine_mode mode)
1108 struct mips_address_info addr;
/* Classify X (non-strict) and require a reg+offset form whose base
   is the stack pointer.  */
1110 return (mips_classify_address (&addr, x, mode, false)
1111 && addr.type == ADDRESS_REG
1112 && addr.reg == stack_pointer_rtx);
1115 /* Return true if a value at OFFSET bytes from BASE can be accessed
1116 using an unextended mips16 instruction. MODE is the mode of the
1119 Usually the offset in an unextended instruction is a 5-bit field.
1120 The offset is unsigned and shifted left once for HIs, twice
1121 for SIs, and so on. An exception is SImode accesses off the
1122 stack pointer, which have an 8-bit immediate field. */
1125 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
/* OFFSET must be a non-negative CONST_INT aligned to the mode size.  */
1128 && GET_CODE (offset) == CONST_INT
1129 && INTVAL (offset) >= 0
1130 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
/* SImode $sp-relative accesses get the wider 8-bit field (256 slots);
   everything else gets the 5-bit field (32 slots).  */
1132 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1133 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1134 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1140 /* Return the number of instructions needed to load or store a value
1141 of mode MODE at X. Return 0 if X isn't valid for MODE.
1143 For mips16 code, count extended instructions as two instructions. */
1146 mips_address_insns (rtx x, enum machine_mode mode)
1148 struct mips_address_info addr;
1151 if (mode == BLKmode)
1152 /* BLKmode is used for single unaligned loads and stores. */
1155 /* Each word of a multi-word value will be accessed individually. */
1156 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1158 if (mips_classify_address (&addr, x, mode, false))
/* NOTE(review): the switch over addr.type and the ADDRESS_REG arm's
   leading lines are not fully visible in this extract.  */
1163 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1167 case ADDRESS_LO_SUM:
1168 return (TARGET_MIPS16 ? factor * 2 : factor);
1170 case ADDRESS_CONST_INT:
1173 case ADDRESS_SYMBOLIC:
1174 return factor * mips_symbol_insns (addr.symbol_type);
1180 /* Likewise for constant X. */
/* Return the number of instructions needed to load constant X into a
   register, or 0 if X cannot be handled.  */
1183 mips_const_insns (rtx x)
1185 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1186 enum mips_symbol_type symbol_type;
1187 HOST_WIDE_INT offset;
1189 switch (GET_CODE (x))
/* HIGH: only valid when the underlying symbol type can be split.  */
1193 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1194 || !mips_split_p[symbol_type])
1201 /* Unsigned 8-bit constants can be loaded using an unextended
1202 LI instruction. Unsigned 16-bit constants can be loaded
1203 using an extended LI. Negative constants must be loaded
1204 using LI and then negated. */
1205 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1206 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1207 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1208 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
/* Non-mips16 CONST_INT: cost of the full build sequence.  */
1211 return mips_build_integer (codes, INTVAL (x));
/* CONST_DOUBLE/vector: only an all-zero value is cheap (1 insn).  */
1214 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1220 /* See if we can refer to X directly. */
1221 if (mips_symbolic_constant_p (x, &symbol_type))
1222 return mips_symbol_insns (symbol_type);
1224 /* Otherwise try splitting the constant into a base and offset.
1225 16-bit offsets can be added using an extra addiu. Larger offsets
1226 must be calculated separately and then added to the base. */
1227 mips_split_const (x, &x, &offset);
1230 int n = mips_const_insns (x);
1233 if (SMALL_OPERAND (offset))
1236 return n + 1 + mips_build_integer (codes, offset);
/* SYMBOL_REF/LABEL_REF fall-back: classify and cost the symbol.  */
1243 return mips_symbol_insns (mips_classify_symbol (x));
1251 /* Return the number of instructions needed for memory reference X.
1252 Count extended mips16 instructions as two instructions. */
1255 mips_fetch_insns (rtx x)
/* Non-MEM rtxes are not memory references.  */
1257 if (GET_CODE (x) != MEM)
1260 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1264 /* Return the number of instructions needed for an integer division. */
1267 mips_idiv_insns (void)
/* Extra instructions are needed for the divide-by-zero check and for
   the R4000/R4400 errata workarounds.  */
1272 if (TARGET_CHECK_ZERO_DIV)
1274 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1279 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1280 returns a nonzero value if X is a legitimate address for a memory
1281 operand of the indicated MODE. STRICT is nonzero if this function
1282 is called during reload. */
1285 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1287 struct mips_address_info addr;
/* mips_classify_address does all the work; ADDR is discarded.  */
1289 return mips_classify_address (&addr, x, mode, strict);
1293 /* Copy VALUE to a register and return that register. If new pseudos
1294 are allowed, copy it into a new register, otherwise use DEST. */
1297 mips_force_temporary (rtx dest, rtx value)
1299 if (!no_new_pseudos)
1300 return force_reg (Pmode, value);
/* No new pseudos: reuse DEST (copied so later RTL sharing is safe).  */
1303 emit_move_insn (copy_rtx (dest), value);
1309 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1310 and is used to load the high part into a register. */
1313 mips_split_symbol (rtx temp, rtx addr)
/* In mips16 code the "high part" is the cached $gp pseudo; otherwise
   it is a HIGH rtx forced into a register.  */
1318 high = mips16_gp_pseudo_reg ();
1320 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1321 return gen_rtx_LO_SUM (Pmode, high, addr);
1325 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1326 type SYMBOL_TYPE. */
1329 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1332 HOST_WIDE_INT offset;
/* Wrap only the symbolic base in the UNSPEC; re-apply any constant
   offset outside the wrapper.  */
1334 mips_split_const (address, &base, &offset);
1335 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1336 UNSPEC_ADDRESS_FIRST + symbol_type);
1337 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1341 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1342 high part to BASE and return the result. Just return BASE otherwise.
1343 TEMP is available as a temporary register if needed.
1345 The returned expression can be used as the first operand to a LO_SUM. */
1348 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1349 enum mips_symbol_type symbol_type)
1351 if (mips_split_p[symbol_type])
/* Compute HIGH(addr) into a register and add it to BASE.  */
1353 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1354 addr = mips_force_temporary (temp, addr);
1355 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1361 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1362 mips_force_temporary; it is only needed when OFFSET is not a
1366 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1368 if (!SMALL_OPERAND (offset))
/* NOTE(review): the branch split between the two strategies below is
   not fully visible here; the first appears to be the mips16 path.  */
1373 /* Load the full offset into a register so that we can use
1374 an unextended instruction for the address itself. */
1375 high = GEN_INT (offset);
1380 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1381 high = GEN_INT (CONST_HIGH_PART (offset));
1382 offset = CONST_LOW_PART (offset);
/* Fold the high part into REG, leaving a small residual offset.  */
1384 high = mips_force_temporary (temp, high);
1385 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1387 return plus_constant (reg, offset);
1391 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
1392 be legitimized in a way that the generic machinery might not expect,
1393 put the new address in *XLOC and return true. MODE is the mode of
1394 the memory being accessed. */
1397 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
1399 enum mips_symbol_type symbol_type;
1401 /* See if the address can split into a high part and a LO_SUM. */
1402 if (mips_symbolic_constant_p (*xloc, &symbol_type)
1403 && mips_symbolic_address_p (symbol_type, mode)
1404 && mips_split_p[symbol_type])
1406 *xloc = mips_split_symbol (0, *xloc);
1410 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
1412 /* Handle REG + CONSTANT using mips_add_offset. */
1415 reg = XEXP (*xloc, 0);
/* Force an invalid base into a fresh register first.  */
1416 if (!mips_valid_base_register_p (reg, mode, 0))
1417 reg = copy_to_mode_reg (Pmode, reg);
1418 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
1426 /* Subroutine of mips_build_integer (with the same interface).
1427 Assume that the final action in the sequence should be a left shift. */
1430 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1432 unsigned int i, shift;
1434 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1435 since signed numbers are easier to load than unsigned ones. */
1437 while ((value & 1) == 0)
1438 value /= 2, shift++;
/* Build the reduced value, then append the compensating ASHIFT.  */
1440 i = mips_build_integer (codes, value);
1441 codes[i].code = ASHIFT;
1442 codes[i].value = shift;
1447 /* As for mips_build_shift, but assume that the final action will be
1448 an IOR or PLUS operation. */
1451 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1453 unsigned HOST_WIDE_INT high;
/* HIGH is VALUE with its low 16 bits cleared.  */
1456 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1457 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1459 /* The constant is too complex to load with a simple lui/ori pair
1460 so our goal is to clear as many trailing zeros as possible.
1461 In this case, we know bit 16 is set and that the low 16 bits
1462 form a negative number. If we subtract that number from VALUE,
1463 we will clear at least the lowest 17 bits, maybe more. */
1464 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1465 codes[i].code = PLUS;
1466 codes[i].value = CONST_LOW_PART (value);
/* Simple case: build HIGH, then IOR in the low 16 bits.  */
1470 i = mips_build_integer (codes, high);
1471 codes[i].code = IOR;
1472 codes[i].value = value & 0xffff;
1478 /* Fill CODES with a sequence of rtl operations to load VALUE.
1479 Return the number of operations needed. */
1482 mips_build_integer (struct mips_integer_op *codes,
1483 unsigned HOST_WIDE_INT value)
1485 if (SMALL_OPERAND (value)
1486 || SMALL_OPERAND_UNSIGNED (value)
1487 || LUI_OPERAND (value))
1489 /* The value can be loaded with a single instruction. */
1490 codes[0].code = UNKNOWN;
1491 codes[0].value = value;
1494 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1496 /* Either the constant is a simple LUI/ORI combination or its
1497 lowest bit is set. We don't want to shift in this case. */
1498 return mips_build_lower (codes, value);
1500 else if ((value & 0xffff) == 0)
1502 /* The constant will need at least three actions. The lowest
1503 16 bits are clear, so the final action will be a shift. */
1504 return mips_build_shift (codes, value);
1508 /* The final action could be a shift, add or inclusive OR.
1509 Rather than use a complex condition to select the best
1510 approach, try both mips_build_shift and mips_build_lower
1511 and pick the one that gives the shortest sequence.
1512 Note that this case is only used once per constant. */
1513 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1514 unsigned int cost, alt_cost;
1516 cost = mips_build_shift (codes, value);
1517 alt_cost = mips_build_lower (alt_codes, value);
/* Keep whichever sequence is shorter.  */
1518 if (alt_cost < cost)
1520 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1528 /* Move VALUE into register DEST. */
/* Emits the mips_build_integer sequence as real instructions, using
   DEST itself as the running accumulator.  */
1531 mips_move_integer (rtx dest, unsigned HOST_WIDE_INT value)
1533 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1534 enum machine_mode mode;
1535 unsigned int i, cost;
1538 mode = GET_MODE (dest);
1539 cost = mips_build_integer (codes, value);
1541 /* Apply each binary operation to X. Invariant: X is a legitimate
1542 source operand for a SET pattern. */
1543 x = GEN_INT (codes[0].value);
1544 for (i = 1; i < cost; i++)
/* Materialize the partial result in DEST, then apply the next op.  */
1547 emit_move_insn (dest, x), x = dest;
1549 x = force_reg (mode, x);
1550 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
1553 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1557 /* Subroutine of mips_legitimize_move. Move constant SRC into register
1558 DEST given that SRC satisfies immediate_operand but doesn't satisfy
1562 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
1565 HOST_WIDE_INT offset;
1566 enum mips_symbol_type symbol_type;
1568 /* Split moves of big integers into smaller pieces. In mips16 code,
1569 it's better to force the constant into memory instead. */
1570 if (GET_CODE (src) == CONST_INT && !TARGET_MIPS16)
1572 mips_move_integer (dest, INTVAL (src));
1576 /* See if the symbol can be split. For mips16, this is often worse than
1577 forcing it in the constant pool since it needs the single-register form
1578 of addiu or daddiu. */
1580 && mips_symbolic_constant_p (src, &symbol_type)
1581 && mips_split_p[symbol_type]
)
1583 emit_move_insn (dest, mips_split_symbol (dest, src));
1587 /* If we have (const (plus symbol offset)), load the symbol first
1588 and then add in the offset. This is usually better than forcing
1589 the constant into memory, at least in non-mips16 code. */
1590 mips_split_const (src, &base, &offset);
1593 && (!no_new_pseudos || SMALL_OPERAND (offset)))
1595 base = mips_force_temporary (dest, base);
1596 emit_move_insn (dest, mips_add_offset (0, base, offset));
/* Last resort: put SRC in the constant pool and load it.  */
1600 src = force_const_mem (mode, src);
1602 /* When using explicit relocs, constant pool references are sometimes
1603 not legitimate addresses. */
1604 if (!memory_operand (src, VOIDmode))
1605 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
1606 emit_move_insn (dest, src);
1610 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
1611 sequence that is valid. */
1614 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
/* A non-register destination needs SRC in a register (or $0).  */
1616 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
1618 emit_move_insn (dest, force_reg (mode, src));
1622 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
1623 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1624 && REG_P (src) && MD_REG_P (REGNO (src))
1625 && REG_P (dest) && GP_REG_P (REGNO (dest)))
/* The mfhilo patterns also mention the other HI/LO register.  */
1627 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
1628 if (GET_MODE_SIZE (mode) <= 4)
1629 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
1630 gen_rtx_REG (SImode, REGNO (src)),
1631 gen_rtx_REG (SImode, other_regno)));
1633 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
1634 gen_rtx_REG (DImode, REGNO (src)),
1635 gen_rtx_REG (DImode, other_regno)));
1639 /* We need to deal with constants that would be legitimate
1640 immediate_operands but not legitimate move_operands. */
1641 if (CONSTANT_P (src) && !move_operand (src, mode))
1643 mips_legitimize_const_move (mode, dest, src);
/* Record the original constant so later passes can still see it.  */
1644 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
1650 /* We need a lot of little routines to check constant values on the
1651 mips16. These are used to figure out how long the instruction will
1652 be. It would be much better to do this using constraints, but
1653 there aren't nearly enough letters available. */
/* Shared helper: OP must be a CONST_INT in [LOW, HIGH] with none of
   the MASK bits set (MASK enforces alignment for scaled fields).  */
1656 m16_check_op (rtx op, int low, int high, int mask)
1658 return (GET_CODE (op) == CONST_INT
1659 && INTVAL (op) >= low
1660 && INTVAL (op) <= high
1661 && (INTVAL (op) & mask) == 0);
/* Accept a CONST_INT in [1, 8].  */
1665 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1667 return m16_check_op (op, 0x1, 0x8, 0);
/* Accept a CONST_INT in [-0x8, 0x7].  */
1671 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1673 return m16_check_op (op, - 0x8, 0x7, 0);
/* Accept a CONST_INT in [-0x7, 0x8] (negated simm4 range).  */
1677 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1679 return m16_check_op (op, - 0x7, 0x8, 0);
/* Accept a CONST_INT in [-0x10, 0xf].  */
1683 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1685 return m16_check_op (op, - 0x10, 0xf, 0);
/* Accept a CONST_INT in [-0xf, 0x10] (negated simm5 range).  */
1689 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1691 return m16_check_op (op, - 0xf, 0x10, 0);
/* Accept a CONST_INT multiple of 4 in [-0x40, 0x3c].  */
1695 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1697 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
/* Accept a CONST_INT multiple of 4 in [-0x3c, 0x40].  */
1701 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1703 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
/* Accept a CONST_INT in [-0x80, 0x7f].  */
1707 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1709 return m16_check_op (op, - 0x80, 0x7f, 0);
/* Accept a CONST_INT in [-0x7f, 0x80] (negated simm8 range).  */
1713 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1715 return m16_check_op (op, - 0x7f, 0x80, 0);
/* Accept a CONST_INT in [0, 0xff].  */
1719 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1721 return m16_check_op (op, 0x0, 0xff, 0);
/* Accept a CONST_INT in [-0xff, 0].  */
1725 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1727 return m16_check_op (op, - 0xff, 0x0, 0);
/* Accept a CONST_INT in [-1, 0xfe].  */
1731 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1733 return m16_check_op (op, - 0x1, 0xfe, 0);
/* Accept a CONST_INT multiple of 4 in [0, 0x3fc].  */
1737 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1739 return m16_check_op (op, 0x0, 0xff << 2, 3);
/* Accept a CONST_INT multiple of 4 in [-0x3fc, 0].  */
1743 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1745 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
/* Accept a CONST_INT multiple of 8 in [-0x400, 0x3f8].  */
1749 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1751 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
/* Accept a CONST_INT multiple of 8 in [-0x3f8, 0x400].  */
1755 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1757 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
/* Implement the RTX_COSTS target hook: estimate the cost of rtx X,
   writing COSTS_N_INSNS-scaled values through TOTAL.  OUTER_CODE is the
   code of the expression containing X.  NOTE(review): the case labels
   of the big switch (CONST_INT, MEM, the arithmetic codes, MULT, DIV,
   sign/zero extend, ...) and many return statements are not visible in
   this extract; the per-tuning latency ladders below belong to the
   multiply and divide cases -- confirm against the full file.  */
1761 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
1763 enum machine_mode mode = GET_MODE (x);
1770 /* Always return 0, since we don't have different sized
1771 instructions, hence different costs according to Richard
1777 /* A number between 1 and 8 inclusive is efficient for a shift.
1778 Otherwise, we will need an extended instruction. */
1779 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
1780 || (outer_code) == LSHIFTRT)
1782 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
1785 *total = COSTS_N_INSNS (1);
1789 /* We can use cmpi for an xor with an unsigned 16 bit value. */
1790 if ((outer_code) == XOR
1791 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
1797 /* We may be able to use slt or sltu for a comparison with a
1798 signed 16 bit value. (The boundary conditions aren't quite
1799 right, but this is just a heuristic anyhow.) */
1800 if (((outer_code) == LT || (outer_code) == LE
1801 || (outer_code) == GE || (outer_code) == GT
1802 || (outer_code) == LTU || (outer_code) == LEU
1803 || (outer_code) == GEU || (outer_code) == GTU)
1804 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
1810 /* Equality comparisons with 0 are cheap. */
1811 if (((outer_code) == EQ || (outer_code) == NE)
1818 /* Constants in the range 0...255 can be loaded with an unextended
1819 instruction. They are therefore as cheap as a register move.
1821 Given the choice between "li R1,0...255" and "move R1,R2"
1822 (where R2 is a known constant), it is usually better to use "li",
1823 since we do not want to unnecessarily extend the lifetime of R2. */
1824 if (outer_code == SET
1826 && INTVAL (x) < 256)
1832 /* Otherwise fall through to the handling below. */
1838 if (LEGITIMATE_CONSTANT_P (x))
1840 *total = COSTS_N_INSNS (1);
1845 /* The value will need to be fetched from the constant pool. */
1846 *total = CONSTANT_POOL_COST;
1852 /* If the address is legitimate, return the number of
1853 instructions it needs, otherwise use the default handling. */
1854 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
1857 *total = COSTS_N_INSNS (1 + n);
1864 *total = COSTS_N_INSNS (6);
1868 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
1874 if (mode == DImode && !TARGET_64BIT)
1876 *total = COSTS_N_INSNS (2);
1884 if (mode == DImode && !TARGET_64BIT)
1886 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
1893 if (mode == SFmode || mode == DFmode)
1894 *total = COSTS_N_INSNS (1);
1896 *total = COSTS_N_INSNS (4);
1900 *total = COSTS_N_INSNS (1);
/* Floating-point multiply latencies, by tuning target.  */
1905 if (mode == SFmode || mode == DFmode)
1907 if (TUNE_MIPS3000 || TUNE_MIPS3900)
1908 *total = COSTS_N_INSNS (2);
1909 else if (TUNE_MIPS6000)
1910 *total = COSTS_N_INSNS (3);
1912 *total = COSTS_N_INSNS (4);
1914 *total = COSTS_N_INSNS (6);
1917 if (mode == DImode && !TARGET_64BIT)
1919 *total = COSTS_N_INSNS (4);
1925 if (mode == DImode && !TARGET_64BIT)
1939 *total = COSTS_N_INSNS (4);
1940 else if (TUNE_MIPS6000
1943 *total = COSTS_N_INSNS (5);
1945 *total = COSTS_N_INSNS (7);
1952 *total = COSTS_N_INSNS (4);
1953 else if (TUNE_MIPS3000
1956 *total = COSTS_N_INSNS (5);
1957 else if (TUNE_MIPS6000
1960 *total = COSTS_N_INSNS (6);
1962 *total = COSTS_N_INSNS (8);
/* Integer multiply latencies, by tuning target.  */
1967 *total = COSTS_N_INSNS (12);
1968 else if (TUNE_MIPS3900)
1969 *total = COSTS_N_INSNS (2);
1970 else if (TUNE_MIPS4130)
1971 *total = COSTS_N_INSNS (mode == DImode ? 6 : 4);
1972 else if (TUNE_MIPS5400 || TUNE_SB1)
1973 *total = COSTS_N_INSNS (mode == DImode ? 4 : 3);
1974 else if (TUNE_MIPS5500 || TUNE_MIPS7000)
1975 *total = COSTS_N_INSNS (mode == DImode ? 9 : 5);
1976 else if (TUNE_MIPS9000)
1977 *total = COSTS_N_INSNS (mode == DImode ? 8 : 3);
1978 else if (TUNE_MIPS6000)
1979 *total = COSTS_N_INSNS (17);
1980 else if (TUNE_MIPS5000)
1981 *total = COSTS_N_INSNS (5);
1983 *total = COSTS_N_INSNS (10);
1992 *total = COSTS_N_INSNS (12);
1993 else if (TUNE_MIPS6000)
1994 *total = COSTS_N_INSNS (15);
1996 *total = COSTS_N_INSNS (24);
1997 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
1998 *total = COSTS_N_INSNS (30);
2000 *total = COSTS_N_INSNS (23);
/* Division latencies, by tuning target.  */
2008 *total = COSTS_N_INSNS (19);
2009 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2010 *total = COSTS_N_INSNS (59);
2011 else if (TUNE_MIPS6000)
2012 *total = COSTS_N_INSNS (16);
2014 *total = COSTS_N_INSNS (32);
2016 *total = COSTS_N_INSNS (36);
2025 *total = COSTS_N_INSNS (35);
2026 else if (TUNE_MIPS6000)
2027 *total = COSTS_N_INSNS (38);
2028 else if (TUNE_MIPS5000)
2029 *total = COSTS_N_INSNS (36);
2031 *total = COSTS_N_INSNS ((mode == SImode) ? 36 : 68);
2032 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2033 *total = COSTS_N_INSNS ((mode == SImode) ? 42 : 74);
2035 *total = COSTS_N_INSNS (69);
2039 /* A sign extend from SImode to DImode in 64 bit mode is often
2040 zero instructions, because the result can often be used
2041 directly by another instruction; we'll call it one. */
2042 if (TARGET_64BIT && mode == DImode
2043 && GET_MODE (XEXP (x, 0)) == SImode)
2044 *total = COSTS_N_INSNS (1);
2046 *total = COSTS_N_INSNS (2);
/* Zero-extend of SImode to DImode needs an explicit mask (2 insns).  */
2050 if (TARGET_64BIT && mode == DImode
2051 && GET_MODE (XEXP (x, 0)) == SImode)
2052 *total = COSTS_N_INSNS (2);
2054 *total = COSTS_N_INSNS (1);
2062 /* Provide the costs of an addressing mode that contains ADDR.
2063 If ADDR is not a valid address, its cost is irrelevant. */
2066 mips_address_cost (rtx addr)
/* Cost is simply the instruction count for an SImode access.  */
2068 return mips_address_insns (addr, SImode);
2071 /* Return one word of double-word value OP, taking into account the fixed
2072 endianness of certain registers. HIGH_P is true to select the high part,
2073 false to select the low part. */
2076 mips_subword (rtx op, int high_p)
2079 enum machine_mode mode;
2081 mode = GET_MODE (op);
2082 if (mode == VOIDmode)
/* Pick the byte offset of the requested word within OP.  */
2085 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2086 byte = UNITS_PER_WORD;
2090 if (GET_CODE (op) == REG)
/* FPR pairs and HI/LO have fixed halves regardless of endianness.  */
2092 if (FP_REG_P (REGNO (op)))
2093 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2094 if (REGNO (op) == HI_REGNUM)
2095 return gen_rtx_REG (word_mode, high_p ? HI_REGNUM : LO_REGNUM);
2098 if (GET_CODE (op) == MEM)
2099 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2101 return simplify_gen_subreg (word_mode, op, mode, byte);
2105 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2108 mips_split_64bit_move_p (rtx dest, rtx src)
2113 /* FP->FP moves can be done in a single instruction. */
2114 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2117 /* Check for floating-point loads and stores. They can be done using
2118 ldc1 and sdc1 on MIPS II and above. */
2121 if (FP_REG_RTX_P (dest) && GET_CODE (src) == MEM)
2123 if (FP_REG_RTX_P (src) && GET_CODE (dest) == MEM)
2130 /* Split a 64-bit move from SRC to DEST assuming that
2131 mips_split_64bit_move_p holds.
2133 Moves into and out of FPRs cause some difficulty here. Such moves
2134 will always be DFmode, since paired FPRs are not allowed to store
2135 DImode values. The most natural representation would be two separate
2136 32-bit moves, such as:
2138 (set (reg:SI $f0) (mem:SI ...))
2139 (set (reg:SI $f1) (mem:SI ...))
2141 However, the second insn is invalid because odd-numbered FPRs are
2142 not allowed to store independent values. Use the patterns load_df_low,
2143 load_df_high and store_df_high instead. */
2146 mips_split_64bit_move (rtx dest, rtx src)
2148 if (FP_REG_RTX_P (dest))
2150 /* Loading an FPR from memory or from GPRs. */
2151 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2152 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2155 else if (FP_REG_RTX_P (src))
2157 /* Storing an FPR into memory or GPRs. */
2158 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2159 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2163 /* The operation can be split into two normal moves. Decide in
2164 which order to do them. */
2167 low_dest = mips_subword (dest, 0);
/* If the low destination word overlaps SRC, move the high word
   first so the overlapping source half isn't clobbered.  */
2168 if (GET_CODE (low_dest) == REG
2169 && reg_overlap_mentioned_p (low_dest, src))
2171 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2172 emit_move_insn (low_dest, mips_subword (src, 0));
2176 emit_move_insn (low_dest, mips_subword (src, 0));
2177 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2182 /* Return the appropriate instructions to move SRC into DEST. Assume
2183 that SRC is operand 1 and DEST is operand 0. */
/* Returns an assembler template string; %0/%1 are substituted by the
   output machinery.  DBL_P distinguishes 64-bit from 32-bit moves.  */
2186 mips_output_move (rtx dest, rtx src)
2188 enum rtx_code dest_code, src_code;
2191 dest_code = GET_CODE (dest);
2192 src_code = GET_CODE (src);
2193 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2195 if (dbl_p && mips_split_64bit_move_p (dest, src))
/* Source is a GPR or constant zero ($0 via %z1).  */
2198 if ((src_code == REG && GP_REG_P (REGNO (src)))
2199 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2201 if (dest_code == REG)
2203 if (GP_REG_P (REGNO (dest)))
2204 return "move\t%0,%z1";
2206 if (MD_REG_P (REGNO (dest)))
2209 if (FP_REG_P (REGNO (dest)))
2210 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2212 if (ALL_COP_REG_P (REGNO (dest)))
/* Patch the coprocessor number into the template in place; drop
   the leading 'd' for the 32-bit form.  */
2214 static char retval[] = "dmtc_\t%z1,%0";
2216 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2217 return (dbl_p ? retval : retval + 1);
2220 if (dest_code == MEM)
2221 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
/* Destination is a GPR.  */
2223 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2225 if (src_code == REG)
/* Condition-code registers read via lui+movf.  */
2227 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2228 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2230 if (FP_REG_P (REGNO (src)))
2231 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2233 if (ALL_COP_REG_P (REGNO (src)))
2235 static char retval[] = "dmfc_\t%0,%1";
2237 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2238 return (dbl_p ? retval : retval + 1);
2242 if (src_code == MEM)
2243 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2245 if (src_code == CONST_INT)
2247 /* Don't use the X format, because that will give out of
2248 range numbers for 64 bit hosts and 32 bit targets. */
2250 return "li\t%0,%1\t\t\t# %X1";
2252 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2255 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2259 if (src_code == HIGH)
2260 return "lui\t%0,%h1";
2262 if (CONST_GP_P (src))
2263 return "move\t%0,%1";
2265 if (symbolic_operand (src, VOIDmode))
2266 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
/* Source is an FPR: FP move or FP store.  */
2268 if (src_code == REG && FP_REG_P (REGNO (src)))
2270 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2271 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2273 if (dest_code == MEM)
2274 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
/* Destination is an FPR: FP load.  */
2276 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2278 if (src_code == MEM)
2279 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
/* Coprocessor loads and stores (lwc/ldc, swc/sdc).  */
2281 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2283 static char retval[] = "l_c_\t%0,%1";
2285 retval[1] = (dbl_p ? 'd' : 'w');
2286 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2289 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2291 static char retval[] = "s_c_\t%1,%0";
2293 retval[1] = (dbl_p ? 'd' : 'w');
2294 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2300 /* Restore $gp from its save slot. Valid only when using o32 or
2304 mips_restore_gp (void)
/* Nothing to do unless $gp is call-clobbered (abicalls, old ABI).  */
2308 if (!TARGET_ABICALLS || !TARGET_OLDABI)
/* The save slot lives just above the outgoing argument area.  */
2311 address = mips_add_offset (pic_offset_table_rtx,
2312 frame_pointer_needed
2313 ? hard_frame_pointer_rtx
2314 : stack_pointer_rtx,
2315 current_function_outgoing_args_size);
2316 slot = gen_rtx_MEM (Pmode, address);
2318 emit_move_insn (pic_offset_table_rtx, slot);
/* Without explicit relocs, stop the scheduler moving uses of $gp
   above the restore.  */
2319 if (!TARGET_EXPLICIT_RELOCS)
2320 emit_insn (gen_blockage ());
2323 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2326 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2328 emit_insn (gen_rtx_SET (VOIDmode, target,
2329 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2332 /* Return true if CMP1 is a suitable second operand for relational
2333 operator CODE. See also the *sCC patterns in mips.md. */
/* NOTE(review): the case labels of the switch over CODE are not
   visible in this extract.  */
2336 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
2342 return reg_or_0_operand (cmp1, VOIDmode);
2346 return !TARGET_MIPS16 && cmp1 == const1_rtx;
2350 return arith_operand (cmp1, VOIDmode);
2353 return sle_operand (cmp1, VOIDmode);
2356 return sleu_operand (cmp1, VOIDmode);
2363 /* Compare CMP0 and CMP1 using relational operator CODE and store the
2364 result in TARGET. CMP0 and TARGET are register_operands that have
2365 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
2366 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
2369 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
2370 rtx target, rtx cmp0, rtx cmp1)
2372 /* First see if there is a MIPS instruction that can do this operation
2373 with CMP1 in its current form. If not, try doing the same for the
2374 inverse operation. If that also fails, force CMP1 into a register
2376 if (mips_relational_operand_ok_p (code, cmp1))
2377 mips_emit_binary (code, target, cmp0, cmp1);
/* Direct form failed: consider the inverse comparison.  */
2380 enum rtx_code inv_code = reverse_condition (code);
/* If even the inverse form cannot take CMP1, force CMP1 into a
   register and retry; a register operand always succeeds.  */
2381 if (!mips_relational_operand_ok_p (inv_code, cmp1))
2383 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
2384 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
/* The inverse form works but the caller cannot accept an inverted
   result: compute the inverse into a fresh register, then flip the
   0/1 value with XOR 1.  */
2386 else if (invert_ptr == 0)
2388 rtx inv_target = gen_reg_rtx (GET_MODE (target));
2389 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
2390 mips_emit_binary (XOR, target, inv_target, const1_rtx);
/* Otherwise let the caller undo the inversion: record it in
   *INVERT_PTR and emit the inverse comparison directly.  */
2394 *invert_ptr = !*invert_ptr;
2395 mips_emit_binary (inv_code, target, cmp0, cmp1);
2400 /* Return a register that is zero iff CMP0 and CMP1 are equal.
2401 The register will have the same mode as CMP0. */
2404 mips_zero_if_equal (rtx cmp0, rtx cmp1)
2406 if (cmp1 == const0_rtx)
2409 if (uns_arith_operand (cmp1, VOIDmode))
2410 return expand_binop (GET_MODE (cmp0), xor_optab,
2411 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2413 return expand_binop (GET_MODE (cmp0), sub_optab,
2414 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2417 /* Convert a comparison into something that can be used in a branch or
2418 conditional move. cmp_operands[0] and cmp_operands[1] are the values
2419 being compared and *CODE is the code used to compare them.
2421 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
2422 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
2423 otherwise any standard branch condition can be used. The standard branch
2426 - EQ/NE between two registers.
2427 - any comparison between a register and zero. */
2430 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
/* Integer comparisons.  */
2432 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
/* A comparison against zero is usable as-is.  */
2434 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
2436 *op0 = cmp_operands[0];
2437 *op1 = cmp_operands[1];
/* EQ/NE against a nonzero value: reduce to a test of
   mips_zero_if_equal's result against zero.  */
2439 else if (*code == EQ || *code == NE)
2443 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
/* NOTE(review): a branch of this if-chain (and *OP1 assignments) is
   missing from the listing; the lines below appear to handle forcing
   the second operand into a register.  */
2448 *op0 = cmp_operands[0];
2449 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
2454 /* The comparison needs a separate scc instruction. Store the
2455 result of the scc in *OP0 and compare it against zero. */
2456 bool invert = false;
2457 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
2459 mips_emit_int_relational (*code, &invert, *op0,
2460 cmp_operands[0], cmp_operands[1]);
2461 *code = (invert ? EQ : NE);
/* Floating-point comparisons.  */
2466 enum rtx_code cmp_code;
2468 /* Floating-point tests use a separate c.cond.fmt comparison to
2469 set a condition code register. The branch or conditional move
2470 will then compare that register against zero.
2472 Set CMP_CODE to the code of the comparison instruction and
2473 *CODE to the code that the branch or move should use. */
/* NOTE(review): the lines that select between the direct and the
   reversed comparison, and the start of the *OP0 assignment, are
   missing from this listing.  */
2481 cmp_code = reverse_condition_maybe_unordered (*code);
/* *OP0 is a CC-mode register: presumably a fresh pseudo when CC
   registers are available, otherwise the fixed FPSW register.  */
2491 ? gen_reg_rtx (CCmode)
2492 : gen_rtx_REG (CCmode, FPSW_REGNUM));
2494 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
2498 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
2499 Store the result in TARGET and return true if successful.
2501 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
2504 mips_emit_scc (enum rtx_code code, rtx target)
/* Only integer comparisons can be turned into an scc sequence; the
   failure return for other modes is missing from this listing.  */
2506 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
/* Work in the (possibly narrower) mode of the operands.  */
2509 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
/* EQ/NE: compare mips_zero_if_equal's result against zero; other
   codes go through mips_emit_int_relational (no inversion allowed,
   hence the null invert_ptr).  */
2510 if (code == EQ || code == NE)
2512 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2513 mips_emit_binary (code, target, zie, const0_rtx);
2516 mips_emit_int_relational (code, 0, target,
2517 cmp_operands[0], cmp_operands[1]);
2521 /* Emit the common code for doing conditional branches.
2522 operand[0] is the label to jump to.
2523 The comparison operands are saved away by cmp{si,di,sf,df}. */
2526 gen_conditional_branch (rtx *operands, enum rtx_code code)
2528 rtx op0, op1, target;
/* mips16 branches can only test EQ/NE against zero, so request that
   restricted form from mips_emit_compare when generating mips16.  */
2530 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
/* Build (if_then_else (code op0 op1) (label_ref L) pc) and emit it as
   a (set pc ...) jump.  NOTE(review): the op1 and pc_rtx arms of the
   IF_THEN_ELSE are on lines missing from this listing.  */
2531 target = gen_rtx_IF_THEN_ELSE (VOIDmode,
2532 gen_rtx_fmt_ee (code, GET_MODE (op0),
2534 gen_rtx_LABEL_REF (VOIDmode, operands[0]),
2536 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, target));
2539 /* Emit the common code for conditional moves. OPERANDS is the array
2540 of operands passed to the conditional move define_expand. */
2543 gen_conditional_move (rtx *operands)
/* NOTE(review): the local declarations (op0, op1, code) are on lines
   missing from this listing.  */
/* Conditional moves need the comparison reduced to EQ/NE against
   zero, hence need_eq_ne_p == true (see mips_emit_compare).  */
2548 code = GET_CODE (operands[1]);
2549 mips_emit_compare (&code, &op0, &op1, true);
2550 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2551 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2552 gen_rtx_fmt_ee (code,
2555 operands[2], operands[3])));
2558 /* Emit a conditional trap. OPERANDS is the array of operands passed to
2559 the conditional_trap expander. */
2562 mips_gen_conditional_trap (rtx *operands)
2565 enum rtx_code cmp_code = GET_CODE (operands[0]);
2566 enum machine_mode mode = GET_MODE (cmp_operands[0]);
2568 /* MIPS conditional trap machine instructions don't have GT or LE
2569 flavors, so we must invert the comparison and convert to LT and
2570 GE, respectively. */
/* NOTE(review): the switch header and default case are missing from
   this listing; only the case labels are visible.  */
2573 case GT: cmp_code = LT; break;
2574 case LE: cmp_code = GE; break;
2575 case GTU: cmp_code = LTU; break;
2576 case LEU: cmp_code = GEU; break;
/* If the code was unchanged, keep the operand order; if it was
   inverted above, swap the operands to compensate.  */
2579 if (cmp_code == GET_CODE (operands[0]))
2581 op0 = cmp_operands[0];
2582 op1 = cmp_operands[1];
2586 op0 = cmp_operands[1];
2587 op1 = cmp_operands[0];
/* The first operand must be a register; the second must satisfy
   arith_operand (presumably register or small immediate), else it is
   forced into a register too.  */
2589 op0 = force_reg (mode, op0);
2590 if (!arith_operand (op1, mode))
2591 op1 = force_reg (mode, op1);
2593 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
2594 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
2598 /* Load function address ADDR into register DEST. SIBCALL_P is true
2599 if the address is needed for a sibling call. */
2602 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
2604 /* If we're generating PIC, and this call is to a global function,
2605 try to allow its address to be resolved lazily. This isn't
2606 possible for NewABI sibcalls since the value of $gp on entry
2607 to the stub would be our caller's gp, not ours. */
2608 if (TARGET_EXPLICIT_RELOCS
2609 && !(sibcall_p && TARGET_NEWABI)
2610 && global_got_operand (addr, VOIDmode))
2612 rtx high, lo_sum_symbol;
/* Split the SYMBOL_GOTOFF_CALL access into a high part and a LO_SUM
   symbol so it can be emitted with explicit relocation operators.  */
2614 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
2615 addr, SYMBOL_GOTOFF_CALL);
2616 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
/* Pick the load_call pattern that matches the pointer width.  */
2617 if (Pmode == SImode)
2618 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
2620 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
/* Otherwise a plain move (or constant load) suffices.  */
2623 emit_move_insn (dest, addr);
2627 /* Expand a call or call_value instruction. RESULT is where the
2628 result will go (null for calls), ADDR is the address of the
2629 function, ARGS_SIZE is the size of the arguments and AUX is
2630 the value passed to us by mips_function_arg. SIBCALL_P is true
2631 if we are expanding a sibling call, false if we're expanding
2635 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
2637 rtx orig_addr, pattern, insn;
/* Legitimize ADDR into a register if it is not already a valid call
   operand.  NOTE(review): the assignment of ORIG_ADDR is on a line
   missing from this listing.  */
2640 if (!call_insn_operand (addr, VOIDmode))
2642 addr = gen_reg_rtx (Pmode);
2643 mips_load_call_address (addr, orig_addr, sibcall_p);
/* mips16 hard-float calls may go through an fp conversion stub; AUX
   carries the fp_code that function_arg encoded as a REG mode.  The
   enclosing condition's first line is missing from this listing.  */
2647 && mips16_hard_float
2648 && build_mips16_call_stub (result, addr, args_size,
2649 aux == 0 ? 0 : (int) GET_MODE (aux)))
/* Select the call pattern: no result, a two-register result
   (PARALLEL of length 2), or a single-value result.  */
2653 pattern = (sibcall_p
2654 ? gen_sibcall_internal (addr, args_size)
2655 : gen_call_internal (addr, args_size));
2656 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
2660 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
2661 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
2664 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
2665 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
2668 pattern = (sibcall_p
2669 ? gen_sibcall_value_internal (result, addr, args_size)
2670 : gen_call_value_internal (result, addr, args_size));
2672 insn = emit_call_insn (pattern);
2674 /* Lazy-binding stubs require $gp to be valid on entry. */
2675 if (global_got_operand (orig_addr, VOIDmode))
2676 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2680 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
2683 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
2684 tree exp ATTRIBUTE_UNUSED)
2686 return TARGET_SIBCALLS;
2689 /* Emit code to move general operand SRC into condition-code
2690 register DEST. SCRATCH is a scratch TFmode float register.
2697 where FP1 and FP2 are single-precision float registers
2698 taken from SCRATCH. */
2701 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
2705 /* Change the source to SFmode. */
2706 if (GET_CODE (src) == MEM)
2707 src = adjust_address (src, SFmode, 0);
2708 else if (GET_CODE (src) == REG || GET_CODE (src) == SUBREG)
2709 src = gen_rtx_REG (SFmode, true_regnum (src));
/* Carve two consecutive single-precision registers out of SCRATCH.  */
2711 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
2712 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
/* Load SRC into FP1 and 0.0 into FP2, then set DEST from the
   comparison (FP2 < FP1), i.e. 0.0 < SRC.  */
2714 emit_move_insn (copy_rtx (fp1), src);
2715 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
2716 emit_insn (gen_slt_sf (dest, fp2, fp1));
2719 /* Emit code to change the current function's return address to
2720 ADDRESS. SCRATCH is available as a scratch register, if needed.
2721 ADDRESS and SCRATCH are both word-mode GPRs. */
2724 mips_set_return_address (rtx address, rtx scratch)
/* Make sure the frame layout has been computed, then check that $31
   (bit 31 of the save mask) really was saved.  NOTE(review): the
   statement executed when the check fails is on a line missing from
   this listing.  */
2728 compute_frame_size (get_frame_size ());
2729 if (((cfun->machine->frame.mask >> 31) & 1) == 0)
/* Store ADDRESS into the stack slot holding the saved return
   address, gp_sp_offset bytes above $sp.  */
2731 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
2732 cfun->machine->frame.gp_sp_offset);
2734 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
2737 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
2738 Assume that the areas do not overlap. */
2741 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
2743 HOST_WIDE_INT offset, delta;
2744 unsigned HOST_WIDE_INT bits;
2746 enum machine_mode mode;
2749 /* Work out how many bits to move at a time. If both operands have
2750 half-word alignment, it is usually better to move in half words.
2751 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
2752 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
2753 Otherwise move word-sized chunks. */
2754 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
2755 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
2756 bits = BITS_PER_WORD / 2;
2758 bits = BITS_PER_WORD;
/* DELTA is the chunk size in bytes; MODE the integer mode to use.  */
2760 mode = mode_for_size (bits, MODE_INT, 0);
2761 delta = bits / BITS_PER_UNIT;
2763 /* Allocate a buffer for the temporary registers. */
2764 regs = alloca (sizeof (rtx) * length / delta);
2766 /* Load as many BITS-sized chunks as possible. Use a normal load if
2767 the source has enough alignment, otherwise use left/right pairs. */
2768 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2770 regs[i] = gen_reg_rtx (mode);
2771 if (MEM_ALIGN (src) >= bits)
2772 emit_move_insn (regs[i], adjust_address (src, mode, offset));
2775 rtx part = adjust_address (src, BLKmode, offset);
/* NOTE(review): the statement executed when the unaligned expander
   fails is on a line missing from this listing.  */
2776 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
2781 /* Copy the chunks to the destination. */
2782 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2783 if (MEM_ALIGN (dest) >= bits)
2784 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
2787 rtx part = adjust_address (dest, BLKmode, offset);
2788 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
2792 /* Mop up any left-over bytes. */
2793 if (offset < length)
2795 src = adjust_address (src, BLKmode, offset);
2796 dest = adjust_address (dest, BLKmode, offset);
2797 move_by_pieces (dest, src, length - offset,
2798 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
/* Number of registers (and hence the number of words) moved per
   iteration of the block-move loop below.  */
2802 #define MAX_MOVE_REGS 4
2803 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
2806 /* Helper function for doing a loop-based block operation on memory
2807 reference MEM. Each iteration of the loop will operate on LENGTH
2810 Create a new base register for use within the loop and point it to
2811 the start of MEM. Create a new memory reference that uses this
2812 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
2815 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
2816 rtx *loop_reg, rtx *loop_mem)
2818 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
2820 /* Although the new mem does not refer to a known location,
2821 it does keep up to LENGTH bytes of alignment. */
2822 *loop_mem = change_address (mem, BLKmode, *loop_reg);
2823 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
2827 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
2828 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
2829 memory regions do not overlap. */
2832 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
2834 rtx label, src_reg, dest_reg, final_src;
2835 HOST_WIDE_INT leftover;
/* LEFTOVER is the sub-MAX_MOVE_BYTES tail copied after the loop.  */
2837 leftover = length % MAX_MOVE_BYTES;
2840 /* Create registers and memory references for use within the loop. */
2841 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
2842 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
2844 /* Calculate the value that SRC_REG should have after the last iteration
2846 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
2849 /* Emit the start of the loop. */
2850 label = gen_label_rtx ();
/* NOTE(review): the emit_label call for LABEL is on a line missing
   from this listing.  */
2853 /* Emit the loop body. */
2854 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
2856 /* Move on to the next block. */
2857 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
2858 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
2860 /* Emit the loop condition. */
2861 if (Pmode == DImode)
2862 emit_insn (gen_cmpdi (src_reg, final_src));
2864 emit_insn (gen_cmpsi (src_reg, final_src));
2865 emit_jump_insn (gen_bne (label));
2867 /* Mop up any left-over bytes. */
2869 mips_block_move_straight (dest, src, leftover);
2872 /* Expand a movmemsi instruction. */
2875 mips_expand_block_move (rtx dest, rtx src, rtx length)
/* Only constant-length moves are expanded inline; the return
   statements (including the failure return for a variable LENGTH)
   are on lines missing from this listing.  */
2877 if (GET_CODE (length) == CONST_INT)
2879 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
/* Short block: fully unrolled straight-line copy.  */
2881 mips_block_move_straight (dest, src, INTVAL (length));
/* Longer block: MAX_MOVE_BYTES-per-iteration loop.  */
2886 mips_block_move_loop (dest, src, INTVAL (length));
2893 /* Argument support functions. */
2895 /* Initialize CUMULATIVE_ARGS for a function. */
2898 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
2899 rtx libname ATTRIBUTE_UNUSED)
2901 static CUMULATIVE_ARGS zero_cum;
2902 tree param, next_param;
/* Start from the all-zero state (the `*cum = zero_cum;` assignment is
   on a line missing from this listing), then record whether FNTYPE
   carries a prototype.  */
2905 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
2907 /* Determine if this function has variable arguments. This is
2908 indicated by the last argument being 'void_type_mode' if there
2909 are no variable arguments. The standard MIPS calling sequence
2910 passes all arguments in the general purpose registers in this case. */
2912 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
2913 param != 0; param = next_param)
2915 next_param = TREE_CHAIN (param);
2916 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2917 cum->gp_reg_found = 1;
2922 /* Fill INFO with information about a single argument. CUM is the
2923 cumulative state for earlier arguments. MODE is the mode of this
2924 argument and TYPE is its type (if known). NAMED is true if this
2925 is a named (fixed) argument rather than a variable one. */
2928 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
2929 tree type, int named, struct mips_arg_info *info)
2932 unsigned int num_bytes, num_words, max_regs;
2934 /* Work out the size of the argument. */
2935 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
2936 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2938 /* Decide whether it should go in a floating-point register, assuming
2939 one is free. Later code checks for availability.
2941 The checks against UNITS_PER_FPVALUE handle the soft-float and
2942 single-float cases. */
/* NOTE(review): the switch over mips_abi that selects among the
   three fpr_p rules below is on lines missing from this listing.  */
2946 /* The EABI conventions have traditionally been defined in terms
2947 of TYPE_MODE, regardless of the actual type. */
2948 info->fpr_p = (GET_MODE_CLASS (mode) == MODE_FLOAT
2949 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
2954 /* Only leading floating-point scalars are passed in
2955 floating-point registers. */
2956 info->fpr_p = (!cum->gp_reg_found
2957 && cum->arg_number < 2
2958 && (type == 0 || SCALAR_FLOAT_TYPE_P (type))
2959 && GET_MODE_CLASS (mode) == MODE_FLOAT
2960 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
2965 /* Scalar and complex floating-point types are passed in
2966 floating-point registers. */
2967 info->fpr_p = (named
2968 && (type == 0 || FLOAT_TYPE_P (type))
2969 && (GET_MODE_CLASS (mode) == MODE_FLOAT
2970 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
2971 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
2973 /* ??? According to the ABI documentation, the real and imaginary
2974 parts of complex floats should be passed in individual registers.
2975 The real and imaginary parts of stack arguments are supposed
2976 to be contiguous and there should be an extra word of padding
2979 This has two problems. First, it makes it impossible to use a
2980 single "void *" va_list type, since register and stack arguments
2981 are passed differently. (At the time of writing, MIPSpro cannot
2982 handle complex float varargs correctly.) Second, it's unclear
2983 what should happen when there is only one register free.
2985 For now, we assume that named complex floats should go into FPRs
2986 if there are two FPRs free, otherwise they should be passed in the
2987 same way as a struct containing two floats. */
2989 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
2990 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
2992 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
2993 info->fpr_p = false;
3003 /* Now decide whether the argument must go in an even-numbered register.
3004 Usually this is determined by type alignment, but there are two
3007 - Under the O64 ABI, the second float argument goes in $f14 if it
3008 is single precision (doubles go in $f13 as expected).
3010 - Floats passed in FPRs must be in an even-numbered register if
3011 we're using paired FPRs. */
3013 even_reg_p = TYPE_ALIGN (type) > BITS_PER_WORD;
3015 even_reg_p = GET_MODE_UNIT_SIZE (mode) > UNITS_PER_WORD;
3019 if (mips_abi == ABI_O64 && mode == SFmode)
3025 /* Set REG_OFFSET to the register count we're interested in.
3026 The EABI allocates the floating-point registers separately,
3027 but the other ABIs allocate them like integer registers. */
3028 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
/* Round the register offset up to an even register when required
   (the guarding condition is on lines missing from this listing).  */
3033 info->reg_offset += info->reg_offset & 1;
3035 /* The alignment applied to registers is also applied to stack arguments. */
3036 info->stack_offset = cum->stack_words;
3038 info->stack_offset += info->stack_offset & 1;
3040 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3042 /* Partition the argument between registers and stack. */
3043 info->reg_words = MIN (num_words, max_regs);
3044 info->stack_words = num_words - info->reg_words;
3048 /* Implement FUNCTION_ARG_ADVANCE. */
3051 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3052 tree type, int named)
3054 struct mips_arg_info info;
3056 mips_arg_info (cum, mode, type, named, &info);
/* Record that a GP-register argument has been seen; the guarding
   condition for this assignment is on a line missing from this
   listing.  */
3059 cum->gp_reg_found = true;
3061 /* See the comment above the cumulative args structure in mips.h
3062 for an explanation of what this code does. It assumes the O32
3063 ABI, which passes at most 2 arguments in float registers. */
3064 if (cum->arg_number < 2 && info.fpr_p)
3065 cum->fp_code += (mode == SFmode ? 1 : 2) << ((cum->arg_number - 1) * 2);
/* Advance the GPR count for non-EABI-FPR arguments, otherwise the
   FPR count.  */
3067 if (mips_abi != ABI_EABI || !info.fpr_p)
3068 cum->num_gprs = info.reg_offset + info.reg_words;
3069 else if (info.reg_words > 0)
3070 cum->num_fprs += FP_INC;
3072 if (info.stack_words > 0)
3073 cum->stack_words = info.stack_offset + info.stack_words;
/* NOTE(review): the increment of cum->arg_number is on a line
   missing from this listing.  */
3078 /* Implement FUNCTION_ARG. */
3081 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3082 tree type, int named)
3084 struct mips_arg_info info;
3086 /* We will be called with a mode of VOIDmode after the last argument
3087 has been seen. Whatever we return will be passed to the call
3088 insn. If we need a mips16 fp_code, return a REG with the code
3089 stored as the mode. */
3090 if (mode == VOIDmode)
3092 if (TARGET_MIPS16 && cum->fp_code != 0)
3093 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3099 mips_arg_info (cum, mode, type, named, &info);
3101 /* Return straight away if the whole argument is passed on the stack. */
3102 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
/* NOTE(review): the first lines of this condition (ABI and
   aggregate checks) are missing from this listing.  */
3106 && TREE_CODE (type) == RECORD_TYPE
3108 && TYPE_SIZE_UNIT (type)
3109 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3112 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3113 structure contains a double in its entirety, then that 64 bit
3114 chunk is passed in a floating point register. */
3117 /* First check to see if there is any such field. */
3118 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3119 if (TREE_CODE (field) == FIELD_DECL
3120 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3121 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3122 && host_integerp (bit_position (field), 0)
3123 && int_bit_position (field) % BITS_PER_WORD == 0)
3128 /* Now handle the special case by returning a PARALLEL
3129 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3130 chunks are passed in registers. */
3132 HOST_WIDE_INT bitpos;
3135 /* assign_parms checks the mode of ENTRY_PARM, so we must
3136 use the actual mode here. */
3137 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3140 field = TYPE_FIELDS (type);
3141 for (i = 0; i < info.reg_words; i++)
/* Find the field (if any) that starts at or after BITPOS.  */
3145 for (; field; field = TREE_CHAIN (field))
3146 if (TREE_CODE (field) == FIELD_DECL
3147 && int_bit_position (field) >= bitpos)
/* A double aligned exactly on this chunk goes in an FPR;
   everything else goes in a GPR.  */
3151 && int_bit_position (field) == bitpos
3152 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3153 && !TARGET_SOFT_FLOAT
3154 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3155 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3157 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3160 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3161 GEN_INT (bitpos / BITS_PER_UNIT));
3163 bitpos += BITS_PER_WORD;
3169 /* Handle the n32/n64 conventions for passing complex floating-point
3170 arguments in FPR pairs. The real part goes in the lower register
3171 and the imaginary part goes in the upper register. */
3174 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3177 enum machine_mode inner;
3180 inner = GET_MODE_INNER (mode);
3181 reg = FP_ARG_FIRST + info.reg_offset;
3182 real = gen_rtx_EXPR_LIST (VOIDmode,
3183 gen_rtx_REG (inner, reg),
3185 imag = gen_rtx_EXPR_LIST (VOIDmode,
3186 gen_rtx_REG (inner, reg + info.reg_words / 2),
3187 GEN_INT (GET_MODE_SIZE (inner)));
3188 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
/* Default cases: a single FPR or GPR.  The guarding condition for
   the FPR case is on a line missing from this listing.  */
3192 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3194 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3198 /* Implement FUNCTION_ARG_PARTIAL_NREGS. */
3201 function_arg_partial_nregs (const CUMULATIVE_ARGS *cum,
3202 enum machine_mode mode, tree type, int named)
3204 struct mips_arg_info info;
3206 mips_arg_info (cum, mode, type, named, &info);
3207 return info.stack_words > 0 ? info.reg_words : 0;
3211 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3212 upward rather than downward. In other words, return true if the
3213 first byte of the stack slot has useful data, false if the last
3217 mips_pad_arg_upward (enum machine_mode mode, tree type)
3219 /* On little-endian targets, the first byte of every stack argument
3220 is passed in the first byte of the stack slot. */
3221 if (!BYTES_BIG_ENDIAN)
/* NOTE(review): several return statements and the head of the
   integral-type condition are on lines missing from this listing.  */
3224 /* Otherwise, integral types are padded downward: the last byte of a
3225 stack argument is passed in the last byte of the stack slot. */
3227 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3228 : GET_MODE_CLASS (mode) == MODE_INT)
3231 /* Big-endian o64 pads floating-point arguments downward. */
3232 if (mips_abi == ABI_O64)
3233 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3236 /* Other types are padded upward for o32, o64, n32 and n64. */
3237 if (mips_abi != ABI_EABI)
3240 /* Arguments smaller than a stack slot are padded downward. */
3241 if (mode != BLKmode)
3242 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
3244 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
3248 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
3249 if the least significant byte of the register has useful data. Return
3250 the opposite if the most significant byte does. */
3253 mips_pad_reg_upward (enum machine_mode mode, tree type)
3255 /* No shifting is required for floating-point arguments. */
3256 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3257 return !BYTES_BIG_ENDIAN;
3259 /* Otherwise, apply the same padding to register arguments as we do
3260 to stack arguments. */
3261 return mips_pad_arg_upward (mode, type);
/* Save the unnamed-argument registers to the stack so that a varargs
   function can find them.  Presumably implements the
   TARGET_SETUP_INCOMING_VARARGS hook -- the comment/return-type lines
   above this definition are missing from the listing.  */
3265 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3266 tree type, int *pretend_size, int no_rtl)
3268 CUMULATIVE_ARGS local_cum;
3269 int gp_saved, fp_saved;
3271 /* The caller has advanced CUM up to, but not beyond, the last named
3272 argument. Advance a local copy of CUM past the last "real" named
3273 argument, to find out how many registers are left over. */
3276 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
3278 /* Found out how many registers we need to save. */
3279 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3280 fp_saved = (EABI_FLOAT_VARARGS_P
3281 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* NOTE(review): the `if (!no_rtl)` guard and several branch headers
   in the code below are on lines missing from this listing.  */
3290 ptr = virtual_incoming_args_rtx;
3295 ptr = plus_constant (ptr, local_cum.num_gprs * UNITS_PER_WORD);
3299 ptr = plus_constant (ptr, -gp_saved * UNITS_PER_WORD);
/* Save the leftover GPRs as one BLKmode block.  */
3302 mem = gen_rtx_MEM (BLKmode, ptr);
3303 set_mem_alias_set (mem, get_varargs_alias_set ());
3305 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
3310 /* We can't use move_block_from_reg, because it will use
3312 enum machine_mode mode;
3315 /* Set OFF to the offset from virtual_incoming_args_rtx of
3316 the first float register. The FP save area lies below
3317 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
3318 off = -gp_saved * UNITS_PER_WORD;
3319 off &= ~(UNITS_PER_FPVALUE - 1);
3320 off -= fp_saved * UNITS_PER_FPREG;
3322 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
/* Store each leftover FPR individually into the FP save area.  */
3324 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
3328 ptr = plus_constant (virtual_incoming_args_rtx, off);
3329 mem = gen_rtx_MEM (mode, ptr);
3330 set_mem_alias_set (mem, get_varargs_alias_set ());
3331 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
3332 off += UNITS_PER_HWFPVALUE;
3338 /* No need for pretend arguments: the register parameter area was
3339 allocated by the caller. */
3343 *pretend_size = (gp_saved * UNITS_PER_WORD) + (fp_saved * UNITS_PER_FPREG);
3346 /* Create the va_list data type.
3347 We keep 3 pointers, and two offsets.
3348 Two pointers are to the overflow area, which starts at the CFA.
3349 One of these is constant, for addressing into the GPR save area below it.
3350 The other is advanced up the stack through the overflow region.
3351 The third pointer is to the GPR save area. Since the FPR save area
3352 is just below it, we can address FPR slots off this pointer.
3353 We also keep two one-byte offsets, which are to be subtracted from the
3354 constant pointers to yield addresses in the GPR and FPR save areas.
3355 These are downcounted as float or non-float arguments are used,
3356 and when they get to zero, the argument must be obtained from the
3358 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
3359 pointer is enough. It's started at the GPR save area, and is
3361 Note that the GPR save area is not constant size, due to optimization
3362 in the prologue. Hence, we can't use a design with two pointers
3363 and two offsets, although we could have designed this with two pointers
3364 and three offsets. */
3367 mips_build_builtin_va_list (void)
3369 if (EABI_FLOAT_VARARGS_P)
/* EABI with FP varargs: build the five-field record described in the
   comment above, plus explicit reserved padding.  */
3371 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
3374 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3376 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
3378 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
3380 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
3382 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
3383 unsigned_char_type_node);
3384 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
3385 unsigned_char_type_node);
3386 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
3387 warn on every user file. */
3388 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1, 0);
3389 array = build_array_type (unsigned_char_type_node,
3390 build_index_type (index));
3391 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
3393 DECL_FIELD_CONTEXT (f_ovfl) = record;
3394 DECL_FIELD_CONTEXT (f_gtop) = record;
3395 DECL_FIELD_CONTEXT (f_ftop) = record;
3396 DECL_FIELD_CONTEXT (f_goff) = record;
3397 DECL_FIELD_CONTEXT (f_foff) = record;
3398 DECL_FIELD_CONTEXT (f_res) = record;
3400 TYPE_FIELDS (record) = f_ovfl;
3401 TREE_CHAIN (f_ovfl) = f_gtop;
3402 TREE_CHAIN (f_gtop) = f_ftop;
3403 TREE_CHAIN (f_ftop) = f_goff;
3404 TREE_CHAIN (f_goff) = f_foff;
3405 TREE_CHAIN (f_foff) = f_res;
3407 layout_type (record);
/* NOTE(review): the `return record;` line is missing from this
   listing.  */
3410 else if (TARGET_IRIX && TARGET_IRIX6)
3411 /* On IRIX 6, this type is 'char *'. */
3412 return build_pointer_type (char_type_node);
3414 /* Otherwise, we use 'void *'. */
3415 return ptr_type_node;
3418 /* Implement va_start. */
3421 mips_va_start (tree valist, rtx nextarg)
/* Fix: the listing had "¤t_function_args_info", an HTML-entity
   mojibake ("&curren" for "&curren[t]") corrupting
   "&current_function_args_info"; restored below.  */
3423 const CUMULATIVE_ARGS *cum = &current_function_args_info;
3425 /* ARG_POINTER_REGNUM is initialized to STACK_POINTER_BOUNDARY, but
3426 since the stack is aligned for a pair of argument-passing slots,
3427 and the beginning of a variable argument list may be an odd slot,
3428 we have to decrease its alignment. */
3429 if (cfun && cfun->emit->regno_pointer_align)
3430 while (((current_function_pretend_args_size * BITS_PER_UNIT)
3431 & (REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) - 1)) != 0)
3432 REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) /= 2;
3434 if (mips_abi == ABI_EABI)
3436 int gpr_save_area_size;
3439 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
/* EABI with FP varargs: initialize the five va_list fields created
   by mips_build_builtin_va_list.  */
3441 if (EABI_FLOAT_VARARGS_P)
3443 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3444 tree ovfl, gtop, ftop, goff, foff;
3447 int fpr_save_area_size;
3449 f_ovfl = TYPE_FIELDS (va_list_type_node);
3450 f_gtop = TREE_CHAIN (f_ovfl);
3451 f_ftop = TREE_CHAIN (f_gtop);
3452 f_goff = TREE_CHAIN (f_ftop);
3453 f_foff = TREE_CHAIN (f_goff);
3455 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3457 gtop = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3459 ftop = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3461 goff = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3463 foff = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3466 /* Emit code to initialize OVFL, which points to the next varargs
3467 stack argument. CUM->STACK_WORDS gives the number of stack
3468 words used by named arguments. */
3469 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
3470 if (cum->stack_words > 0)
3471 t = build (PLUS_EXPR, TREE_TYPE (ovfl), t,
3472 build_int_cst (NULL_TREE,
3473 cum->stack_words * UNITS_PER_WORD, 0));
3474 t = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3475 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3477 /* Emit code to initialize GTOP, the top of the GPR save area. */
3478 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
3479 t = build (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
3480 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3482 /* Emit code to initialize FTOP, the top of the FPR save area.
3483 This address is gpr_save_area_bytes below GTOP, rounded
3484 down to the next fp-aligned boundary. */
3485 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
3486 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
3487 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
3489 t = build (PLUS_EXPR, TREE_TYPE (ftop), t,
3490 build_int_cst (NULL_TREE, -fpr_offset, -1));
3491 t = build (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
3492 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3494 /* Emit code to initialize GOFF, the offset from GTOP of the
3495 next GPR argument. */
3496 t = build (MODIFY_EXPR, TREE_TYPE (goff), goff,
3497 build_int_cst (NULL_TREE, gpr_save_area_size, 0));
3498 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3500 /* Likewise emit code to initialize FOFF, the offset from FTOP
3501 of the next FPR argument. */
3503 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
3504 t = build (MODIFY_EXPR, TREE_TYPE (foff), foff,
3505 build_int_cst (NULL_TREE, fpr_save_area_size, 0));
3506 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3510 /* Everything is in the GPR save area, or in the overflow
3511 area which is contiguous with it. */
3512 nextarg = plus_constant (nextarg, -gpr_save_area_size);
3513 std_expand_builtin_va_start (valist, nextarg);
/* Non-EABI ABIs: the standard va_start behavior is sufficient.  */
3517 std_expand_builtin_va_start (valist, nextarg);
3520 /* Implement va_arg.  */
/* Gimplify one va_arg access: VALIST is the va_list expression, TYPE is
   the type being fetched, and generated statements are appended to
   *PRE_P / *POST_P (see the gimplify_and_add calls below).  Returns a
   tree for the fetched value (an INDIRECT_REF of the computed address).  */
3523 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
3525 HOST_WIDE_INT size, rsize;
/* Arguments too big for registers are passed by reference; in that case
   we fetch a pointer and dereference it at the end.  */
3529 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
3532 type = build_pointer_type (type);
3534 size = int_size_in_bytes (type);
3535 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
/* Only the float-varargs EABI needs the custom five-field va_list; all
   other configurations can use the generic single-pointer expansion.  */
3537 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
3538 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p)
3541 /* Not a simple merged stack.  */
3543 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3544 tree ovfl, top, off, align;
3545 HOST_WIDE_INT osize;
3548 f_ovfl = TYPE_FIELDS (va_list_type_node);
3549 f_gtop = TREE_CHAIN (f_ovfl);
3550 f_ftop = TREE_CHAIN (f_gtop);
3551 f_goff = TREE_CHAIN (f_ftop);
3552 f_foff = TREE_CHAIN (f_goff);
3554 /* We maintain separate pointers and offsets for floating-point
3555 and integer arguments, but we need similar code in both cases.
3558 TOP be the top of the register save area;
3559 OFF be the offset from TOP of the next register;
3560 ADDR_RTX be the address of the argument;
3561 RSIZE be the number of bytes used to store the argument
3562 when it's in the register save area;
3563 OSIZE be the number of bytes used to store it when it's
3564 in the stack overflow area; and
3565 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
3567 The code we want is:
3569 1: off &= -rsize;	  // round down
3572 4: addr_rtx = top - off;
3577 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
3578 10: addr_rtx = ovfl + PADDING;
3582 [1] and [9] can sometimes be optimized away.  */
3584 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
/* Scalar floats that fit an FPR come from the FPR save area; everything
   else (including aggregates) comes from the GPR save area.  */
3587 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
3588 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
3590 top = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3592 off = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3595 /* When floating-point registers are saved to the stack,
3596 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
3597 of the float's precision.  */
3598 rsize = UNITS_PER_HWFPVALUE;
3600 /* Overflow arguments are padded to UNITS_PER_WORD bytes
3601 (= PARM_BOUNDARY bits). This can be different from RSIZE
3604 (1) On 32-bit targets when TYPE is a structure such as:
3606 struct s { float f; };
3608 Such structures are passed in paired FPRs, so RSIZE
3609 will be 8 bytes. However, the structure only takes
3610 up 4 bytes of memory, so OSIZE will only be 4.
3612 (2) In combinations such as -mgp64 -msingle-float
3613 -fshort-double. Doubles passed in registers
3614 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
3615 but those passed on the stack take up
3616 UNITS_PER_WORD bytes.  */
3617 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
3621 top = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3623 off = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3625 if (rsize > UNITS_PER_WORD)
3627 /* [1] Emit code for: off &= -rsize.  */
3628 t = build (BIT_AND_EXPR, TREE_TYPE (off), off,
3629 build_int_cst (NULL_TREE, -rsize, -1));
3630 t = build (MODIFY_EXPR, TREE_TYPE (off), off, t);
3631 gimplify_and_add (t, pre_p);
3636 /* [2] Emit code to branch if off == 0.  */
3637 t = lang_hooks.truthvalue_conversion (off);
3638 addr = build (COND_EXPR, ptr_type_node, t, NULL, NULL);
3640 /* [5] Emit code for: off -= rsize. We do this as a form of
3641 post-increment not available to C. Also widen for the
3642 coming pointer arithmetic.  */
3643 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize, 0));
3644 t = build (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
3645 t = fold_convert (sizetype, t);
3646 t = fold_convert (TREE_TYPE (top), t);
3648 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
3649 the argument has RSIZE - SIZE bytes of leading padding.  */
3650 t = build (MINUS_EXPR, TREE_TYPE (top), top, t);
3651 if (BYTES_BIG_ENDIAN && rsize > size)
3653 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
3655 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3657 COND_EXPR_THEN (addr) = t;
3659 if (osize > UNITS_PER_WORD)
3661 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize.  */
3662 u = fold_convert (TREE_TYPE (ovfl),
3663 build_int_cst (NULL_TREE, osize - 1, 0));
3664 t = build (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
3665 u = fold_convert (TREE_TYPE (ovfl),
3666 build_int_cst (NULL_TREE, -osize, -1));
3667 t = build (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
3668 align = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3673 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
3674 post-increment ovfl by osize. On big-endian machines,
3675 the argument has OSIZE - SIZE bytes of leading padding.  */
3676 u = fold_convert (TREE_TYPE (ovfl),
3677 build_int_cst (NULL_TREE, osize, 0));
3678 t = build (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
3679 if (BYTES_BIG_ENDIAN && osize > size)
3681 u = fold_convert (TREE_TYPE (t),
3682 build_int_cst (NULL_TREE, osize - size, 0));
3683 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3686 /* String [9] and [10,11] together.  */
3688 t = build (COMPOUND_EXPR, TREE_TYPE (t), align, t);
3689 COND_EXPR_ELSE (addr) = t;
3691 addr = fold_convert (build_pointer_type (type), addr);
3692 addr = build_fold_indirect_ref (addr);
/* Pass-by-reference: TYPE was wrapped in a pointer above, so one more
   dereference yields the actual argument value.  */
3696 addr = build_fold_indirect_ref (addr);
3701 /* Return true if it is possible to use left/right accesses for a
3702 bitfield of WIDTH bits starting BITPOS bits into *OP. When
3703 returning true, update *OP, *LEFT and *RIGHT as follows:
3705 *OP is a BLKmode reference to the whole field.
3707 *LEFT is a QImode reference to the first byte if big endian or
3708 the last byte if little endian. This address can be used in the
3709 left-side instructions (lwl, swl, ldl, sdl).
3711 *RIGHT is a QImode reference to the opposite end of the field and
3712 can be used in the partnering right-side instruction. */
/* WIDTH must be exactly the word size (32, or 64 on TARGET_64BIT) and
   BITPOS must be byte-aligned; otherwise the function fails.  */
3715 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
3716 rtx *left, rtx *right)
3720 /* Check that the operand really is a MEM. Not all the extv and
3721 extzv predicates are checked. */
3722 if (GET_CODE (*op) != MEM)
3725 /* Check that the size is valid. */
3726 if (width != 32 && (!TARGET_64BIT || width != 64))
3729 /* We can only access byte-aligned values. Since we are always passed
3730 a reference to the first byte of the field, it is not necessary to
3731 do anything with BITPOS after this check. */
3732 if (bitpos % BITS_PER_UNIT != 0)
3735 /* Reject aligned bitfields: we want to use a normal load or store
3736 instead of a left/right pair. */
3737 if (MEM_ALIGN (*op) >= width)
3740 /* Adjust *OP to refer to the whole field. This also has the effect
3741 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
3742 *op = adjust_address (*op, BLKmode, 0);
3743 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
3745 /* Get references to both ends of the field. We deliberately don't
3746 use the original QImode *OP for FIRST since the new BLKmode one
3747 might have a simpler address. */
3748 first = adjust_address (*op, QImode, 0);
3749 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
3751 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
3752 be the upper word and RIGHT the lower word. */
3753 if (TARGET_BIG_ENDIAN)
3754 *left = first, *right = last;
/* Little endian: the "left" (most-significant) end is the last byte.  */
3756 *left = last, *right = first;
3762 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
3763 Return true on success. We only handle cases where zero_extract is
3764 equivalent to sign_extract. */
/* Emits an lwl/lwr (or ldl/ldr for DImode) pair via the mov_* patterns
   after mips_get_unaligned_mem validates and splits the memory ref.  */
3767 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
3769 rtx left, right, temp;
3771 /* If TARGET_64BIT, the destination of a 32-bit load will be a
3772 paradoxical word_mode subreg. This is the only case in which
3773 we allow the destination to be larger than the source. */
3774 if (GET_CODE (dest) == SUBREG
3775 && GET_MODE (dest) == DImode
3776 && SUBREG_BYTE (dest) == 0
3777 && GET_MODE (SUBREG_REG (dest)) == SImode)
3778 dest = SUBREG_REG (dest);
3780 /* After the above adjustment, the destination must be the same
3781 width as the source. */
3782 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
3785 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
/* TEMP holds the partial (left-half) result between the two insns so
   the final writeback to DEST happens in a single instruction.  */
3788 temp = gen_reg_rtx (GET_MODE (dest));
3789 if (GET_MODE (dest) == DImode)
3791 emit_insn (gen_mov_ldl (temp, src, left));
3792 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
3796 emit_insn (gen_mov_lwl (temp, src, left));
3797 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
3803 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
/* Store counterpart of mips_expand_unaligned_load: emits a swl/swr
   (or sdl/sdr) pair; SRC is first narrowed to the matching word mode.  */
3807 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
3811 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
3814 src = gen_lowpart (mode_for_size (width, MODE_INT, 0), src);
3816 if (GET_MODE (src) == DImode)
3818 emit_insn (gen_mov_sdl (dest, src, left));
3819 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
3823 emit_insn (gen_mov_swl (dest, src, left));
3824 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
3829 /* Set up globals to generate code for the ISA or processor
3830 described by INFO. */
/* Updates the three architecture globals together so they stay
   consistent: mips_arch_info (full record), mips_arch (processor enum)
   and mips_isa (ISA level).
   NOTE(review): lines 3834-3836 are elided here; presumably a null
   check guards these assignments — confirm against the full source.  */
3833 mips_set_architecture (const struct mips_cpu_info *info)
3837 mips_arch_info = info;
3838 mips_arch = info->cpu;
3839 mips_isa = info->isa;
3844 /* Likewise for tuning. */
/* Mirror of mips_set_architecture for the -mtune globals:
   mips_tune_info and mips_tune.  (No ISA global here — tuning does not
   change the ISA level.)  */
3847 mips_set_tune (const struct mips_cpu_info *info)
3851 mips_tune_info = info;
3852 mips_tune = info->cpu;
3857 /* Set up the threshold for data to go into the small data area, instead
3858 of the normal data area, and detect any conflicts in the switches. */
/* This is the MIPS OVERRIDE_OPTIONS hook.  Beyond the -G threshold it:
   interprets -mabi/-march/-mips/-mtune; reconciles register-size and
   float-register flags with the ABI and ISA; sets type-size and
   branch-likely defaults; configures mips16 and explicit-reloc modes;
   fills the operand-punctuation, constraint-class, debug-register and
   hard-regno-mode-ok tables; and installs the per-function machine
   status allocator and relocation-operator tables.  */
3861 override_options (void)
3863 int i, start, regno;
3864 enum machine_mode mode;
3866 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
3868 /* Interpret -mabi. */
3869 mips_abi = MIPS_ABI_DEFAULT;
3870 if (mips_abi_string != 0)
3872 if (strcmp (mips_abi_string, "32") == 0)
3874 else if (strcmp (mips_abi_string, "o64") == 0)
3876 else if (strcmp (mips_abi_string, "n32") == 0)
3878 else if (strcmp (mips_abi_string, "64") == 0)
3880 else if (strcmp (mips_abi_string, "eabi") == 0)
3881 mips_abi = ABI_EABI;
3883 fatal_error ("bad value (%s) for -mabi= switch", mips_abi_string);
3886 /* The following code determines the architecture and register size.
3887 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
3888 The GAS and GCC code should be kept in sync as much as possible. */
3890 if (mips_arch_string != 0)
3891 mips_set_architecture (mips_parse_cpu ("-march", mips_arch_string));
3893 if (mips_isa_string != 0)
3895 /* Handle -mipsN. */
3896 char *whole_isa_str = concat ("mips", mips_isa_string, NULL);
3897 const struct mips_cpu_info *isa_info;
3899 isa_info = mips_parse_cpu ("-mips option", whole_isa_str);
3900 free (whole_isa_str);
3902 /* -march takes precedence over -mipsN, since it is more descriptive.
3903 There's no harm in specifying both as long as the ISA levels
3905 if (mips_arch_info != 0 && mips_isa != isa_info->isa)
3906 error ("-mips%s conflicts with the other architecture options, "
3907 "which specify a MIPS%d processor",
3908 mips_isa_string, mips_isa);
3910 /* Set architecture based on the given option. */
3911 mips_set_architecture (isa_info);
/* No -march/-mips given: fall back on the configured default CPU, or
   failing that the default ISA level.  */
3914 if (mips_arch_info == 0)
3916 #ifdef MIPS_CPU_STRING_DEFAULT
3917 mips_set_architecture (mips_parse_cpu ("default CPU",
3918 MIPS_CPU_STRING_DEFAULT));
3920 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
3924 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
3925 error ("-march=%s is not compatible with the selected ABI",
3926 mips_arch_info->name);
3928 /* Optimize for mips_arch, unless -mtune selects a different processor. */
3929 if (mips_tune_string != 0)
3930 mips_set_tune (mips_parse_cpu ("-mtune", mips_tune_string));
3932 if (mips_tune_info == 0)
3933 mips_set_tune (mips_arch_info);
3935 if ((target_flags_explicit & MASK_64BIT) != 0)
3937 /* The user specified the size of the integer registers. Make sure
3938 it agrees with the ABI and ISA. */
3939 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
3940 error ("-mgp64 used with a 32-bit processor");
3941 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
3942 error ("-mgp32 used with a 64-bit ABI");
3943 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
3944 error ("-mgp64 used with a 32-bit ABI");
3948 /* Infer the integer register size from the ABI and processor.
3949 Restrict ourselves to 32-bit registers if that's all the
3950 processor has, or if the ABI cannot handle 64-bit registers. */
3951 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
3952 target_flags &= ~MASK_64BIT;
3954 target_flags |= MASK_64BIT;
3957 if ((target_flags_explicit & MASK_FLOAT64) != 0)
3959 /* Really, -mfp32 and -mfp64 are ornamental options. There's
3960 only one right answer here. */
3961 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
3962 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
3963 else if (!TARGET_64BIT && TARGET_FLOAT64)
3964 error ("unsupported combination: %s", "-mgp32 -mfp64");
3965 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
3966 error ("unsupported combination: %s", "-mfp64 -msingle-float");
3970 /* -msingle-float selects 32-bit float registers. Otherwise the
3971 float registers should be the same size as the integer ones. */
3972 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
3973 target_flags |= MASK_FLOAT64;
3975 target_flags &= ~MASK_FLOAT64;
3978 /* End of code shared with GAS. */
3980 if ((target_flags_explicit & MASK_LONG64) == 0)
3982 /* If no type size setting options (-mlong64,-mint64,-mlong32)
3983 were used, then set the type sizes. In the EABI in 64 bit mode,
3984 longs and pointers are 64 bits. Likewise for the SGI Irix6 N64
3986 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
3987 target_flags |= MASK_LONG64;
3989 target_flags &= ~MASK_LONG64;
3992 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
3993 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
3995 /* For some configurations, it is useful to have -march control
3996 the default setting of MASK_SOFT_FLOAT. */
3997 switch ((int) mips_arch)
3999 case PROCESSOR_R4100:
4000 case PROCESSOR_R4111:
4001 case PROCESSOR_R4120:
4002 case PROCESSOR_R4130:
4003 target_flags |= MASK_SOFT_FLOAT;
4007 target_flags &= ~MASK_SOFT_FLOAT;
4013 flag_pcc_struct_return = 0;
4015 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4017 /* If neither -mbranch-likely nor -mno-branch-likely was given
4018 on the command line, set MASK_BRANCHLIKELY based on the target
4021 By default, we enable use of Branch Likely instructions on
4022 all architectures which support them with the following
4023 exceptions: when creating MIPS32 or MIPS64 code, and when
4024 tuning for architectures where their use tends to hurt
4027 The MIPS32 and MIPS64 architecture specifications say "Software
4028 is strongly encouraged to avoid use of Branch Likely
4029 instructions, as they will be removed from a future revision
4030 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4031 issue those instructions unless instructed to do so by
4033 if (ISA_HAS_BRANCHLIKELY
4034 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4035 && !(TUNE_MIPS5500 || TUNE_SB1))
4036 target_flags |= MASK_BRANCHLIKELY;
4038 target_flags &= ~MASK_BRANCHLIKELY;
4040 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4041 warning ("generation of Branch Likely instructions enabled, but not supported by architecture");
4043 /* The effect of -mabicalls isn't defined for the EABI. */
4044 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4046 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4047 target_flags &= ~MASK_ABICALLS;
4050 /* -fpic (-KPIC) is the default when TARGET_ABICALLS is defined. We need
4051 to set flag_pic so that the LEGITIMATE_PIC_OPERAND_P macro will work. */
4052 /* ??? -non_shared turns off pic code generation, but this is not
4054 if (TARGET_ABICALLS)
4057 if (mips_section_threshold > 0)
4058 warning ("-G is incompatible with PIC code which is the default");
4061 /* mips_split_addresses is a half-way house between explicit
4062 relocations and the traditional assembler macros. It can
4063 split absolute 32-bit symbolic constants into a high/lo_sum
4064 pair but uses macros for other sorts of access.
4066 Like explicit relocation support for REL targets, it relies
4067 on GNU extensions in the assembler and the linker.
4069 Although this code should work for -O0, it has traditionally
4070 been treated as an optimization. */
4071 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4072 && optimize && !flag_pic
4073 && !ABI_HAS_64BIT_SYMBOLS)
4074 mips_split_addresses = 1;
4076 mips_split_addresses = 0;
4078 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4079 faster code, but at the expense of more nops. Enable it at -O3 and
4081 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4082 target_flags |= MASK_VR4130_ALIGN;
4084 /* When compiling for the mips16, we cannot use floating point. We
4085 record the original hard float value in mips16_hard_float. */
4088 if (TARGET_SOFT_FLOAT)
4089 mips16_hard_float = 0;
4091 mips16_hard_float = 1;
4092 target_flags |= MASK_SOFT_FLOAT;
4094 /* Don't run the scheduler before reload, since it tends to
4095 increase register pressure. */
4096 flag_schedule_insns = 0;
4098 /* Silently disable -mexplicit-relocs since it doesn't apply
4099 to mips16 code. Even so, it would overly pedantic to warn
4100 about "-mips16 -mexplicit-relocs", especially given that
4101 we use a %gprel() operator. */
4102 target_flags &= ~MASK_EXPLICIT_RELOCS;
4105 /* When using explicit relocs, we call dbr_schedule from within
4107 if (TARGET_EXPLICIT_RELOCS)
4109 mips_flag_delayed_branch = flag_delayed_branch;
4110 flag_delayed_branch = 0;
4113 #ifdef MIPS_TFMODE_FORMAT
4114 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
/* Register the punctuation characters accepted by print_operand
   (see the PRINT_OPERAND comment later in this file).  */
4117 mips_print_operand_punct['?'] = 1;
4118 mips_print_operand_punct['#'] = 1;
4119 mips_print_operand_punct['/'] = 1;
4120 mips_print_operand_punct['&'] = 1;
4121 mips_print_operand_punct['!'] = 1;
4122 mips_print_operand_punct['*'] = 1;
4123 mips_print_operand_punct['@'] = 1;
4124 mips_print_operand_punct['.'] = 1;
4125 mips_print_operand_punct['('] = 1;
4126 mips_print_operand_punct[')'] = 1;
4127 mips_print_operand_punct['['] = 1;
4128 mips_print_operand_punct[']'] = 1;
4129 mips_print_operand_punct['<'] = 1;
4130 mips_print_operand_punct['>'] = 1;
4131 mips_print_operand_punct['{'] = 1;
4132 mips_print_operand_punct['}'] = 1;
4133 mips_print_operand_punct['^'] = 1;
4134 mips_print_operand_punct['$'] = 1;
4135 mips_print_operand_punct['+'] = 1;
4136 mips_print_operand_punct['~'] = 1;
/* Map constraint letters to register classes; several depend on the
   mips16 / abicalls / hard-float settings computed above.  */
4138 mips_char_to_class['d'] = TARGET_MIPS16 ? M16_REGS : GR_REGS;
4139 mips_char_to_class['t'] = T_REG;
4140 mips_char_to_class['f'] = (TARGET_HARD_FLOAT ? FP_REGS : NO_REGS);
4141 mips_char_to_class['h'] = HI_REG;
4142 mips_char_to_class['l'] = LO_REG;
4143 mips_char_to_class['x'] = MD_REGS;
4144 mips_char_to_class['b'] = ALL_REGS;
4145 mips_char_to_class['c'] = (TARGET_ABICALLS ? PIC_FN_ADDR_REG :
4146 TARGET_MIPS16 ? M16_NA_REGS :
4148 mips_char_to_class['e'] = LEA_REGS;
4149 mips_char_to_class['j'] = PIC_FN_ADDR_REG;
4150 mips_char_to_class['y'] = GR_REGS;
4151 mips_char_to_class['z'] = ST_REGS;
4152 mips_char_to_class['B'] = COP0_REGS;
4153 mips_char_to_class['C'] = COP2_REGS;
4154 mips_char_to_class['D'] = COP3_REGS;
4156 /* Set up array to map GCC register number to debug register number.
4157 Ignore the special purpose register numbers. */
4159 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4160 mips_dbx_regno[i] = -1;
4162 start = GP_DBX_FIRST - GP_REG_FIRST;
4163 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4164 mips_dbx_regno[i] = i + start;
4166 start = FP_DBX_FIRST - FP_REG_FIRST;
4167 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4168 mips_dbx_regno[i] = i + start;
4170 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4171 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4173 /* Set up array giving whether a given register can hold a given mode. */
4175 for (mode = VOIDmode;
4176 mode != MAX_MACHINE_MODE;
4177 mode = (enum machine_mode) ((int)mode + 1))
4179 register int size = GET_MODE_SIZE (mode);
4180 register enum mode_class class = GET_MODE_CLASS (mode);
4182 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4189 temp = (regno == FPSW_REGNUM);
4191 temp = (ST_REG_P (regno) || GP_REG_P (regno)
4192 || FP_REG_P (regno));
/* GPRs: multi-word values must start on an even register.  */
4195 else if (GP_REG_P (regno))
4196 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
4198 else if (FP_REG_P (regno))
4199 temp = ((regno % FP_INC) == 0)
4200 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
4201 && size <= UNITS_PER_FPVALUE)
4202 /* Allow integer modes that fit into a single
4203 register. We need to put integers into FPRs
4204 when using instructions like cvt and trunc. */
4205 || (class == MODE_INT && size <= UNITS_PER_FPREG)
4206 /* Allow TFmode for CCmode reloads. */
4207 || (ISA_HAS_8CC && mode == TFmode));
4209 else if (MD_REG_P (regno))
4210 temp = (INTEGRAL_MODE_P (mode)
4211 && (size <= UNITS_PER_WORD
4212 || (regno == MD_REG_FIRST
4213 && size == 2 * UNITS_PER_WORD)));
4215 else if (ALL_COP_REG_P (regno))
4216 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
4220 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
4224 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
4225 initialized yet, so we can't use that here. */
4226 gpr_mode = TARGET_64BIT ? DImode : SImode;
4228 /* Provide default values for align_* for 64-bit targets. */
4229 if (TARGET_64BIT && !TARGET_MIPS16)
4231 if (align_loops == 0)
4233 if (align_jumps == 0)
4235 if (align_functions == 0)
4236 align_functions = 8;
4239 /* Function to allocate machine-dependent function status. */
4240 init_machine_status = &mips_init_machine_status;
/* Choose the relocation operators used when splitting symbolic
   addresses, keyed by mips_symbol_type.  64-bit-symbol ABIs need the
   three-part %highest/%higher/%hi split.  */
4242 if (ABI_HAS_64BIT_SYMBOLS)
4244 if (TARGET_EXPLICIT_RELOCS)
4246 mips_split_p[SYMBOL_64_HIGH] = true;
4247 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
4248 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
4250 mips_split_p[SYMBOL_64_MID] = true;
4251 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
4252 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
4254 mips_split_p[SYMBOL_64_LOW] = true;
4255 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
4256 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
4258 mips_split_p[SYMBOL_GENERAL] = true;
4259 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4264 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
4266 mips_split_p[SYMBOL_GENERAL] = true;
4267 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
4268 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4274 /* The high part is provided by a pseudo copy of $gp. */
4275 mips_split_p[SYMBOL_SMALL_DATA] = true;
4276 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
4279 if (TARGET_EXPLICIT_RELOCS)
4281 /* Small data constants are kept whole until after reload,
4282 then lowered by mips_rewrite_small_data. */
4283 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
4285 mips_split_p[SYMBOL_GOT_LOCAL] = true;
4288 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
4289 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
4293 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
4294 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
4299 /* The HIGH and LO_SUM are matched by special .md patterns. */
4300 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
4302 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
4303 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
4304 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
4306 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
4307 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
4308 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
4313 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
4315 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
4316 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
4322 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
4323 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
4324 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
4327 /* Default to working around R4000 errata only if the processor
4328 was selected explicitly. */
4329 if ((target_flags_explicit & MASK_FIX_R4000) == 0
4330 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
4331 target_flags |= MASK_FIX_R4000;
4333 /* Default to working around R4400 errata only if the processor
4334 was selected explicitly. */
4335 if ((target_flags_explicit & MASK_FIX_R4400) == 0
4336 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
4337 target_flags |= MASK_FIX_R4400;
4340 /* Implement CONDITIONAL_REGISTER_USAGE. */
/* Adjusts fixed_regs / call_used_regs / call_really_used_regs after the
   options are final: hides FPRs and condition-code registers when they
   are unavailable, restricts registers in mips16 mode, and marks the
   FPRs that the 64-bit ABIs treat as caller-saved.  */
4343 mips_conditional_register_usage (void)
4345 if (!TARGET_HARD_FLOAT)
4349 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4350 fixed_regs[regno] = call_used_regs[regno] = 1;
4351 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4352 fixed_regs[regno] = call_used_regs[regno] = 1;
4354 else if (! ISA_HAS_8CC)
4358 /* We only have a single condition code register. We
4359 implement this by hiding all the condition code registers,
4360 and generating RTL that refers directly to ST_REG_FIRST. */
4361 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4362 fixed_regs[regno] = call_used_regs[regno] = 1;
4364 /* In mips16 mode, we permit the $t temporary registers to be used
4365 for reload. We prohibit the unused $s registers, since they
4366 are caller saved, and saving them via a mips16 register would
4367 probably waste more time than just reloading the value. */
/* $18-$23 ($s2-$s7), $26-$27 (kernel regs) and $30 ($s8/fp).  */
4370 fixed_regs[18] = call_used_regs[18] = 1;
4371 fixed_regs[19] = call_used_regs[19] = 1;
4372 fixed_regs[20] = call_used_regs[20] = 1;
4373 fixed_regs[21] = call_used_regs[21] = 1;
4374 fixed_regs[22] = call_used_regs[22] = 1;
4375 fixed_regs[23] = call_used_regs[23] = 1;
4376 fixed_regs[26] = call_used_regs[26] = 1;
4377 fixed_regs[27] = call_used_regs[27] = 1;
4378 fixed_regs[30] = call_used_regs[30] = 1;
4380 /* fp20-23 are now caller saved. */
4381 if (mips_abi == ABI_64)
4384 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
4385 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4387 /* Odd registers from fp21 to fp31 are now caller saved. */
4388 if (mips_abi == ABI_N32)
4391 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
4392 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4396 /* Allocate a chunk of memory for per-function machine-dependent data. */
4397 static struct machine_function *
4398 mips_init_machine_status (void)
4400 return ((struct machine_function *)
4401 ggc_alloc_cleared (sizeof (struct machine_function)));
4404 /* On the mips16, we want to allocate $24 (T_REG) before other
4405 registers for instructions for which it is possible. This helps
4406 avoid shuffling registers around in order to set up for an xor,
4407 encouraging the compiler to use a cmp instead. */
/* Fills reg_alloc_order with the identity ordering, then swaps $0 and
   $24 so the allocator prefers $24.
   NOTE(review): line 4417 is elided here; the swap below is presumably
   guarded by TARGET_MIPS16, per the comment above — confirm.  */
4410 mips_order_regs_for_local_alloc (void)
4414 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4415 reg_alloc_order[i] = i;
4419 /* It really doesn't matter where we put register 0, since it is
4420 a fixed register anyhow. */
4421 reg_alloc_order[0] = 24;
4422 reg_alloc_order[24] = 0;
4427 /* The MIPS debug format wants all automatic variables and arguments
4428 to be in terms of the virtual frame pointer (stack pointer before
4429 any adjustment in the function), while the MIPS 3.0 linker wants
4430 the frame pointer to be the stack pointer after the initial
4431 adjustment. So, we do the adjustment here. The arg pointer (which
4432 is eliminated) points to the virtual frame pointer, while the frame
4433 pointer (which may be eliminated) points to the stack pointer after
4434 the initial adjustments. */
/* ADDR is the address being described; OFFSET is the raw displacement.
   Returns the displacement rebased for the debugger as described above.  */
4437 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
4439 rtx offset2 = const0_rtx;
4440 rtx reg = eliminate_constant_term (addr, &offset2);
4443 offset = INTVAL (offset2);
4445 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
4446 || reg == hard_frame_pointer_rtx)
/* Use the cached frame size when the prologue has been laid out;
   otherwise compute it from scratch.  */
4448 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
4449 ? compute_frame_size (get_frame_size ())
4450 : cfun->machine->frame.total_size;
4452 /* MIPS16 frame is smaller */
4453 if (frame_pointer_needed && TARGET_MIPS16)
4454 frame_size -= cfun->machine->frame.args_size;
4456 offset = offset - frame_size;
4459 /* sdbout_parms does not want this to crash for unrecognized cases. */
4461 else if (reg != arg_pointer_rtx)
4462 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
4469 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
4471 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
4472 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
4473 'h' OP is HIGH, prints %hi(X),
4474 'd' output integer constant in decimal,
4475 'z' if the operand is 0, use $0 instead of normal operand.
4476 'D' print second part of double-word register or memory operand.
4477 'L' print low-order register of double-word register operand.
4478 'M' print high-order register of double-word register operand.
4479 'C' print part of opcode for a branch condition.
4480 'F' print part of opcode for a floating-point branch condition.
4481 'N' print part of opcode for a branch condition, inverted.
4482 'W' print part of opcode for a floating-point branch condition, inverted.
4483 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
4484 'z' for (eq:?I ...), 'n' for (ne:?I ...).
4485 't' like 'T', but with the EQ/NE cases reversed
4486 'Z' print register and a comma, but print nothing for $fcc0
4487 'R' print the reloc associated with LO_SUM
4489 The punctuation characters are:
4491 '(' Turn on .set noreorder
4492 ')' Turn on .set reorder
4493 '[' Turn on .set noat
4495 '<' Turn on .set nomacro
4496 '>' Turn on .set macro
4497 '{' Turn on .set volatile (not GAS)
4498 '}' Turn on .set novolatile (not GAS)
4499 '&' Turn on .set noreorder if filling delay slots
4500 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
4501 '!' Turn on .set nomacro if filling delay slots
4502 '#' Print nop if in a .set noreorder section.
4503 '/' Like '#', but does nothing within a delayed branch sequence
4504 '?' Print 'l' if we are to use a branch likely instead of normal branch.
4505 '@' Print the name of the assembler temporary register (at or $1).
4506 '.' Print the name of the register with a hard-wired zero (zero or $0).
4507 '^' Print the name of the pic call-through register (t9 or $25).
4508 '$' Print the name of the stack pointer register (sp or $29).
4509 '+' Print the name of the gp register (usually gp or $28).
4510 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
/* Implement the PRINT_OPERAND macro: write operand OP to FILE, modified
   by the operand code LETTER (see the large comment above for the full
   list of codes and punctuation characters).
   NOTE(review): this listing is elided — intermediate source lines
   (function braces, switch heads, some cases) are missing.  */
4513 print_operand (FILE *file, rtx op, int letter)
4515 register enum rtx_code code;
/* Punctuation "operands" take no rtx; they toggle .set directives,
   emit nops, or print fixed register names.  */
4517 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
4522 if (mips_branch_likely)
4527 fputs (reg_names [GP_REG_FIRST + 1], file);
4531 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
4535 fputs (reg_names [GP_REG_FIRST + 0], file);
4539 fputs (reg_names[STACK_POINTER_REGNUM], file);
4543 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
/* '&': .set noreorder only while filling delay slots (final_sequence).  */
4547 if (final_sequence != 0 && set_noreorder++ == 0)
4548 fputs (".set\tnoreorder\n\t", file);
/* '*': both noreorder and nomacro while filling delay slots.  */
4552 if (final_sequence != 0)
4554 if (set_noreorder++ == 0)
4555 fputs (".set\tnoreorder\n\t", file);
4557 if (set_nomacro++ == 0)
4558 fputs (".set\tnomacro\n\t", file);
4563 if (final_sequence != 0 && set_nomacro++ == 0)
4564 fputs ("\n\t.set\tnomacro", file);
/* '#': pad with a nop when inside a .set noreorder region.  */
4568 if (set_noreorder != 0)
4569 fputs ("\n\tnop", file);
4573 /* Print an extra newline so that the delayed insn is separated
4574 from the following ones. This looks neater and is consistent
4575 with non-nop delayed sequences. */
4576 if (set_noreorder != 0 && final_sequence == 0)
4577 fputs ("\n\tnop\n", file);
/* '(' ... ')': nested noreorder regions, counted in set_noreorder.  */
4581 if (set_noreorder++ == 0)
4582 fputs (".set\tnoreorder\n\t", file);
4586 if (set_noreorder == 0)
4587 error ("internal error: %%) found without a %%( in assembler pattern");
4589 else if (--set_noreorder == 0)
4590 fputs ("\n\t.set\treorder", file);
/* '[' ... ']': nested noat regions.  */
4595 if (set_noat++ == 0)
4596 fputs (".set\tnoat\n\t", file);
4601 error ("internal error: %%] found without a %%[ in assembler pattern");
4602 else if (--set_noat == 0)
4603 fputs ("\n\t.set\tat", file);
/* '<' ... '>': nested nomacro regions.  */
4608 if (set_nomacro++ == 0)
4609 fputs (".set\tnomacro\n\t", file);
4613 if (set_nomacro == 0)
4614 error ("internal error: %%> found without a %%< in assembler pattern");
4615 else if (--set_nomacro == 0)
4616 fputs ("\n\t.set\tmacro", file);
/* '{' ... '}': .set volatile, emitted behind '#' since GAS lacks it.  */
4621 if (set_volatile++ == 0)
4622 fputs ("#.set\tvolatile\n\t", file);
4626 if (set_volatile == 0)
4627 error ("internal error: %%} found without a %%{ in assembler pattern");
4628 else if (--set_volatile == 0)
4629 fputs ("\n\t#.set\tnovolatile", file);
/* '~': branch alignment, using the label alignment currently in force.  */
4635 if (align_labels_log > 0)
4636 ASM_OUTPUT_ALIGN (file, align_labels_log);
4641 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
4650 error ("PRINT_OPERAND null pointer");
4654 code = GET_CODE (op);
/* 'C': branch-condition mnemonic for a comparison code.  */
4659 case EQ: fputs ("eq", file); break;
4660 case NE: fputs ("ne", file); break;
4661 case GT: fputs ("gt", file); break;
4662 case GE: fputs ("ge", file); break;
4663 case LT: fputs ("lt", file); break;
4664 case LE: fputs ("le", file); break;
4665 case GTU: fputs ("gtu", file); break;
4666 case GEU: fputs ("geu", file); break;
4667 case LTU: fputs ("ltu", file); break;
4668 case LEU: fputs ("leu", file); break;
4670 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
/* 'N': like 'C' but with the condition inverted.  */
4673 else if (letter == 'N')
4676 case EQ: fputs ("ne", file); break;
4677 case NE: fputs ("eq", file); break;
4678 case GT: fputs ("le", file); break;
4679 case GE: fputs ("lt", file); break;
4680 case LT: fputs ("ge", file); break;
4681 case LE: fputs ("gt", file); break;
4682 case GTU: fputs ("leu", file); break;
4683 case GEU: fputs ("ltu", file); break;
4684 case LTU: fputs ("geu", file); break;
4685 case LEU: fputs ("gtu", file); break;
4687 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
/* 'F' / 'W': floating-point branch opcodes (c1t/c1f), 'W' inverted.  */
4690 else if (letter == 'F')
4693 case EQ: fputs ("c1f", file); break;
4694 case NE: fputs ("c1t", file); break;
4696 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
4699 else if (letter == 'W')
4702 case EQ: fputs ("c1t", file); break;
4703 case NE: fputs ("c1f", file); break;
4705 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
/* 'h': %hi relocation for a HIGH operand; 'R': %lo-style reloc for LO_SUM.  */
4708 else if (letter == 'h')
4710 if (GET_CODE (op) == HIGH)
4713 print_operand_reloc (file, op, mips_hi_relocs);
4716 else if (letter == 'R')
4717 print_operand_reloc (file, op, mips_lo_relocs);
/* 'Z': condition-code register plus comma, omitting the default $fcc0.  */
4719 else if (letter == 'Z')
4721 register int regnum;
4726 regnum = REGNO (op);
4727 if (! ST_REG_P (regnum))
4730 if (regnum != ST_REG_FIRST)
4731 fprintf (file, "%s,", reg_names[regnum]);
/* Plain register operand; 'M'/'L'/'D' select word of a double-word reg
   depending on endianness (adjustment code elided in this listing).  */
4734 else if (code == REG || code == SUBREG)
4736 register int regnum;
4739 regnum = REGNO (op);
4741 regnum = true_regnum (op);
4743 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
4744 || (letter == 'L' && WORDS_BIG_ENDIAN)
4748 fprintf (file, "%s", reg_names[regnum]);
/* Memory operand; 'D' addresses the second word (+4) of a double word.  */
4751 else if (code == MEM)
4754 output_address (plus_constant (XEXP (op, 0), 4));
4756 output_address (XEXP (op, 0));
/* Constant-integer formats 'x' (16-bit hex), 'X' (hex), 'd' (decimal).  */
4759 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
4760 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
4762 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
4763 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
4765 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
4766 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)))
4768 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
4769 fputs (reg_names[GP_REG_FIRST], file);
4771 else if (letter == 'd' || letter == 'x' || letter == 'X')
4772 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
/* 'T'/'t': pick one of "z", "f", "n", "t" from the comparison and mode.  */
4774 else if (letter == 'T' || letter == 't')
4776 int truth = (code == NE) == (letter == 'T');
4777 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
4780 else if (CONST_GP_P (op))
4781 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
/* Fallback: any other constant or symbolic operand.  */
4784 output_addr_const (file, op);
4788 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
4789 RELOCS is the array of relocations to use. */
/* Print symbolic operand OP wrapped in the relocation operator chosen
   from RELOCS by its symbol type (e.g. "%hi(" or "%lo(").  The matching
   closing parens are emitted by the trailing loop over the reloc string;
   fatal_insn rejects operands with no usable relocation.
   NOTE(review): elided listing — some declarations/braces are missing.  */
4792 print_operand_reloc (FILE *file, rtx op, const char **relocs)
4794 enum mips_symbol_type symbol_type;
4797 HOST_WIDE_INT offset;
4799 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
4800 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
4802 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
4803 mips_split_const (op, &base, &offset);
4804 if (UNSPEC_ADDRESS_P (base))
4805 op = plus_constant (UNSPEC_ADDRESS (base), offset);
4807 fputs (relocs[symbol_type], file);
4808 output_addr_const (file, op);
/* One ')' per character of the reloc prefix closes every '(' it opened.  */
4809 for (p = relocs[symbol_type]; *p != 0; p++)
4814 /* Output address operand X to FILE. */
/* Output address operand X to FILE, dispatching on the address class
   returned by mips_classify_address (register+offset, LO_SUM,
   constant-integer, or bare symbolic).
   NOTE(review): elided listing — the switch head and default case are
   missing from view.  */
4817 print_operand_address (FILE *file, rtx x)
4819 struct mips_address_info addr;
4821 if (mips_classify_address (&addr, x, word_mode, true))
/* ADDRESS_REG: "offset(reg)" form.  */
4825 print_operand (file, addr.offset, 0);
4826 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
4829 case ADDRESS_LO_SUM:
/* 'R' prints the %lo-style relocation around the offset.  */
4830 print_operand (file, addr.offset, 'R');
4831 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
4834 case ADDRESS_CONST_INT:
/* Absolute small constant: address it off the hard-wired zero register.  */
4835 output_addr_const (file, x);
4836 fprintf (file, "(%s)", reg_names[0]);
4839 case ADDRESS_SYMBOLIC:
4840 output_addr_const (file, x);
4846 /* When using assembler macros, keep track of all of small-data externs
4847 so that mips_file_end can emit the appropriate declarations for them.
4849 In most cases it would be safe (though pointless) to emit .externs
4850 for other symbols too. One exception is when an object is within
4851 the -G limit but declared by the user to be in a section other
4852 than .sbss or .sdata. */
/* Implement ASM_OUTPUT_EXTERNAL: record NAME/DECL on the extern_head
   list so mips_file_end can emit .extern (small data) or .globl
   (IRIX o32 functions, recorded with the elided size marker) later.  */
4855 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
4857 register struct extern_list *p;
/* Small-data symbols only matter when assembler macros handle %gp_rel.  */
4859 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
4861 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
4862 p->next = extern_head;
4864 p->size = int_size_in_bytes (TREE_TYPE (decl));
/* IRIX o32 also needs every external function recorded (see
   mips_file_end's .globl workaround).  */
4868 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
4870 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
4871 p->next = extern_head;
/* Record the libcall symbol FUN on extern_head for IRIX o32, so that
   mips_file_end emits the .globl needed by the IRIX linker.  */
4882 irix_output_external_libcall (rtx fun)
4884 register struct extern_list *p;
4886 if (mips_abi == ABI_32)
4888 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
4889 p->next = extern_head;
4890 p->name = XSTR (fun, 0);
4897 /* Emit a new filename to a stream. If we are smuggling stabs, try to
4898 put out a MIPS ECOFF file and a stab. */
/* Emit a new filename NAME to STREAM.  DWARF-2 output is left to
   dwarf2out; the first filename uses ASM_OUTPUT_FILENAME; under stabs
   (DBX_DEBUG) an N_SOL stab referencing the Ltext label is emitted;
   otherwise a plain filename directive is emitted when NAME changes.
   NOTE(review): elided listing — some lines of the original body are
   missing from view; the visible lines are kept as-is.  */
4901 mips_output_filename (FILE *stream, const char *name)
4903 char ltext_label_name[100];
4905 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
4907 if (write_symbols == DWARF2_DEBUG)
4909 else if (mips_output_filename_first_time)
4911 mips_output_filename_first_time = 0;
4912 num_source_filenames += 1;
4913 current_function_file = name;
4914 ASM_OUTPUT_FILENAME (stream, num_source_filenames, name);
4917 else if (write_symbols == DBX_DEBUG)
4919 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
4920 fputs ("\t.stabs\t", stream);
4921 output_quoted_string (stream, name);
/* FIX: "<ext_label_name[1]" was a mis-decoded "&ltext_label_name[1]"
   (the '&l' of the address-of expression was mangled); [1] skips the
   leading '*' that ASM_GENERATE_INTERNAL_LABEL stores in the buffer.  */
4922 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
4925 else if (name != current_function_file
4926 && strcmp (name, current_function_file) != 0)
4928 num_source_filenames += 1;
4929 current_function_file = name;
4930 ASM_OUTPUT_FILENAME (stream, num_source_filenames, name);
4934 /* Emit a linenumber. For encapsulated stabs, we need to put out a stab
4935 as well as a .loc, since it is possible that MIPS ECOFF might not be
4936 able to represent the location for inlines that come from a different
/* Emit line number LINE to STREAM: an N_SLINE stab plus LM label under
   DBX_DEBUG, otherwise a .loc directive keyed to num_source_filenames.  */
4940 mips_output_lineno (FILE *stream, int line)
4942 if (write_symbols == DBX_DEBUG)
4945 fprintf (stream, "%sLM%d:\n\t.stabn\t%d,0,%d,%sLM%d\n",
4946 LOCAL_LABEL_PREFIX, sym_lineno, N_SLINE, line,
4947 LOCAL_LABEL_PREFIX, sym_lineno);
4951 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
4955 /* Output an ASCII string, in a space-saving way. PREFIX is the string
4956 that should be written before the opening quote, such as "\t.ascii\t"
4957 for real string data or "\t# " for a comment. */
/* Output the LEN bytes of STRING_PARAM to STREAM as a quoted .ascii-style
   string, escaping specials and breaking the string across lines once
   the column (cur_pos) passes 72.  PREFIX precedes each opening quote.
   NOTE(review): elided listing — the switch head, several case labels,
   and the cur_pos bookkeeping lines are missing from view.  */
4960 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
4965 register const unsigned char *string =
4966 (const unsigned char *)string_param;
4968 fprintf (stream, "%s\"", prefix);
4969 for (i = 0; i < len; i++)
4971 register int c = string[i];
/* Escape the quote/backslash class with a leading backslash.  */
4977 putc ('\\', stream);
4982 case TARGET_NEWLINE:
4983 fputs ("\\n", stream);
/* After a newline, force a line break unless the next char is printable
   or a tab (keeps multi-line string constants readable).  */
4985 && (((c = string[i+1]) >= '\040' && c <= '~')
4986 || c == TARGET_TAB))
4987 cur_pos = 32767; /* break right here */
4993 fputs ("\\t", stream);
4998 fputs ("\\f", stream);
5003 fputs ("\\b", stream);
5008 fputs ("\\r", stream);
/* Printable ASCII is emitted directly; everything else as \ooo octal.  */
5013 if (c >= ' ' && c < 0177)
5020 fprintf (stream, "\\%03o", c);
5025 if (cur_pos > 72 && i+1 < len)
5028 fprintf (stream, "\"\n%s\"", prefix);
5031 fprintf (stream, "\"\n");
5034 /* Implement TARGET_ASM_FILE_START. */
/* Implement TARGET_ASM_FILE_START: emit the .mdebug ABI section for GDB,
   the long32/long64 marker for EABI, .abicalls/.set mips16 as configured,
   and an optional verbose-asm banner.  */
5037 mips_file_start (void)
5039 default_file_start ();
5043 /* Generate a special section to describe the ABI switches used to
5044 produce the resultant binary. This used to be done by the assembler
5045 setting bits in the ELF header's flags field, but we have run out of
5046 bits. GDB needs this information in order to be able to correctly
5047 debug these binaries. See the function mips_gdbarch_init() in
5048 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5049 causes unnecessary IRIX 6 ld warnings. */
5050 const char * abi_string = NULL;
5054 case ABI_32: abi_string = "abi32"; break;
5055 case ABI_N32: abi_string = "abiN32"; break;
5056 case ABI_64: abi_string = "abi64"; break;
5057 case ABI_O64: abi_string = "abiO64"; break;
5058 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5062 /* Note - we use fprintf directly rather than called named_section()
5063 because in this way we can avoid creating an allocated section. We
5064 do not want this section to take up any space in the running
5066 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5068 /* There is no ELF header flag to distinguish long32 forms of the
5069 EABI from long64 forms. Emit a special section to help tools
5071 if (mips_abi == ABI_EABI)
5072 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5073 TARGET_LONG64 ? 64 : 32);
5075 /* Restore the default section. */
5076 fprintf (asm_out_file, "\t.previous\n");
5079 /* Generate the pseudo ops that System V.4 wants. */
5080 if (TARGET_ABICALLS)
5081 /* ??? but do not want this (or want pic0) if -non-shared? */
5082 fprintf (asm_out_file, "\t.abicalls\n");
5085 fprintf (asm_out_file, "\t.set\tmips16\n");
/* Human-readable summary of -G, architecture and ISA for -fverbose-asm.  */
5087 if (flag_verbose_asm)
5088 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5090 mips_section_threshold, mips_arch_info->name, mips_isa);
5093 #ifdef BSS_SECTION_ASM_OP
5094 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5095 in the use of sbss. */
5098 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5099 unsigned HOST_WIDE_INT size, int align)
5101 extern tree last_assemble_variable_decl;
/* Small-data objects go to .sbss so they stay in reach of $gp.  */
5103 if (mips_in_small_data_p (decl))
5104 named_section (0, ".sbss", 0);
5107 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5108 last_assemble_variable_decl = decl;
5109 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
/* Reserve at least one byte so the symbol has nonzero extent.  */
5110 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5114 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5115 .externs for any small-data variables that turned out to be external. */
/* Implement TARGET_ASM_FILE_END: walk the extern_head list built by
   mips_output_external / irix_output_external_libcall and emit one
   .extern (with size) per still-undefined symbol, plus the IRIX o32
   ".globl name .text" workaround for undefined functions (size == -1).  */
5118 mips_file_end (void)
5121 struct extern_list *p;
5125 fputs ("\n", asm_out_file);
5127 for (p = extern_head; p != 0; p = p->next)
5129 name_tree = get_identifier (p->name);
5131 /* Positively ensure only one .extern for any given symbol. */
5132 if (!TREE_ASM_WRITTEN (name_tree)
5133 && TREE_SYMBOL_REFERENCED (name_tree))
5135 TREE_ASM_WRITTEN (name_tree) = 1;
5136 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5137 `.global name .text' directive for every used but
5138 undefined function. If we don't, the linker may perform
5139 an optimization (skipping over the insns that set $gp)
5140 when it is unsafe. */
5141 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5143 fputs ("\t.globl ", asm_out_file);
5144 assemble_name (asm_out_file, p->name);
5145 fputs (" .text\n", asm_out_file);
5149 fputs ("\t.extern\t", asm_out_file);
5150 assemble_name (asm_out_file, p->name);
5151 fprintf (asm_out_file, ", %d\n", p->size);
5158 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5159 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  Like the elfos.h version,
   except that uninitialized readonly data is placed in .rdata (not
   .comm) when -muninit-const-in-rodata is in effect on embedded targets.  */
5162 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5163 unsigned HOST_WIDE_INT size,
5166 /* If the target wants uninitialized const declarations in
5167 .rdata then don't put them in .comm. */
5168 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5169 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5170 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5172 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5173 targetm.asm_out.globalize_label (stream, name);
5175 readonly_data_section ();
5176 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
/* Emit "name:\n\t.space SIZE" in .rdata instead of a common symbol.  */
5177 mips_declare_object (stream, name, "",
5178 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
/* Otherwise fall back to an ordinary .comm directive.  */
5182 mips_declare_common_object (stream, name, "\n\t.comm\t",
5186 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5187 NAME is the name of the object and ALIGN is the required alignment
5188 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
5189 alignment argument. */
/* Declare a common object of SIZE bytes using asm directive INIT_STRING.
   NAME is the symbol and ALIGN the required alignment in bits.  When the
   directive has no alignment argument (TAKES_ALIGNMENT_P false), the
   alignment is folded into SIZE by rounding it up instead.  */
5192 mips_declare_common_object (FILE *stream, const char *name,
5193 const char *init_string,
5194 unsigned HOST_WIDE_INT size,
5195 unsigned int align, bool takes_alignment_p)
5197 if (!takes_alignment_p)
/* Round SIZE up to a multiple of the alignment in bytes.  */
5199 size += (align / BITS_PER_UNIT) - 1;
5200 size -= size % (align / BITS_PER_UNIT);
5201 mips_declare_object (stream, name, init_string,
5202 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
5205 mips_declare_object (stream, name, init_string,
5206 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5207 size, align / BITS_PER_UNIT);
5210 /* Emit either a label, .comm, or .lcomm directive. When using assembler
5211 macros, mark the symbol as written so that mips_file_end won't emit an
5212 .extern for it. STREAM is the output file, NAME is the name of the
5213 symbol, INIT_STRING is the string that should be written before the
5214 symbol and FINAL_STRING is the string that should be written after it.
5215 FINAL_STRING is a printf() format that consumes the remaining arguments. */
5218 mips_declare_object (FILE *stream, const char *name, const char *init_string,
5219 const char *final_string, ...)
5223 fputs (init_string, stream);
5224 assemble_name (stream, name);
5225 va_start (ap, final_string);
5226 vfprintf (stream, final_string, ap);
5229 if (!TARGET_EXPLICIT_RELOCS)
5231 tree name_tree = get_identifier (name);
5232 TREE_ASM_WRITTEN (name_tree) = 1;
5236 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
5237 extern int size_directive_output;
5239 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
5240 definitions except that it uses mips_declare_object() to emit the label. */
5243 mips_declare_object_name (FILE *stream, const char *name,
5244 tree decl ATTRIBUTE_UNUSED)
5246 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5247 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
/* Emit .size now if the size is known; mips_finish_declare_object
   handles the deferred case via size_directive_output.  */
5250 size_directive_output = 0;
5251 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
5255 size_directive_output = 1;
5256 size = int_size_in_bytes (TREE_TYPE (decl));
5257 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5260 mips_declare_object (stream, name, "", ":\n", 0);
5263 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
/* Implement ASM_FINISH_DECLARE_OBJECT (generic ELF): emit a deferred
   .size directive for DECL once its size became known, if
   mips_declare_object_name could not emit one earlier.  */
5266 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
5270 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
5271 if (!flag_inhibit_size_directive
5272 && DECL_SIZE (decl) != 0
5273 && !at_end && top_level
5274 && DECL_INITIAL (decl) == error_mark_node
5275 && !size_directive_output)
5279 size_directive_output = 1;
5280 size = int_size_in_bytes (TREE_TYPE (decl));
5281 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5286 /* Return true if X is a small data address that can be rewritten
/* Return true if X is a small-data address that can be rewritten as a
   $gp-relative LO_SUM: explicit relocs enabled and X classified as
   SYMBOL_SMALL_DATA.  */
5290 mips_rewrite_small_data_p (rtx x)
5292 enum mips_symbol_type symbol_type;
5294 return (TARGET_EXPLICIT_RELOCS
5295 && mips_symbolic_constant_p (x, &symbol_type)
5296 && symbol_type == SYMBOL_SMALL_DATA);
5300 /* A for_each_rtx callback for mips_small_data_pattern_p. */
/* A for_each_rtx callback for mips_small_data_pattern_p: flag bare
   small-data references, skipping those already inside a LO_SUM.  */
5303 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5305 if (GET_CODE (*loc) == LO_SUM)
5308 return mips_rewrite_small_data_p (*loc);
5311 /* Return true if OP refers to small data symbols directly, not through
/* Return nonzero if OP refers to small-data symbols directly (not
   already through a LO_SUM).  */
5315 mips_small_data_pattern_p (rtx op)
5317 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
5320 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
/* A for_each_rtx callback for mips_rewrite_small_data: replace each
   rewriteable small-data reference with (lo_sum $gp ref) in place.  */
5323 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5325 if (mips_rewrite_small_data_p (*loc))
5326 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
/* Don't descend into an existing LO_SUM (already rewritten).  */
5328 if (GET_CODE (*loc) == LO_SUM)
5334 /* If possible, rewrite OP so that it refers to small data using
5335 explicit relocations. */
/* Return a copy of OP with small-data references rewritten to use
   explicit $gp-relative relocations.  The copy leaves OP untouched.  */
5338 mips_rewrite_small_data (rtx op)
5340 op = copy_insn (op);
5341 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
5345 /* Return true if the current function has an insn that implicitly
/* Return true if the current function contains an insn that implicitly
   needs $gp (a GOT reference or a small-data pattern).  The result is
   cached in cfun->machine->has_gp_insn_p; once true it is never
   recomputed.  */
5349 mips_function_has_gp_insn (void)
5351 /* Don't bother rechecking if we found one last time. */
5352 if (!cfun->machine->has_gp_insn_p)
/* Scan the full insn stream, including pushed sequences.  */
5356 push_topmost_sequence ();
5357 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5359 && GET_CODE (PATTERN (insn)) != USE
5360 && GET_CODE (PATTERN (insn)) != CLOBBER
5361 && (get_attr_got (insn) != GOT_UNSET
5362 || small_data_pattern (PATTERN (insn), VOIDmode)))
5364 pop_topmost_sequence ();
5366 cfun->machine->has_gp_insn_p = (insn != 0);
5368 return cfun->machine->has_gp_insn_p;
5372 /* Return the register that should be used as the global pointer
5373 within this function. Return 0 if the function doesn't need
5374 a global pointer. */
/* Return the register number to use as the global pointer in this
   function, or 0 when no global pointer is needed (early-exit code for
   that case is elided from this listing).  For NewABI leaf functions a
   call-clobbered substitute for $gp is preferred.  */
5377 mips_global_pointer (void)
5381 /* $gp is always available in non-abicalls code. */
5382 if (!TARGET_ABICALLS)
5383 return GLOBAL_POINTER_REGNUM;
5385 /* We must always provide $gp when it is used implicitly. */
5386 if (!TARGET_EXPLICIT_RELOCS)
5387 return GLOBAL_POINTER_REGNUM;
5389 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
5391 if (current_function_profile)
5392 return GLOBAL_POINTER_REGNUM;
5394 /* If the function has a nonlocal goto, $gp must hold the correct
5395 global pointer for the target function. */
5396 if (current_function_has_nonlocal_goto)
5397 return GLOBAL_POINTER_REGNUM;
5399 /* If the gp is never referenced, there's no need to initialize it.
5400 Note that reload can sometimes introduce constant pool references
5401 into a function that otherwise didn't need them. For example,
5402 suppose we have an instruction like:
5404 (set (reg:DF R1) (float:DF (reg:SI R2)))
5406 If R2 turns out to be constant such as 1, the instruction may have a
5407 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
5408 using this constant if R2 doesn't get allocated to a register.
5410 In cases like these, reload will have added the constant to the pool
5411 but no instruction will yet refer to it. */
5412 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
5413 && !current_function_uses_const_pool
5414 && !mips_function_has_gp_insn ())
5417 /* We need a global pointer, but perhaps we can use a call-clobbered
5418 register instead of $gp. */
5419 if (TARGET_NEWABI && current_function_is_leaf)
5420 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5421 if (!regs_ever_live[regno]
5422 && call_used_regs[regno]
5423 && !fixed_regs[regno]
/* $25 is reserved for the incoming function address under abicalls.  */
5424 && regno != PIC_FUNCTION_ADDR_REGNUM)
5427 return GLOBAL_POINTER_REGNUM;
5431 /* Return true if the current function must save REGNO. */
/* Return true if the current function must save register REGNO in its
   prologue.  Covers $gp for NewABI PIC, call-saved registers, the old
   frame pointer, the return address, and the mips16 hard-float special
   cases for $18 and $31.
   NOTE(review): elided listing — some guard conditions and returns
   between the visible lines are missing.  */
5434 mips_save_reg_p (unsigned int regno)
5436 /* We only need to save $gp for NewABI PIC. */
5437 if (regno == GLOBAL_POINTER_REGNUM)
5438 return (TARGET_ABICALLS && TARGET_NEWABI
5439 && cfun->machine->global_pointer == regno);
5441 /* Check call-saved registers. */
5442 if (regs_ever_live[regno] && !call_used_regs[regno])
5445 /* We need to save the old frame pointer before setting up a new one. */
5446 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5449 /* We need to save the incoming return address if it is ever clobbered
5450 within the function. */
5451 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
5458 return_type = DECL_RESULT (current_function_decl);
5460 /* $18 is a special case in mips16 code. It may be used to call
5461 a function which returns a floating point value, but it is
5462 marked in call_used_regs. */
5463 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
5466 /* $31 is also a special case. It will be used to copy a return
5467 value into the floating point registers if the return value is
5469 if (regno == GP_REG_FIRST + 31
5470 && mips16_hard_float
5471 && !aggregate_value_p (return_type, current_function_decl)
5472 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
5473 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
5481 /* Return the bytes needed to compute the frame pointer from the current
5482 stack pointer. SIZE is the size (in bytes) of the local variables.
5484 Mips stack frames look like:
5486 Before call After call
5487 +-----------------------+ +-----------------------+
5490 | caller's temps. | | caller's temps. |
5492 +-----------------------+ +-----------------------+
5494 | arguments on stack. | | arguments on stack. |
5496 +-----------------------+ +-----------------------+
5497 | 4 words to save | | 4 words to save |
5498 | arguments passed | | arguments passed |
5499 | in registers, even | | in registers, even |
5500 SP->| if not passed. | VFP->| if not passed. |
5501 +-----------------------+ +-----------------------+
5503 | fp register save |
5505 +-----------------------+
5507 | gp register save |
5509 +-----------------------+
5513 +-----------------------+
5515 | alloca allocations |
5517 +-----------------------+
5519 | GP save for V.4 abi |
5521 +-----------------------+
5523 | arguments on stack |
5525 +-----------------------+
5527 | arguments passed |
5528 | in registers, even |
5529 low SP->| if not passed. |
5530 memory +-----------------------+
/* Compute the current function's stack-frame layout for SIZE bytes of
   local variables (see the frame diagram above) and record every field
   of cfun->machine->frame: total size, per-section sizes, GPR/FPR save
   masks, and the sp-relative / vfp-relative save offsets.
   NOTE(review): elided listing — initializations of gp_reg_size,
   fp_reg_size, mask, fmask and some braces are missing from view.  */
5535 compute_frame_size (HOST_WIDE_INT size)
5538 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
5539 HOST_WIDE_INT var_size; /* # bytes that variables take up */
5540 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
5541 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
5542 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
5543 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
5544 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
5545 unsigned int mask; /* mask of saved gp registers */
5546 unsigned int fmask; /* mask of saved fp registers */
5548 cfun->machine->global_pointer = mips_global_pointer ();
5554 var_size = MIPS_STACK_ALIGN (size);
5555 args_size = current_function_outgoing_args_size;
5556 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
5558 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
5559 functions. If the function has local variables, we're committed
5560 to allocating it anyway. Otherwise reclaim it here. */
5561 if (var_size == 0 && current_function_is_leaf)
5562 cprestore_size = args_size = 0;
5564 /* The MIPS 3.0 linker does not like functions that dynamically
5565 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
5566 looks like we are trying to create a second frame pointer to the
5567 function, so allocate some stack space to make it happy. */
5569 if (args_size == 0 && current_function_calls_alloca)
5570 args_size = 4 * UNITS_PER_WORD;
5572 total_size = var_size + args_size + cprestore_size;
5574 /* Calculate space needed for gp registers. */
5575 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5576 if (mips_save_reg_p (regno))
5578 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5579 mask |= 1 << (regno - GP_REG_FIRST);
5582 /* We need to restore these for the handler. */
5583 if (current_function_calls_eh_return)
5588 regno = EH_RETURN_DATA_REGNO (i);
5589 if (regno == INVALID_REGNUM)
5591 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5592 mask |= 1 << (regno - GP_REG_FIRST);
5596 /* This loop must iterate over the same space as its companion in
5597 save_restore_insns. */
5598 for (regno = (FP_REG_LAST - FP_INC + 1);
5599 regno >= FP_REG_FIRST;
5602 if (mips_save_reg_p (regno))
5604 fp_reg_size += FP_INC * UNITS_PER_FPREG;
5605 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
5609 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
5610 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
5612 /* Add in space reserved on the stack by the callee for storing arguments
5613 passed in registers. */
5615 total_size += MIPS_STACK_ALIGN (current_function_pretend_args_size);
5617 /* Save other computed information. */
5618 cfun->machine->frame.total_size = total_size;
5619 cfun->machine->frame.var_size = var_size;
5620 cfun->machine->frame.args_size = args_size;
5621 cfun->machine->frame.cprestore_size = cprestore_size;
5622 cfun->machine->frame.gp_reg_size = gp_reg_size;
5623 cfun->machine->frame.fp_reg_size = fp_reg_size;
5624 cfun->machine->frame.mask = mask;
5625 cfun->machine->frame.fmask = fmask;
5626 cfun->machine->frame.initialized = reload_completed;
5627 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
5628 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
/* GPR save area: offset of the highest-numbered save slot from $sp,
   and the same slot relative to the virtual frame pointer.  */
5632 HOST_WIDE_INT offset;
5634 offset = (args_size + cprestore_size + var_size
5635 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
5636 cfun->machine->frame.gp_sp_offset = offset;
5637 cfun->machine->frame.gp_save_offset = offset - total_size;
5641 cfun->machine->frame.gp_sp_offset = 0;
5642 cfun->machine->frame.gp_save_offset = 0;
/* FPR save area, laid out above the GPR area; same two offsets.  */
5647 HOST_WIDE_INT offset;
5649 offset = (args_size + cprestore_size + var_size
5650 + gp_reg_rounded + fp_reg_size
5651 - FP_INC * UNITS_PER_FPREG);
5652 cfun->machine->frame.fp_sp_offset = offset;
5653 cfun->machine->frame.fp_save_offset = offset - total_size;
5657 cfun->machine->frame.fp_sp_offset = 0;
5658 cfun->machine->frame.fp_save_offset = 0;
5661 /* Ok, we're done. */
5665 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
5666 pointer or argument pointer. TO is either the stack pointer or
5667 hard frame pointer. */
/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is the frame or argument
   pointer; TO is the stack or hard frame pointer.  Recomputes the frame
   layout first so the offsets are current.  */
5670 mips_initial_elimination_offset (int from, int to)
5672 HOST_WIDE_INT offset;
5674 compute_frame_size (get_frame_size ());
5676 /* Set OFFSET to the offset from the stack pointer. */
5679 case FRAME_POINTER_REGNUM:
5683 case ARG_POINTER_REGNUM:
5684 offset = cfun->machine->frame.total_size;
5686 offset -= current_function_pretend_args_size;
/* In mips16 the hard frame pointer sits above the outgoing args.  */
5693 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
5694 offset -= cfun->machine->frame.args_size;
5699 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
5700 back to a previous frame. */
/* Implement RETURN_ADDR_RTX.  Only COUNT == 0 (the current frame) is
   supported; the return address is the incoming value of $31.  */
5702 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
5707 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
5710 /* Use FN to save or restore register REGNO. MODE is the register's
5711 mode and OFFSET is the offset of its save slot from the current
/* Use FN to save or restore register REGNO in mode MODE at OFFSET bytes
   from the current stack pointer.  FN receives the register and its
   stack slot and decides the direction of the copy.  */
5715 mips_save_restore_reg (enum machine_mode mode, int regno,
5716 HOST_WIDE_INT offset, mips_save_restore_fn fn)
5720 mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
5722 fn (gen_rtx_REG (mode, regno), mem);
5726 /* Call FN for each register that is saved by the current function.
5727 SP_OFFSET is the offset of the current stack pointer from the start
/* Call FN once for every register that the current function saves,
   walking the GPR mask from high to low and then the FPR fmask.
   SP_OFFSET is the current $sp's offset from the frame start, so slot
   offsets are expressed relative to the live stack pointer.  */
5731 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
5733 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
5735 enum machine_mode fpr_mode;
5736 HOST_WIDE_INT offset;
5739 /* Save registers starting from high to low. The debuggers prefer at least
5740 the return register be stored at func+4, and also it allows us not to
5741 need a nop in the epilog if at least one register is reloaded in
5742 addition to return address. */
5743 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
5744 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
5745 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
5747 mips_save_restore_reg (gpr_mode, regno, offset, fn);
5748 offset -= GET_MODE_SIZE (gpr_mode);
5751 /* This loop must iterate over the same space as its companion in
5752 compute_frame_size. */
5753 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
/* FPRs are saved pairwise as DFmode unless -msingle-float.  */
5754 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
5755 for (regno = (FP_REG_LAST - FP_INC + 1);
5756 regno >= FP_REG_FIRST;
5758 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
5760 mips_save_restore_reg (fpr_mode, regno, offset, fn);
5761 offset -= GET_MODE_SIZE (fpr_mode);
5766 /* If we're generating n32 or n64 abicalls, and the current function
5767 does not use $28 as its global pointer, emit a cplocal directive.
5768 Use pic_offset_table_rtx as the argument to the directive. */
/* Emit a .cplocal directive naming the function's chosen global-pointer
   register when it is not the default $gp, so the assembler's abicalls
   macros use the substitute register.  Only needed when the assembler
   (not the compiler) expands those macros, i.e. without explicit relocs.  */
5771 mips_output_cplocal (void)
5773 if (!TARGET_EXPLICIT_RELOCS
5774 && cfun->machine->global_pointer > 0
5775 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
5776 output_asm_insn (".cplocal %+", 0);
5779 /* If we're generating n32 or n64 abicalls, emit instructions
5780 to set up the global pointer. */
/* For NewABI abicalls functions that need a global pointer, emit the
   loadgp sequence: compute $gp from the function's own address (in $25)
   plus the _gp_disp-style offset.  Without explicit relocs, follow with
   a blockage so the scheduler cannot move uses of $gp above it.  */
5783 mips_emit_loadgp (void)
5785 if (TARGET_ABICALLS && TARGET_NEWABI && cfun->machine->global_pointer > 0)
5787 rtx addr, offset, incoming_address;
5789 addr = XEXP (DECL_RTL (current_function_decl), 0);
5790 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
5791 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
5792 emit_insn (gen_loadgp (offset, incoming_address));
5793 if (!TARGET_EXPLICIT_RELOCS)
5794 emit_insn (gen_loadgp_blockage ());
5798 /* Set up the stack and frame (if desired) for the function. */
5801 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5804 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
5806 #ifdef SDB_DEBUGGING_INFO
5807 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
5808 ASM_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl), 0);
5811 /* In mips16 mode, we may need to generate a 32 bit to handle
5812 floating point arguments. The linker will arrange for any 32 bit
5813 functions to call this stub, which will then jump to the 16 bit
5815 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
5816 && current_function_args_info.fp_code != 0)
5817 build_mips16_function_stub (file);
5819 if (!FUNCTION_NAME_ALREADY_DECLARED)
5821 /* Get the function name the same way that toplev.c does before calling
5822 assemble_start_function. This is needed so that the name used here
5823 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
5824 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5826 if (!flag_inhibit_size_directive)
5828 fputs ("\t.ent\t", file);
5829 assemble_name (file, fnname);
5833 assemble_name (file, fnname);
5834 fputs (":\n", file);
5837 /* Stop mips_file_end from treating this function as external. */
5838 if (TARGET_IRIX && mips_abi == ABI_32)
5839 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
/* Emit the MIPS-specific .frame/.mask/.fmask directives that describe
   the frame layout to the assembler and debuggers.  */
5841 if (!flag_inhibit_size_directive)
5843 /* .frame FRAMEREG, FRAMESIZE, RETREG */
5845 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
5846 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
5847 ", args= " HOST_WIDE_INT_PRINT_DEC
5848 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
5849 (reg_names[(frame_pointer_needed)
5850 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
/* In mips16 code the frame pointer sits below the outgoing argument
   area, so the reported frame size is adjusted by args_size.  */
5851 ((frame_pointer_needed && TARGET_MIPS16)
5852 ? tsize - cfun->machine->frame.args_size
5854 reg_names[GP_REG_FIRST + 31],
5855 cfun->machine->frame.var_size,
5856 cfun->machine->frame.num_gp,
5857 cfun->machine->frame.num_fp,
5858 cfun->machine->frame.args_size,
5859 cfun->machine->frame.cprestore_size);
5861 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
5862 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
5863 cfun->machine->frame.mask,
5864 cfun->machine->frame.gp_save_offset);
5865 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
5866 cfun->machine->frame.fmask,
5867 cfun->machine->frame.fp_save_offset);
5870 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
5871 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
/* o32/o64 SVR4 PIC: emit .cpload to initialize $gp from $25.  When the
   whole function body is .set noreorder, the %< also turns macros off.  */
5874 if (TARGET_ABICALLS && !TARGET_NEWABI && cfun->machine->global_pointer > 0)
5876 /* Handle the initialization of $gp for SVR4 PIC. */
5877 if (!cfun->machine->all_noreorder_p)
5878 output_asm_insn ("%(.cpload\t%^%)", 0);
5880 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
5882 else if (cfun->machine->all_noreorder_p)
5883 output_asm_insn ("%(%<", 0);
5885 /* Tell the assembler which register we're using as the global
5886 pointer. This is needed for thunks, since they can use either
5887 explicit relocs or assembler macros. */
5888 mips_output_cplocal ();
5891 /* Make the last instruction frame related and note that it performs
5892 the operation described by FRAME_PATTERN. */
5895 mips_set_frame_expr (rtx frame_pattern)
5899 insn = get_last_insn ();
5900 RTX_FRAME_RELATED_P (insn) = 1;
/* Attach a REG_FRAME_RELATED_EXPR note so the CFI machinery describes
   the insn with FRAME_PATTERN instead of its actual pattern.
   NOTE(review): the remaining arguments of alloc_EXPR_LIST are missing
   from this listing — presumably frame_pattern and REG_NOTES (insn).  */
5901 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5907 /* Return a frame-related rtx that stores REG at MEM.
5908 REG must be a single register. */
5911 mips_frame_set (rtx mem, rtx reg)
5913 rtx set = gen_rtx_SET (VOIDmode, mem, reg);
/* Mark the SET itself as frame-related so it can be used inside a
   PARALLEL frame expression.  NOTE(review): the `return set;` line is
   missing from this listing.  */
5914 RTX_FRAME_RELATED_P (set) = 1;
5919 /* Save register REG to MEM. Make the instruction frame-related. */
5922 mips_save_reg (rtx reg, rtx mem)
/* A DFmode value on !TARGET_FLOAT64 occupies a register pair, so the
   store may need to be split into two word moves; the frame note is a
   PARALLEL describing both halves.  */
5924 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
5928 if (mips_split_64bit_move_p (mem, reg))
5929 mips_split_64bit_move (mem, reg);
5931 emit_move_insn (mem, reg);
5933 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
5934 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
5935 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
/* NOTE(review): the condition starting this branch (presumably a
   TARGET_MIPS16 test) is missing from the listing; only its trailing
   conjuncts are visible.  */
5940 && REGNO (reg) != GP_REG_FIRST + 31
5941 && !M16_REG_P (REGNO (reg)))
5943 /* Save a non-mips16 register by moving it through a temporary.
5944 We don't need to do this for $31 since there's a special
5945 instruction for it. */
5946 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
5947 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
5950 emit_move_insn (mem, reg);
5952 mips_set_frame_expr (mips_frame_set (mem, reg));
5957 /* Expand the prologue into a bunch of separate insns. */
5960 mips_expand_prologue (void)
5964 if (cfun->machine->global_pointer > 0)
5965 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
5967 size = compute_frame_size (get_frame_size ());
5969 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
5970 bytes beforehand; this is enough to cover the register save area
5971 without going out of range. */
5972 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
5974 HOST_WIDE_INT step1;
5976 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
5977 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
5979 GEN_INT (-step1)))) = 1;
/* NOTE(review): `size -= step1;` appears to be missing from this
   listing between the first stack adjustment and the register saves —
   confirm against the full source.  */
5981 mips_for_each_saved_reg (size, mips_save_reg);
5984 /* Allocate the rest of the frame. */
/* When the remaining adjustment fits in a 16-bit signed immediate we
   can subtract it directly; otherwise it must go through a temporary
   register.  */
5987 if (SMALL_OPERAND (-size))
5988 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
5990 GEN_INT (-size)))) = 1;
5993 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
/* The mips16 branch: $sp cannot be combined with a register operand,
   so the subtraction is done in the frame pointer and copied back.  */
5996 /* There are no instructions to add or subtract registers
5997 from the stack pointer, so use the frame pointer as a
5998 temporary. We should always be using a frame pointer
5999 in this case anyway. */
6000 if (!frame_pointer_needed)
6003 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6004 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6005 hard_frame_pointer_rtx,
6006 MIPS_PROLOGUE_TEMP (Pmode)));
6007 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6010 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6012 MIPS_PROLOGUE_TEMP (Pmode)));
6014 /* Describe the combined effect of the previous instructions. */
6016 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6017 plus_constant (stack_pointer_rtx, -size)));
6021 /* Set up the frame pointer, if we're using one. In mips16 code,
6022 we point the frame pointer ahead of the outgoing argument area.
6023 This should allow more variables & incoming arguments to be
6024 accessed with unextended instructions. */
6025 if (frame_pointer_needed)
6027 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6029 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6031 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6036 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6037 stack_pointer_rtx)) = 1;
6040 /* If generating o32/o64 abicalls, save $gp on the stack. */
6041 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6042 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6044 mips_emit_loadgp ();
6046 /* If we are profiling, make sure no instructions are scheduled before
6047 the call to mcount. */
6049 if (current_function_profile)
6050 emit_insn (gen_blockage ());
6053 /* Do any necessary cleanup after a function to restore stack, frame,
/* RA_MASK singles out the return-address bit ($31) in frame.mask.  */
6056 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6059 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6060 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6062 /* Reinstate the normal $gp. */
6063 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6064 mips_output_cplocal ();
/* Undo the .set noreorder/.set nomacro emitted by the prologue for
   all-noreorder functions, and reset the corresponding counters.  */
6066 if (cfun->machine->all_noreorder_p)
6068 /* Avoid using %>%) since it adds excess whitespace. */
6069 output_asm_insn (".set\tmacro", 0);
6070 output_asm_insn (".set\treorder", 0);
6071 set_noreorder = set_nomacro = 0;
6074 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6078 /* Get the function name the same way that toplev.c does before calling
6079 assemble_start_function. This is needed so that the name used here
6080 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6081 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6082 fputs ("\t.end\t", file);
6083 assemble_name (file, fnname);
6088 /* Emit instructions to restore register REG from slot MEM. */
6091 mips_restore_reg (rtx reg, rtx mem)
6093 /* There's no mips16 instruction to load $31 directly. Load into
6094 $7 instead and adjust the return insn appropriately. */
6095 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6096 reg = gen_rtx_REG (GET_MODE (reg), 7);
/* mips16 loads can only target M16_REGS, so other registers are
   restored via the epilogue temporary.  */
6098 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6100 /* Can't restore directly; move through a temporary. */
6101 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6102 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6105 emit_move_insn (reg, mem);
6109 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6110 if this epilogue precedes a sibling call, false if it is for a normal
6111 "epilogue" pattern. */
6114 mips_expand_epilogue (int sibcall_p)
6116 HOST_WIDE_INT step1, step2;
/* Fast path: if no frame and no saved registers, just return.  */
6119 if (!sibcall_p && mips_can_use_return_insn ())
6121 emit_jump_insn (gen_return ());
6125 /* Split the frame into two. STEP1 is the amount of stack we should
6126 deallocate before restoring the registers. STEP2 is the amount we
6127 should deallocate afterwards.
6129 Start off by assuming that no registers need to be restored. */
6130 step1 = cfun->machine->frame.total_size;
/* NOTE(review): `step2 = 0;` is presumably initialized here but the
   line is missing from this listing.  */
6133 /* Work out which register holds the frame address. Account for the
6134 frame pointer offset used by mips16 code. */
6135 if (!frame_pointer_needed)
6136 base = stack_pointer_rtx;
6139 base = hard_frame_pointer_rtx;
6141 step1 -= cfun->machine->frame.args_size;
6144 /* If we need to restore registers, deallocate as much stack as
6145 possible in the second step without going out of range. */
6146 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6148 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
6152 /* Set TARGET to BASE + STEP1. */
6158 /* Get an rtx for STEP1 that we can add to BASE. */
6159 adjust = GEN_INT (step1);
6160 if (!SMALL_OPERAND (step1))
6162 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
6163 adjust = MIPS_EPILOGUE_TEMP (Pmode);
6166 /* Normal mode code can copy the result straight into $sp. */
6168 target = stack_pointer_rtx;
6170 emit_insn (gen_add3_insn (target, base, adjust));
6173 /* Copy TARGET into the stack pointer. */
6174 if (target != stack_pointer_rtx)
6175 emit_move_insn (stack_pointer_rtx, target);
6177 /* If we're using addressing macros for n32/n64 abicalls, $gp is
6178 implicitly used by all SYMBOL_REFs. We must emit a blockage
6179 insn before restoring it. */
6180 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
6181 emit_insn (gen_blockage ());
6183 /* Restore the registers. */
6184 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
6187 /* Deallocate the final bit of the frame. */
6189 emit_insn (gen_add3_insn (stack_pointer_rtx,
6193 /* Add in the __builtin_eh_return stack adjustment. We need to
6194 use a temporary in mips16 code. */
6195 if (current_function_calls_eh_return)
6199 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
6200 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
6201 MIPS_EPILOGUE_TEMP (Pmode),
6202 EH_RETURN_STACKADJ_RTX));
6203 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
6206 emit_insn (gen_add3_insn (stack_pointer_rtx,
6208 EH_RETURN_STACKADJ_RTX));
6213 /* The mips16 loads the return address into $7, not $31. */
6214 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
6215 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6216 GP_REG_FIRST + 7)));
6218 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6219 GP_REG_FIRST + 31)));
6223 /* Return nonzero if this function is known to have a null epilogue.
6224 This allows the optimizer to omit jumps to jumps if no stack
6228 mips_can_use_return_insn (void)
/* Frame sizes are only meaningful after reload has run.  */
6232 if (! reload_completed)
6235 if (regs_ever_live[31] || current_function_profile)
6238 return_type = DECL_RESULT (current_function_decl);
6240 /* In mips16 mode, a function which returns a floating point value
6241 needs to arrange to copy the return value into the floating point
/* NOTE(review): the leading TARGET_MIPS16 conjunct of this condition
   is missing from the listing; only the trailing conjuncts remain.  */
6244 && mips16_hard_float
6245 && ! aggregate_value_p (return_type, current_function_decl)
6246 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6247 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6250 if (cfun->machine->frame.initialized)
6251 return cfun->machine->frame.total_size == 0;
6253 return compute_frame_size (get_frame_size ()) == 0;
6256 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
6257 in order to avoid duplicating too much logic from elsewhere. */
6260 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
6261 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
6264 rtx this, temp1, temp2, insn, fnaddr;
6266 /* Pretend to be a post-reload pass while generating rtl. */
6268 reload_completed = 1;
6269 reset_block_changes ();
6271 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
6272 for TARGET_NEWABI since the latter is a call-saved register. */
6273 if (TARGET_ABICALLS)
6274 cfun->machine->global_pointer
6275 = REGNO (pic_offset_table_rtx)
6276 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
6278 /* Set up the global pointer for n32 or n64 abicalls. */
6279 mips_emit_loadgp ();
6281 /* We need two temporary registers in some cases. */
6282 temp1 = gen_rtx_REG (Pmode, 2)
6283 temp2 = gen_rtx_REG (Pmode, 3);
/* If the callee returns an aggregate in memory, the hidden return
   pointer occupies the first argument register and "this" moves to
   the second.  */
6285 /* Find out which register contains the "this" pointer. */
6286 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
6287 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1)
6289 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
6291 /* Add DELTA to THIS. */
6294 rtx offset = GEN_INT (delta);
6295 if (!SMALL_OPERAND (delta))
6297 emit_move_insn (temp1, offset);
6300 emit_insn (gen_add3_insn (this, this, offset));
6303 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
6304 if (vcall_offset != 0)
6308 /* Set TEMP1 to *THIS. */
6309 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
6311 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
6312 addr = mips_add_offset (temp2, temp1, vcall_offset);
6314 /* Load the offset and add it to THIS. */
6315 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
6316 emit_insn (gen_add3_insn (this, this, temp1));
6319 /* Jump to the target function. Use a sibcall if direct jumps are
6320 allowed, otherwise load the address into a register first. */
6321 fnaddr = XEXP (DECL_RTL (function), 0);
6322 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
6324 /* This is messy. gas treats "la $25,foo" as part of a call
6325 sequence and may allow a global "foo" to be lazily bound.
6326 The general move patterns therefore reject this combination.
6328 In this context, lazy binding would actually be OK for o32 and o64,
6329 but it's still wrong for n32 and n64; see mips_load_call_address.
6330 We must therefore load the address via a temporary register if
6331 mips_dangerous_for_la25_p.
6333 If we jump to the temporary register rather than $25, the assembler
6334 can use the move insn to fill the jump's delay slot. */
6335 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
6336 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6337 mips_load_call_address (temp1, fnaddr, true);
6339 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
6340 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
6341 emit_jump_insn (gen_indirect_jump (temp1));
6345 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
6346 SIBLING_CALL_P (insn) = 1;
6349 /* Run just enough of rest_of_compilation. This sequence was
6350 "borrowed" from alpha.c. */
6351 insn = get_insns ();
6352 insn_locators_initialize ();
6353 split_all_insns_noflow ();
6355 mips16_lay_out_constants ();
6356 shorten_branches (insn);
6357 final_start_function (insn, file, 1);
6358 final (insn, file, 1, 0);
6359 final_end_function ();
6361 /* Clean up the vars set above. Note that final_end_function resets
6362 the global pointer for us. */
6363 reload_completed = 0;
6367 /* Returns nonzero if X contains a SYMBOL_REF. */
6370 symbolic_expression_p (rtx x)
6372 if (GET_CODE (x) == SYMBOL_REF)
6375 if (GET_CODE (x) == CONST)
6376 return symbolic_expression_p (XEXP (x, 0));
/* NOTE(review): the condition guarding this recursion (presumably a
   UNARY_P test) is missing from the listing.  */
6379 return symbolic_expression_p (XEXP (x, 0));
/* For binary arithmetic, a symbol in either operand qualifies.  */
6381 if (ARITHMETIC_P (x))
6382 return (symbolic_expression_p (XEXP (x, 0))
6383 || symbolic_expression_p (XEXP (x, 1)));
6388 /* Choose the section to use for the constant rtx expression X that has
6392 mips_select_rtx_section (enum machine_mode mode, rtx x,
6393 unsigned HOST_WIDE_INT align)
6397 /* In mips16 mode, the constant table always goes in the same section
6398 as the function, so that constants can be loaded using PC relative
6400 function_section (current_function_decl);
6402 else if (TARGET_EMBEDDED_DATA)
6404 /* For embedded applications, always put constants in read-only data,
6405 in order to reduce RAM usage. */
6406 mergeable_constant_section (mode, align, 0);
6410 /* For hosted applications, always put constants in small data if
6411 possible, as this gives the best performance. */
6412 /* ??? Consider using mergeable small data sections. */
6414 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
6415 && mips_section_threshold > 0)
6416 named_section (0, ".sdata", 0);
/* PIC constants containing symbols need relocation at load time, so
   they go in .data.rel.ro rather than a read-only section.  */
6417 else if (flag_pic && symbolic_expression_p (x))
6418 named_section (0, ".data.rel.ro", 3);
6420 mergeable_constant_section (mode, align, 0);
6424 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
6425 access DECL using %gp_rel(...)($gp). */
6428 mips_in_small_data_p (tree decl)
6432 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6435 /* We don't yet generate small-data references for -mabicalls. See related
6436 -G handling in override_options. */
6437 if (TARGET_ABICALLS)
/* Variables with an explicit section attribute: honor it only for the
   known small-data sections.  */
6440 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6444 /* Reject anything that isn't in a known small-data section. */
6445 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6446 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6449 /* If a symbol is defined externally, the assembler will use the
6450 usual -G rules when deciding how to implement macros. */
6451 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
6454 else if (TARGET_EMBEDDED_DATA)
6456 /* Don't put constants into the small data section: we want them
6457 to be in ROM rather than RAM. */
6458 if (TREE_CODE (decl) != VAR_DECL)
6461 if (TREE_READONLY (decl)
6462 && !TREE_SIDE_EFFECTS (decl)
6463 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
/* Finally apply the -G size threshold; size <= 0 means unknown or
   incomplete type, which never qualifies.  */
6467 size = int_size_in_bytes (TREE_TYPE (decl));
6468 return (size > 0 && size <= mips_section_threshold);
6471 /* See whether VALTYPE is a record whose fields should be returned in
6472 floating-point registers. If so, return the number of fields and
6473 list them in FIELDS (which should have two elements). Return 0
6476 For n32 & n64, a structure with one or two fields is returned in
6477 floating-point registers as long as every field has a floating-point
6481 mips_fpr_return_fields (tree valtype, tree *fields)
6489 if (TREE_CODE (valtype) != RECORD_TYPE)
/* Walk the record's members; only FIELD_DECLs count, and every one of
   them must be a scalar floating-point type.  */
6493 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
6495 if (TREE_CODE (field) != FIELD_DECL)
6498 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
/* NOTE(review): the guard rejecting a third field (i == 2) is missing
   from this listing.  */
6504 fields[i++] = field;
6510 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
6511 a value in the most significant part of $2/$3 if:
6513 - the target is big-endian;
6515 - the value has a structure or union type (we generalize this to
6516 cover aggregates from other languages too); and
6518 - the structure is not returned in floating-point registers. */
6521 mips_return_in_msb (tree valtype)
6525 return (TARGET_NEWABI
6526 && TARGET_BIG_ENDIAN
6527 && AGGREGATE_TYPE_P (valtype)
6528 && mips_fpr_return_fields (valtype, fields) == 0);
6532 /* Return a composite value in a pair of floating-point registers.
6533 MODE1 and OFFSET1 are the mode and byte offset for the first value,
6534 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
6537 For n32 & n64, $f0 always holds the first value and $f2 the second.
6538 Otherwise the values are packed together as closely as possible. */
6541 mips_return_fpr_pair (enum machine_mode mode,
6542 enum machine_mode mode1, HOST_WIDE_INT offset1,
6543 enum machine_mode mode2, HOST_WIDE_INT offset2)
/* The register stride: 2 for NEWABI ($f0/$f2), otherwise FP_INC so
   the second value lands in the next addressable FP register.  */
6547 inc = (TARGET_NEWABI ? 2 : FP_INC);
6548 return gen_rtx_PARALLEL
6551 gen_rtx_EXPR_LIST (VOIDmode,
6552 gen_rtx_REG (mode1, FP_RETURN),
6554 gen_rtx_EXPR_LIST (VOIDmode,
6555 gen_rtx_REG (mode2, FP_RETURN + inc),
6556 GEN_INT (offset2))));
6561 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
6562 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
6563 VALTYPE is null and MODE is the mode of the return value. */
6566 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
6567 enum machine_mode mode)
6574 mode = TYPE_MODE (valtype);
6575 unsignedp = TYPE_UNSIGNED (valtype);
6577 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
6578 true, we must promote the mode just as PROMOTE_MODE does. */
6579 mode = promote_mode (valtype, mode, &unsignedp, 1);
6581 /* Handle structures whose fields are returned in $f0/$f2. */
6582 switch (mips_fpr_return_fields (valtype, fields))
6585 return gen_rtx_REG (mode, FP_RETURN);
6588 return mips_return_fpr_pair (mode,
6589 TYPE_MODE (TREE_TYPE (fields[0])),
6590 int_byte_position (fields[0]),
6591 TYPE_MODE (TREE_TYPE (fields[1])),
6592 int_byte_position (fields[1]));
6595 /* If a value is passed in the most significant part of a register, see
6596 whether we have to round the mode up to a whole number of words. */
6597 if (mips_return_in_msb (valtype))
6599 HOST_WIDE_INT size = int_size_in_bytes (valtype);
6600 if (size % UNITS_PER_WORD != 0)
6602 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
6603 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
/* Scalar floats that fit the hardware FP registers go in $f0.  */
6608 if (GET_MODE_CLASS (mode) == MODE_FLOAT
6609 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
6610 return gen_rtx_REG (mode, FP_RETURN);
6612 /* Handle long doubles for n32 & n64. */
6614 return mips_return_fpr_pair (mode,
6616 DImode, GET_MODE_SIZE (mode) / 2);
/* Complex floats: real part in the first FP register, imaginary part
   in the second.  */
6618 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6619 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
6620 return mips_return_fpr_pair (mode,
6621 GET_MODE_INNER (mode), 0,
6622 GET_MODE_INNER (mode),
6623 GET_MODE_SIZE (mode) / 2);
6625 return gen_rtx_REG (mode, GP_RETURN);
6628 /* Return nonzero when an argument must be passed by reference. */
6631 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6632 enum machine_mode mode, tree type,
6633 bool named ATTRIBUTE_UNUSED)
/* EABI passes anything wider than one word (or of unknown size) by
   reference; other ABIs defer to the generic must-pass-in-stack test. */
6635 if (mips_abi == ABI_EABI)
6639 /* ??? How should SCmode be handled? */
6640 if (type == NULL_TREE || mode == DImode || mode == DFmode)
6643 size = int_size_in_bytes (type);
6644 return size == -1 || size > UNITS_PER_WORD;
6648 /* If we have a variable-sized parameter, we have no choice. */
6649 return targetm.calls.must_pass_in_stack (mode, type);
6653 /* Return the class of registers for which a mode change from FROM to TO
6656 In little-endian mode, the hi-lo registers are numbered backwards,
6657 so (subreg:SI (reg:DI hi) 0) gets the high word instead of the low
6660 Similarly, when using paired floating-point registers, the first
6661 register holds the low word, regardless of endianness. So in big
6662 endian mode, (subreg:SI (reg:DF $f0) 0) does not get the high word
6665 Also, loading a 32-bit value into a 64-bit floating-point register
6666 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
6667 We can't allow 64-bit float registers to change from a 32-bit
6668 mode to a 64-bit mode. */
6671 mips_cannot_change_mode_class (enum machine_mode from,
6672 enum machine_mode to, enum reg_class class)
6674 if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
/* Size-changing subregs are problematic for FP regs in big-endian
   mode and for HI/LO (plus FP when 64-bit FP regs) otherwise.
   NOTE(review): the condition selecting the HI_AND_FP_REGS branch is
   missing from this listing.  */
6676 if (TARGET_BIG_ENDIAN)
6677 return reg_classes_intersect_p (FP_REGS, class);
6679 return reg_classes_intersect_p (HI_AND_FP_REGS, class);
6680 return reg_classes_intersect_p (HI_REG, class);
6685 /* Return true if X should not be moved directly into register $25.
6686 We need this because many versions of GAS will treat "la $25,foo" as
6687 part of a call sequence and so allow a global "foo" to be lazily bound. */
6690 mips_dangerous_for_la25_p (rtx x)
6692 HOST_WIDE_INT offset;
/* With explicit relocs the compiler controls the relocations itself,
   so the gas "la $25" hazard does not arise.  */
6694 if (TARGET_EXPLICIT_RELOCS)
6697 mips_split_const (x, &x, &offset);
6698 return global_got_operand (x, VOIDmode);
6701 /* Implement PREFERRED_RELOAD_CLASS. */
6704 mips_preferred_reload_class (rtx x, enum reg_class class)
/* Values that must not be loaded into $25 prefer LEA_REGS, which
   excludes it.  */
6706 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
6709 if (TARGET_HARD_FLOAT
6710 && FLOAT_MODE_P (GET_MODE (x))
6711 && reg_class_subset_p (FP_REGS, class))
6714 if (reg_class_subset_p (GR_REGS, class))
6717 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
6723 /* This function returns the register class required for a secondary
6724 register when copying between one of the registers in CLASS, and X,
6725 using MODE. If IN_P is nonzero, the copy is going from X to the
6726 register, otherwise the register is the source. A return value of
6727 NO_REGS means that no secondary register is required. */
6730 mips_secondary_reload_class (enum reg_class class,
6731 enum machine_mode mode, rtx x, int in_p)
6733 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
6737 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
6738 regno = true_regnum (x);
6740 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
/* Values unsafe for "la $25" must be staged through LEA_REGS when the
   destination class includes $25.  */
6742 if (mips_dangerous_for_la25_p (x))
6745 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
6749 /* Copying from HI or LO to anywhere other than a general register
6750 requires a general register. */
6751 if (class == HI_REG || class == LO_REG || class == MD_REGS)
6753 if (TARGET_MIPS16 && in_p)
6755 /* We can't really copy to HI or LO at all in mips16 mode. */
6758 return gp_reg_p ? NO_REGS : gr_regs;
6760 if (MD_REG_P (regno))
6762 if (TARGET_MIPS16 && ! in_p)
6764 /* We can't really copy to HI or LO at all in mips16 mode. */
6767 return class == gr_regs ? NO_REGS : gr_regs;
6770 /* We can only copy a value to a condition code register from a
6771 floating point register, and even then we require a scratch
6772 floating point register. We can only copy a value out of a
6773 condition code register into a general register. */
6774 if (class == ST_REGS)
6778 return gp_reg_p ? NO_REGS : gr_regs;
6780 if (ST_REG_P (regno))
6784 return class == gr_regs ? NO_REGS : gr_regs;
6787 if (class == FP_REGS)
6789 if (GET_CODE (x) == MEM)
6791 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
6794 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
6796 /* We can use the l.s and l.d macros to load floating-point
6797 constants. ??? For l.s, we could probably get better
6798 code by returning GR_REGS here. */
6801 else if (gp_reg_p || x == CONST0_RTX (mode))
6803 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
6806 else if (FP_REG_P (regno))
6808 /* In this case we can use mov.s or mov.d. */
6813 /* Otherwise, we need to reload through an integer register. */
6818 /* In mips16 mode, going between memory and anything but M16_REGS
6819 requires an M16_REG. */
/* NOTE(review): the TARGET_MIPS16 && MEM guard for this branch is
   missing from the listing.  */
6822 if (class != M16_REGS && class != M16_NA_REGS)
6830 if (class == M16_REGS || class == M16_NA_REGS)
6839 /* Implement CLASS_MAX_NREGS.
6841 Usually all registers are word-sized. The only supported exception
6842 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
6843 registers. A word-based calculation is correct even in that case,
6844 since -msingle-float disallows multi-FPR values. */
6847 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
6848 enum machine_mode mode)
6850 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
/* Implement TARGET_VALID_POINTER_MODE: pointers are SImode, or DImode
   on 64-bit targets.  */
6854 mips_valid_pointer_mode (enum machine_mode mode)
6856 return (mode == SImode || (TARGET_64BIT && mode == DImode));
6860 /* If we can access small data directly (using gp-relative relocation
6861 operators) return the small data pointer, otherwise return null.
6863 For each mips16 function which refers to GP relative symbols, we
6864 use a pseudo register, initialized at the start of the function, to
6865 hold the $gp value. */
6868 mips16_gp_pseudo_reg (void)
/* Lazily create the pseudo the first time it is requested; reuse it
   on every later call within the same function.  */
6870 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
6875 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
6877 /* We want to initialize this to a value which gcc will believe
6880 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
6881 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
6882 gen_rtx_CONST (Pmode, unspec));
6883 insn = get_insns ();
6886 push_topmost_sequence ();
6887 /* We need to emit the initialization after the FUNCTION_BEG
6888 note, so that it will be integrated. */
6889 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
6890 if (GET_CODE (scan) == NOTE
6891 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
/* Fall back to the very first insn if no FUNCTION_BEG note exists.  */
6893 if (scan == NULL_RTX)
6894 scan = get_insns ();
6895 insn = emit_insn_after (insn, scan);
6896 pop_topmost_sequence ();
6899 return cfun->machine->mips16_gp_pseudo_rtx;
6902 /* Write out code to move floating point arguments in or out of
6903 general registers. Output the instructions to FILE. FP_CODE is
6904 the code describing which arguments are present (see the comment at
6905 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
6906 we are copying from the floating point registers. */
6909 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
6915 /* This code only works for the original 32 bit ABI and the O64 ABI. */
6923 gparg = GP_ARG_FIRST;
6924 fparg = FP_ARG_FIRST;
/* Each argument occupies two bits of fp_code: 1 = float, 2 = double.  */
6925 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
/* Single-precision args in an odd FP register need the +1 form.
   NOTE(review): the (f & 3) == 1 test and the mfc1/mtc1 mnemonic
   selection lines are missing from this listing.  */
6929 if ((fparg & 1) != 0)
6931 fprintf (file, "\t%s\t%s,%s\n", s,
6932 reg_names[gparg], reg_names[fparg]);
6934 else if ((f & 3) == 2)
6937 fprintf (file, "\td%s\t%s,%s\n", s,
6938 reg_names[gparg], reg_names[fparg]);
/* Doubles on 32-bit FP: move the two halves separately, ordering the
   words by endianness.  */
6941 if ((fparg & 1) != 0)
6943 if (TARGET_BIG_ENDIAN)
6944 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
6945 reg_names[gparg], reg_names[fparg + 1], s,
6946 reg_names[gparg + 1], reg_names[fparg]);
6948 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
6949 reg_names[gparg], reg_names[fparg], s,
6950 reg_names[gparg + 1], reg_names[fparg + 1]);
6963 /* Build a mips16 function stub. This is used for functions which
6964 take arguments in the floating point registers. It is 32 bit code
6965 that moves the floating point args into the general registers, and
6966 then jumps to the 16 bit code. */
6969 build_mips16_function_stub (FILE *file)
6972 char *secname, *stubname;
6973 tree stubid, stubdecl;
/* Build the stub's section name (.mips16.fn.NAME) and symbol name
   (__fn_stub_NAME), plus a FUNCTION_DECL so the section machinery can
   switch to it.  */
6977 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6978 secname = (char *) alloca (strlen (fnname) + 20);
6979 sprintf (secname, ".mips16.fn.%s", fnname);
6980 stubname = (char *) alloca (strlen (fnname) + 20);
6981 sprintf (stubname, "__fn_stub_%s", fnname);
6982 stubid = get_identifier (stubname);
6983 stubdecl = build_decl (FUNCTION_DECL, stubid,
6984 build_function_type (void_type_node, NULL_TREE));
6985 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
/* Emit a human-readable comment listing the FP argument types.  */
6987 fprintf (file, "\t# Stub function for %s (", current_function_name ());
6989 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
6991 fprintf (file, "%s%s",
6992 need_comma ? ", " : "",
6993 (f & 3) == 1 ? "float" : "double");
6996 fprintf (file, ")\n");
6998 fprintf (file, "\t.set\tnomips16\n");
6999 function_section (stubdecl);
7000 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7002 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7003 within a .ent, and we cannot emit another .ent. */
7004 if (!FUNCTION_NAME_ALREADY_DECLARED)
7006 fputs ("\t.ent\t", file);
7007 assemble_name (file, stubname);
7011 assemble_name (file, stubname);
7012 fputs (":\n", file);
7014 /* We don't want the assembler to insert any nops here. */
7015 fprintf (file, "\t.set\tnoreorder\n");
/* Move the FP args into GP registers (from_fp_p = 1), then jump to
   the real (mips16) function body via $1.  */
7017 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7019 fprintf (asm_out_file, "\t.set\tnoat\n");
7020 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7021 assemble_name (file, fnname);
7022 fprintf (file, "\n");
7023 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7024 fprintf (asm_out_file, "\t.set\tat\n");
7026 /* Unfortunately, we can't fill the jump delay slot. We can't fill
7027 with one of the mfc1 instructions, because the result is not
7028 available for one instruction, so if the very first instruction
7029 in the function refers to the register, it will see the wrong
7031 fprintf (file, "\tnop\n");
7033 fprintf (file, "\t.set\treorder\n");
7035 if (!FUNCTION_NAME_ALREADY_DECLARED)
7037 fputs ("\t.end\t", file);
7038 assemble_name (file, stubname);
/* Switch back to mips16 mode and the original function's section.  */
7042 fprintf (file, "\t.set\tmips16\n");
7044 function_section (current_function_decl);
7047 /* We keep a list of functions for which we have already built stubs
7048 in build_mips16_call_stub. */
/* NOTE(review): the opening of `struct mips16_stub` (its other fields,
   e.g. `name` and `fpret` used in build_mips16_call_stub below) has
   been elided from this extract -- verify against the full file.  */
7052 struct mips16_stub *next;
/* Head of the chain of all call stubs built so far; searched and
   prepended to by build_mips16_call_stub.  */
7057 static struct mips16_stub *mips16_stubs;
7059 /* Build a call stub for a mips16 call. A stub is needed if we are
7060 passing any floating point values which should go into the floating
7061 point registers. If we are, and the call turns out to be to a 32
7062 bit function, the stub will be used to move the values into the
7063 floating point registers before calling the 32 bit function. The
7064 linker will magically adjust the function call to either the 16 bit
7065 function or the 32 bit stub, depending upon where the function call
7066 is actually defined.
7068 Similarly, we need a stub if the return value might come back in a
7069 floating point register.
7071 RETVAL is the location of the return value, or null if this is
7072 a call rather than a call_value. FN is the address of the
7073 function and ARG_SIZE is the size of the arguments. FP_CODE
7074 is the code built by function_arg. This function returns a nonzero
7075 value if it builds the call instruction itself. */
/* Build (if needed) a 32-bit call stub for a MIPS16 call that passes
   or returns values in FP registers.  RETVAL is the return location or
   null, FN the callee address, ARG_SIZE the argument size, FP_CODE the
   two-bit-per-arg type encoding from function_arg.  Returns nonzero
   when this function emitted the call insn itself, zero to let the
   caller emit it.
   NOTE(review): this extract has dropped lines (embedded line numbers
   jump): several local declarations, `return` statements, braces and
   `else` keywords are not visible -- verify against the full file.  */
7078 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
7082 char *secname, *stubname;
7083 struct mips16_stub *l;
7084 tree stubid, stubdecl;
7088 /* We don't need to do anything if we aren't in mips16 mode, or if
7089 we were invoked with the -msoft-float option. */
7090 if (! TARGET_MIPS16 || ! mips16_hard_float)
7093 /* Figure out whether the value might come back in a floating point
7095 fpret = (retval != 0
7096 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
7097 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
7099 /* We don't need to do anything if there were no floating point
7100 arguments and the value will not be returned in a floating point
7102 if (fp_code == 0 && ! fpret)
7105 /* We don't need to do anything if this is a call to a special
7106 mips16 support function. */
7107 if (GET_CODE (fn) == SYMBOL_REF
7108 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
7111 /* This code will only work for o32 and o64 abis. The other ABI's
7112 require more sophisticated support. */
7116 /* We can only handle SFmode and DFmode floating point return
7118 if (fpret && GET_MODE (retval) != SFmode && GET_MODE (retval) != DFmode)
7121 /* If we're calling via a function pointer, then we must always call
7122 via a stub. There are magic stubs provided in libgcc.a for each
7123 of the required cases. Each of them expects the function address
7124 to arrive in register $2. */
7126 if (GET_CODE (fn) != SYMBOL_REF)
7132 /* ??? If this code is modified to support other ABI's, we need
7133 to handle PARALLEL return values here. */
/* Pick the libgcc stub name: __mips16_call_stub_{,sf_,df_}FPCODE.  */
7135 sprintf (buf, "__mips16_call_stub_%s%d",
7137 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
7140 id = get_identifier (buf);
7141 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
/* The libgcc stubs expect the target address in $2.  */
7143 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
7145 if (retval == NULL_RTX)
7146 insn = gen_call_internal (stub_fn, arg_size);
7148 insn = gen_call_value_internal (retval, stub_fn, arg_size);
7149 insn = emit_call_insn (insn);
7151 /* Put the register usage information on the CALL. */
7152 if (GET_CODE (insn) != CALL_INSN)
7154 CALL_INSN_FUNCTION_USAGE (insn) =
7155 gen_rtx_EXPR_LIST (VOIDmode,
7156 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
7157 CALL_INSN_FUNCTION_USAGE (insn));
7159 /* If we are handling a floating point return value, we need to
7160 save $18 in the function prologue. Putting a note on the
7161 call will mean that regs_ever_live[$18] will be true if the
7162 call is not eliminated, and we can check that in the prologue
7165 CALL_INSN_FUNCTION_USAGE (insn) =
7166 gen_rtx_EXPR_LIST (VOIDmode,
7167 gen_rtx_USE (VOIDmode,
7168 gen_rtx_REG (word_mode, 18)),
7169 CALL_INSN_FUNCTION_USAGE (insn));
7171 /* Return 1 to tell the caller that we've generated the call
7176 /* We know the function we are going to call. If we have already
7177 built a stub, we don't need to do anything further. */
7179 fnname = XSTR (fn, 0);
7180 for (l = mips16_stubs; l != NULL; l = l->next)
7181 if (strcmp (l->name, fnname) == 0)
7186 /* Build a special purpose stub. When the linker sees a
7187 function call in mips16 code, it will check where the target
7188 is defined. If the target is a 32 bit call, the linker will
7189 search for the section defined here. It can tell which
7190 symbol this section is associated with by looking at the
7191 relocation information (the name is unreliable, since this
7192 might be a static function). If such a section is found, the
7193 linker will redirect the call to the start of the magic
7196 If the function does not return a floating point value, the
7197 special stub section is named
7200 If the function does return a floating point value, the stub
7202 .mips16.call.fp.FNNAME
7205 secname = (char *) alloca (strlen (fnname) + 40);
7206 sprintf (secname, ".mips16.call.%s%s",
7209 stubname = (char *) alloca (strlen (fnname) + 20);
7210 sprintf (stubname, "__call_stub_%s%s",
7213 stubid = get_identifier (stubname);
7214 stubdecl = build_decl (FUNCTION_DECL, stubid,
7215 build_function_type (void_type_node, NULL_TREE));
7216 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
/* Emit a human-readable assembler comment describing the call.  */
7218 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
7220 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
7224 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7226 fprintf (asm_out_file, "%s%s",
7227 need_comma ? ", " : "",
7228 (f & 3) == 1 ? "float" : "double");
7231 fprintf (asm_out_file, ")\n");
/* The stub is 32-bit code.  */
7233 fprintf (asm_out_file, "\t.set\tnomips16\n");
7234 assemble_start_function (stubdecl, stubname);
7236 if (!FUNCTION_NAME_ALREADY_DECLARED)
7238 fputs ("\t.ent\t", asm_out_file);
7239 assemble_name (asm_out_file, stubname);
7240 fputs ("\n", asm_out_file);
7242 assemble_name (asm_out_file, stubname);
7243 fputs (":\n", asm_out_file);
7246 /* We build the stub code by hand. That's the only way we can
7247 do it, since we can't generate 32 bit code during a 16 bit
7250 /* We don't want the assembler to insert any nops here. */
7251 fprintf (asm_out_file, "\t.set\tnoreorder\n");
/* Copy the FP args the opposite direction to the function stub.  */
7253 mips16_fp_args (asm_out_file, fp_code, 0);
7257 fprintf (asm_out_file, "\t.set\tnoat\n");
7258 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
7260 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7261 fprintf (asm_out_file, "\t.set\tat\n");
7262 /* Unfortunately, we can't fill the jump delay slot. We
7263 can't fill with one of the mtc1 instructions, because the
7264 result is not available for one instruction, so if the
7265 very first instruction in the function refers to the
7266 register, it will see the wrong value. */
7267 fprintf (asm_out_file, "\tnop\n");
/* FP-returning case: save $31 in $18, call the function, then move
   the FP return value into $2 (and $3 for doubles) before jumping
   back through $18.  */
7271 fprintf (asm_out_file, "\tmove\t%s,%s\n",
7272 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
7273 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
7274 /* As above, we can't fill the delay slot. */
7275 fprintf (asm_out_file, "\tnop\n");
7276 if (GET_MODE (retval) == SFmode)
7277 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7278 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
/* DFmode: move both halves; half order depends on endianness.  */
7281 if (TARGET_BIG_ENDIAN)
7283 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7284 reg_names[GP_REG_FIRST + 2],
7285 reg_names[FP_REG_FIRST + 1]);
7286 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7287 reg_names[GP_REG_FIRST + 3],
7288 reg_names[FP_REG_FIRST + 0]);
7292 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7293 reg_names[GP_REG_FIRST + 2],
7294 reg_names[FP_REG_FIRST + 0]);
7295 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7296 reg_names[GP_REG_FIRST + 3],
7297 reg_names[FP_REG_FIRST + 1]);
7300 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
7301 /* As above, we can't fill the delay slot. */
7302 fprintf (asm_out_file, "\tnop\n");
7305 fprintf (asm_out_file, "\t.set\treorder\n");
7307 #ifdef ASM_DECLARE_FUNCTION_SIZE
7308 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
7311 if (!FUNCTION_NAME_ALREADY_DECLARED)
7313 fputs ("\t.end\t", asm_out_file);
7314 assemble_name (asm_out_file, stubname);
7315 fputs ("\n", asm_out_file);
7318 fprintf (asm_out_file, "\t.set\tmips16\n");
7320 /* Record this stub. */
7321 l = (struct mips16_stub *) xmalloc (sizeof *l);
7322 l->name = xstrdup (fnname);
7324 l->next = mips16_stubs;
7328 /* If we expect a floating point return value, but we've built a
7329 stub which does not expect one, then we're in trouble. We can't
7330 use the existing stub, because it won't handle the floating point
7331 value. We can't build a new stub, because the linker won't know
7332 which stub to use for the various calls in this object file.
7333 Fortunately, this case is illegal, since it means that a function
7334 was declared in two different ways in a single compilation. */
7335 if (fpret && ! l->fpret)
7336 error ("cannot handle inconsistent calls to `%s'", fnname);
7338 /* If we are calling a stub which handles a floating point return
7339 value, we need to arrange to save $18 in the prologue. We do
7340 this by marking the function call as using the register. The
7341 prologue will later see that it is used, and emit code to save
7348 if (retval == NULL_RTX)
7349 insn = gen_call_internal (fn, arg_size);
7351 insn = gen_call_value_internal (retval, fn, arg_size);
7352 insn = emit_call_insn (insn);
7354 if (GET_CODE (insn) != CALL_INSN)
7357 CALL_INSN_FUNCTION_USAGE (insn) =
7358 gen_rtx_EXPR_LIST (VOIDmode,
7359 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
7360 CALL_INSN_FUNCTION_USAGE (insn));
7362 /* Return 1 to tell the caller that we've generated the call
7367 /* Return 0 to let the caller generate the call insn. */
7371 /* An entry in the mips16 constant pool. VALUE is the pool constant,
7372 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
/* One entry in a MIPS16 constant pool (see the comment above).  */
7374 struct mips16_constant {
7375 struct mips16_constant *next;
/* NOTE(review): the VALUE and LABEL fields and the closing brace are
   elided from this extract -- verify against the full file.  */
7378 enum machine_mode mode;
7381 /* Information about an incomplete mips16 constant pool. FIRST is the
7382 first constant, HIGHEST_ADDRESS is the highest address that the first
7383 byte of the pool can have, and INSN_ADDRESS is the current instruction
/* An in-progress MIPS16 constant pool (see the comment above).
   NOTE(review): the INSN_ADDRESS field and closing brace are elided
   from this extract -- verify against the full file.  */
7386 struct mips16_constant_pool {
7387 struct mips16_constant *first;
7388 int highest_address;
7392 /* Add constant VALUE to POOL and return its label. MODE is the
7393 value's mode (used for CONST_INTs, etc.). */
/* Add constant VALUE (of mode MODE) to POOL, reusing an existing entry
   when an equal constant is already present, and return its label.
   Also shrinks POOL->highest_address to account for the new entry.
   NOTE(review): this extract has dropped lines (embedded line numbers
   jump): the duplicate-hit return, loop braces, and the tail of the
   new-entry setup (value assignment, list linking, return) are not
   visible -- verify against the full file.  */
7396 add_constant (struct mips16_constant_pool *pool,
7397 rtx value, enum machine_mode mode)
7399 struct mips16_constant **p, *c;
7400 bool first_of_size_p;
7402 /* See whether the constant is already in the pool. If so, return the
7403 existing label, otherwise leave P pointing to the place where the
7404 constant should be added.
7406 Keep the pool sorted in increasing order of mode size so that we can
7407 reduce the number of alignments needed. */
7408 first_of_size_p = true;
7409 for (p = &pool->first; *p != 0; p = &(*p)->next)
7411 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
7413 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
7415 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
7416 first_of_size_p = false;
7419 /* In the worst case, the constant needed by the earliest instruction
7420 will end up at the end of the pool. The entire pool must then be
7421 accessible from that instruction.
7423 When adding the first constant, set the pool's highest address to
7424 the address of the first out-of-range byte. Adjust this address
7425 downwards each time a new constant is added. */
7426 if (pool->first == 0)
7427 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
7428 is the address of the instruction with the lowest two bits clear.
7429 The base PC value for ld has the lowest three bits clear. Assume
7430 the worst case here. */
7431 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
7432 pool->highest_address -= GET_MODE_SIZE (mode);
7433 if (first_of_size_p)
7434 /* Take into account the worst possible padding due to alignment. */
7435 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
7437 /* Create a new entry. */
7438 c = (struct mips16_constant *) xmalloc (sizeof *c);
7441 c->label = gen_label_rtx ();
7448 /* Output constant VALUE after instruction INSN and return the last
7449 instruction emitted. MODE is the mode of the constant. */
/* Output pool constant VALUE (mode MODE) after INSN, returning the last
   insn emitted.  Integer constants become consttable_int entries,
   floats become consttable_float, and vectors recurse element-wise.
   NOTE(review): braces, case labels for the int class, and the default
   abort are elided from this extract -- verify against the full file.  */
7452 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
7454 switch (GET_MODE_CLASS (mode))
7458 rtx size = GEN_INT (GET_MODE_SIZE (mode));
7459 return emit_insn_after (gen_consttable_int (value, size), insn);
7463 return emit_insn_after (gen_consttable_float (value), insn);
7465 case MODE_VECTOR_FLOAT:
7466 case MODE_VECTOR_INT:
7469 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
7470 insn = dump_constants_1 (GET_MODE_INNER (mode),
7471 CONST_VECTOR_ELT (value, i), insn);
7481 /* Dump out the constants in CONSTANTS after INSN. */
/* Emit the whole constant list CONSTANTS after INSN: for each entry,
   raise the alignment if this entry's mode is wider than what has been
   emitted so far, emit its label, then its value; finish with a
   barrier.  NOTE(review): the initialization of `align`, the freeing of
   entries and loop braces are elided from this extract.  */
7484 dump_constants (struct mips16_constant *constants, rtx insn)
7486 struct mips16_constant *c, *next;
7490 for (c = constants; c != NULL; c = next)
7492 /* If necessary, increase the alignment of PC. */
7493 if (align < GET_MODE_SIZE (c->mode))
7495 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
7496 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
7498 align = GET_MODE_SIZE (c->mode);
7500 insn = emit_label_after (c->label, insn);
7501 insn = dump_constants_1 (c->mode, c->value, insn);
7507 emit_barrier_after (insn);
7510 /* Return the length of instruction INSN.
7512 ??? MIPS16 switch tables go in .text, but we don't define
7513 JUMP_TABLES_IN_TEXT_SECTION, so get_attr_length will not
7514 compute their lengths correctly. */
/* Return INSN's length in bytes, computing jump-table sizes by hand
   because MIPS16 tables live in .text without
   JUMP_TABLES_IN_TEXT_SECTION (see the comment above this function).  */
7517 mips16_insn_length (rtx insn)
7519 if (GET_CODE (insn) == JUMP_INSN
7521 rtx body = PATTERN (insn);
7522 if (GET_CODE (body) == ADDR_VEC)
7523 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
/* ADDR_DIFF_VEC keeps its labels in operand 1.  */
7524 if (GET_CODE (body) == ADDR_DIFF_VEC)
7525 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
7527 return get_attr_length (insn);
7530 /* Rewrite *X so that constant pool references refer to the constant's
7531 label instead. DATA points to the constant pool structure. */
/* for_each_rtx callback: replace a constant-pool SYMBOL_REF in *X with
   a LABEL_REF to the corresponding entry in the mips16 pool (adding the
   constant to the pool if needed).  DATA is the mips16_constant_pool.
   NOTE(review): the function's return statement is elided from this
   extract.  */
7534 mips16_rewrite_pool_refs (rtx *x, void *data)
7536 struct mips16_constant_pool *pool = data;
7537 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
7538 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
7539 get_pool_constant (*x),
7540 get_pool_mode (*x)));
7544 /* Build MIPS16 constant pools. */
/* Build MIPS16 constant pools: walk every insn, rewrite constant-pool
   references to pool labels, track the running address, and dump the
   pool at (or before) the point where it would go out of range of its
   first user -- inserting a jump around the pool when no natural
   barrier is available.  Any leftover constants are dumped after the
   last insn.  NOTE(review): this extract has dropped lines (embedded
   line numbers jump): declarations of `insn`/`barrier`/`label`/`jump`,
   several braces, the INSN_P guard and the barrier-tracking updates are
   not visible -- verify against the full file.  */
7547 mips16_lay_out_constants (void)
7549 struct mips16_constant_pool pool;
7553 memset (&pool, 0, sizeof (pool));
7554 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7556 /* Rewrite constant pool references in INSN. */
7558 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
7560 pool.insn_address += mips16_insn_length (insn);
7562 if (pool.first != NULL)
7564 /* If there are no natural barriers between the first user of
7565 the pool and the highest acceptable address, we'll need to
7566 create a new instruction to jump around the constant pool.
7567 In the worst case, this instruction will be 4 bytes long.
7569 If it's too late to do this transformation after INSN,
7570 do it immediately before INSN. */
7571 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
7575 label = gen_label_rtx ();
7577 jump = emit_jump_insn_before (gen_jump (label), insn);
7578 JUMP_LABEL (jump) = label;
7579 LABEL_NUSES (label) = 1;
7580 barrier = emit_barrier_after (jump);
7582 emit_label_after (label, barrier);
7583 pool.insn_address += 4;
7586 /* See whether the constant pool is now out of range of the first
7587 user. If so, output the constants after the previous barrier.
7588 Note that any instructions between BARRIER and INSN (inclusive)
7589 will use negative offsets to refer to the pool. */
7590 if (pool.insn_address > pool.highest_address)
7592 dump_constants (pool.first, barrier);
7596 else if (BARRIER_P (insn))
/* Flush whatever is left at the end of the function.  */
7600 dump_constants (pool.first, get_last_insn ());
7603 /* A temporary variable used by for_each_rtx callbacks, etc. */
7604 static rtx mips_sim_insn;
7606 /* A structure representing the state of the processor pipeline.
7607 Used by the mips_sim_* family of functions. */
/* NOTE(review): the `struct mips_sim {` opener is elided from this
   extract; these are its fields (see the comment above).  */
7609 /* The maximum number of instructions that can be issued in a cycle.
7610 (Caches mips_issue_rate.) */
7611 unsigned int issue_rate;
7613 /* The current simulation time. */
7616 /* How many more instructions can be issued in the current cycle. */
7617 unsigned int insns_left;
7619 /* LAST_SET[X].INSN is the last instruction to set register X.
7620 LAST_SET[X].TIME is the time at which that instruction was issued.
7621 INSN is null if no instruction has yet set register X. */
7625 } last_set[FIRST_PSEUDO_REGISTER];
7627 /* The pipeline's current DFA state. */
7631 /* Reset STATE to the initial simulation state. */
/* Reset STATE to the initial simulation state: a full issue window,
   no recorded register writes, and a reset DFA state.
   NOTE(review): the reset of state->time is elided from this extract.  */
7634 mips_sim_reset (struct mips_sim *state)
7637 state->insns_left = state->issue_rate;
7638 memset (&state->last_set, 0, sizeof (state->last_set));
7639 state_reset (state->dfa_state);
7642 /* Initialize STATE before its first use. DFA_STATE points to an
7643 allocated but uninitialized DFA state. */
/* Initialize STATE before first use; DFA_STATE is allocated but
   uninitialized storage that mips_sim_reset will reset.  */
7646 mips_sim_init (struct mips_sim *state, state_t dfa_state)
7648 state->issue_rate = mips_issue_rate ();
7649 state->dfa_state = dfa_state;
7650 mips_sim_reset (state);
7653 /* Advance STATE by one clock cycle. */
/* Advance STATE by one clock cycle: refill the issue window and step
   the DFA with a null (cycle-advance) transition.
   NOTE(review): the increment of state->time is elided from this
   extract.  */
7656 mips_sim_next_cycle (struct mips_sim *state)
7659 state->insns_left = state->issue_rate;
7660 state_transition (state->dfa_state, 0);
7663 /* Advance simulation state STATE until instruction INSN can read
/* Advance STATE until INSN can read hard register REG: for each of
   REG's constituent hard registers, step cycles until the producing
   instruction's latency has elapsed.  */
7667 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
7671 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
7672 if (state->last_set[REGNO (reg) + i].insn != 0)
/* T is the first cycle at which the producer's result is ready.  */
7676 t = state->last_set[REGNO (reg) + i].time;
7677 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
7678 while (state->time < t)
7679 mips_sim_next_cycle (state);
7683 /* A for_each_rtx callback. If *X is a register, advance simulation state
7684 DATA until mips_sim_insn can read the register's value. */
/* for_each_rtx callback: if *X is a register, wait (in the simulation
   state DATA) until mips_sim_insn can read it.
   NOTE(review): the REG_P test on *X and the return are elided from
   this extract.  */
7687 mips_sim_wait_regs_2 (rtx *x, void *data)
7690 mips_sim_wait_reg (data, mips_sim_insn, *x);
7694 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
/* note_uses callback: apply mips_sim_wait_regs_2 to every register
   mentioned in *X.  */
7697 mips_sim_wait_regs_1 (rtx *x, void *data)
7699 for_each_rtx (x, mips_sim_wait_regs_2, data);
7702 /* Advance simulation state STATE until all of INSN's register
7703 dependencies are satisfied. */
/* Advance STATE until all of INSN's register inputs are available.
   mips_sim_insn carries INSN to the note_uses callbacks.  */
7706 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
7708 mips_sim_insn = insn;
7709 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
7712 /* Advance simulation state STATE until the units required by
7713 instruction INSN are available. */
/* Advance STATE until the functional units INSN needs are free.  A
   scratch copy of the DFA state is used to test each candidate cycle
   without committing the transition.  */
7716 mips_sim_wait_units (struct mips_sim *state, rtx insn)
7720 tmp_state = alloca (state_size ());
7721 while (state->insns_left == 0
7722 || (memcpy (tmp_state, state->dfa_state, state_size ()),
7723 state_transition (tmp_state, insn) >= 0))
7724 mips_sim_next_cycle (state);
7727 /* Advance simulation state STATE until INSN is ready to issue. */
/* Advance STATE until INSN can issue: both its register inputs and
   its functional units must be ready.  */
7730 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
7732 mips_sim_wait_regs (state, insn);
7733 mips_sim_wait_units (state, insn);
7736 /* mips_sim_insn has just set X. Update the LAST_SET array
7737 in simulation state DATA. */
/* note_stores callback: mips_sim_insn has just set X; record the insn
   and current time for each hard register X covers, in the simulation
   state DATA.  NOTE(review): the cast of DATA to `struct mips_sim *`
   and a REG_P guard appear to be on elided lines.  */
7740 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
7742 struct mips_sim *state;
7747 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
7749 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
7750 state->last_set[REGNO (x) + i].time = state->time;
7754 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
7755 can issue immediately (i.e., that mips_sim_wait_insn has already
/* Issue INSN in STATE (the caller must already have waited via
   mips_sim_wait_insn): commit the DFA transition, consume one issue
   slot, and record the registers INSN sets.  */
7759 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
7761 state_transition (state->dfa_state, insn);
7762 state->insns_left--;
7764 mips_sim_insn = insn;
7765 note_stores (PATTERN (insn), mips_sim_record_set, state);
7768 /* Simulate issuing a NOP in state STATE. */
/* Simulate issuing a nop in STATE: start a new cycle if the issue
   window is empty, then consume one slot.  */
7771 mips_sim_issue_nop (struct mips_sim *state)
7773 if (state->insns_left == 0)
7774 mips_sim_next_cycle (state);
7775 state->insns_left--;
7778 /* Update simulation state STATE so that it's ready to accept the instruction
7779 after INSN. INSN should be part of the main rtl chain, not a member of a
/* Update STATE so it is ready for the instruction after INSN (a member
   of the main rtl chain): simulate the implicit delay-slot nop of
   certain jumps, and reset the state entirely after calls, labels and
   annulled (branch-likely) branches, whose aftermath is unpredictable.
   NOTE(review): the delay-slot condition, switch braces, and the
   CODE_LABEL/CALL_INSN case labels are elided from this extract.  */
7783 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
7785 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
7787 mips_sim_issue_nop (state);
7789 switch (GET_CODE (SEQ_BEGIN (insn)))
7793 /* We can't predict the processor state after a call or label. */
7794 mips_sim_reset (state);
7798 /* The delay slots of branch likely instructions are only executed
7799 when the branch is taken. Therefore, if the caller has simulated
7800 the delay slot instruction, STATE does not really reflect the state
7801 of the pipeline for the instruction after the delay slot. Also,
7802 branch likely instructions tend to incur a penalty when not taken,
7803 so there will probably be an extra delay between the branch and
7804 the instruction after the delay slot. */
7805 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
7806 mips_sim_reset (state);
7814 /* The VR4130 pipeline issues aligned pairs of instructions together,
7815 but it stalls the second instruction if it depends on the first.
7816 In order to cut down the amount of logic required, this dependence
7817 check is not based on a full instruction decode. Instead, any non-SPECIAL
7818 instruction is assumed to modify the register specified by bits 20-16
7819 (which is usually the "rt" field).
7821 In beq, beql, bne and bnel instructions, the rt field is actually an
7822 input, so we can end up with a false dependence between the branch
7823 and its delay slot. If this situation occurs in instruction INSN,
7824 try to avoid it by swapping rs and rt. */
/* VR4130 workaround (see the comment above): if INSN is a SEQUENCE of
   an eq/ne conditional branch plus its delay slot, and the delay slot
   references the branch's rt operand but not its rs operand, swap the
   two comparison operands so the pipeline's rt-based dependence check
   does not see a false conflict.  */
7827 vr4130_avoid_branch_rt_conflict (rtx insn)
7831 first = SEQ_BEGIN (insn);
7832 second = SEQ_END (insn);
7833 if (GET_CODE (first) == JUMP_INSN
7834 && GET_CODE (second) == INSN
7835 && GET_CODE (PATTERN (first)) == SET
7836 && GET_CODE (SET_DEST (PATTERN (first))) == PC
7837 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
7839 /* Check for the right kind of condition. */
7840 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
7841 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
7842 && REG_P (XEXP (cond, 0))
7843 && REG_P (XEXP (cond, 1))
7844 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
7845 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
7847 /* SECOND mentions the rt register but not the rs register. */
7848 rtx tmp = XEXP (cond, 0);
7849 XEXP (cond, 0) = XEXP (cond, 1);
7850 XEXP (cond, 1) = tmp;
7855 /* Implement -mvr4130-align. Go through each basic block and simulate the
7856 processor pipeline. If we find that a pair of instructions could execute
7857 in parallel, and the first of those instruction is not 8-byte aligned,
7858 insert a nop to make it aligned. */
/* Implement -mvr4130-align (see the comment above): simulate the
   pipeline over the whole insn chain and insert nops so that pairs of
   instructions that could dual-issue start on an 8-byte boundary.
   NOTE(review): this extract has dropped lines (embedded line numbers
   jump): the initializations of last/last2/aligned_p, several braces,
   and the tail-of-loop updates of last/last2 are not visible -- verify
   against the full file.  */
7861 vr4130_align_insns (void)
7863 struct mips_sim state;
7864 rtx insn, subinsn, last, last2, next;
7869 /* LAST is the last instruction before INSN to have a nonzero length.
7870 LAST2 is the last such instruction before LAST. */
7874 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
7877 mips_sim_init (&state, alloca (state_size ()));
7878 for (insn = get_insns (); insn != 0; insn = next)
7880 unsigned int length;
7882 next = NEXT_INSN (insn);
7884 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
7885 This isn't really related to the alignment pass, but we do it on
7886 the fly to avoid a separate instruction walk. */
7887 vr4130_avoid_branch_rt_conflict (insn);
7889 if (USEFUL_INSN_P (insn))
7890 FOR_EACH_SUBINSN (subinsn, insn)
7892 mips_sim_wait_insn (&state, subinsn);
7894 /* If we want this instruction to issue in parallel with the
7895 previous one, make sure that the previous instruction is
7896 aligned. There are several reasons why this isn't worthwhile
7897 when the second instruction is a call:
7899 - Calls are less likely to be performance critical,
7900 - There's a good chance that the delay slot can execute
7901 in parallel with the call.
7902 - The return address would then be unaligned.
7904 In general, if we're going to insert a nop between instructions
7905 X and Y, it's better to insert it immediately after X. That
7906 way, if the nop makes Y aligned, it will also align any labels
7908 if (state.insns_left != state.issue_rate
7909 && GET_CODE (subinsn) != CALL_INSN)
7911 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
7913 /* SUBINSN is the first instruction in INSN and INSN is
7914 aligned. We want to align the previous instruction
7915 instead, so insert a nop between LAST2 and LAST.
7917 Note that LAST could be either a single instruction
7918 or a branch with a delay slot. In the latter case,
7919 LAST, like INSN, is already aligned, but the delay
7920 slot must have some extra delay that stops it from
7921 issuing at the same time as the branch. We therefore
7922 insert a nop before the branch in order to align its
7924 emit_insn_after (gen_nop (), last2);
7927 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
7929 /* SUBINSN is the delay slot of INSN, but INSN is
7930 currently unaligned. Insert a nop between
7931 LAST and INSN to align it. */
7932 emit_insn_after (gen_nop (), last);
7936 mips_sim_issue_insn (&state, subinsn);
7938 mips_sim_finish_insn (&state, insn);
7940 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
7941 length = get_attr_length (insn);
7944 /* If the instruction is an asm statement or multi-instruction
7945 mips.md patern, the length is only an estimate. Insert an
7946 8 byte alignment after it so that the following instructions
7947 can be handled correctly. */
7948 if (GET_CODE (SEQ_BEGIN (insn)) == INSN
7949 && (recog_memoized (insn) < 0 || length >= 8))
7951 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
7952 next = NEXT_INSN (next);
7953 mips_sim_next_cycle (&state);
/* A 4-byte (mod 8) instruction flips the 8-byte alignment parity.  */
7956 else if (length & 4)
7957 aligned_p = !aligned_p;
7962 /* See whether INSN is an aligned label. */
7963 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
7969 /* Subroutine of mips_reorg. If there is a hazard between INSN
7970 and a previous instruction, avoid it by inserting nops after
7973 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
7974 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
7975 before using the value of that register. *HILO_DELAY counts the
7976 number of instructions since the last hilo hazard (that is,
7977 the number of instructions since the last mflo or mfhi).
7979 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
7980 for the next instruction.
7982 LO_REG is an rtx for the LO register, used in dependence checking. */
/* Insert nop insns after AFTER so that INSN does not hit a HI/LO or
   delayed-register hazard (see the comment above this function), then
   update *HILO_DELAY and *DELAYED_REG for the instruction after INSN.
   LO_REG is an rtx for LO, used for dependence checking.
   NOTE(review): this extract has dropped lines (embedded line numbers
   jump): local declarations of pattern/set/nops/ninsns, the early
   return for zero-length insns, the nop-emitting loop header, and the
   HAZARD_* case labels are not visible -- verify against the full
   file.  */
7985 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
7986 rtx *delayed_reg, rtx lo_reg)
7994 pattern = PATTERN (insn);
7996 /* Do not put the whole function in .set noreorder if it contains
7997 an asm statement. We don't know whether there will be hazards
7998 between the asm statement and the gcc-generated code. */
7999 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8000 cfun->machine->all_noreorder_p = false;
8002 /* Ignore zero-length instructions (barriers and the like). */
8003 ninsns = get_attr_length (insn) / 4;
8007 /* Work out how many nops are needed. Note that we only care about
8008 registers that are explicitly mentioned in the instruction's pattern.
8009 It doesn't matter that calls use the argument registers or that they
8010 clobber hi and lo. */
8011 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8012 nops = 2 - *hilo_delay;
8013 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8018 /* Insert the nops between this instruction and the previous one.
8019 Each new nop takes us further from the last hilo hazard. */
8020 *hilo_delay += nops;
8022 emit_insn_after (gen_hazard_nop (), after);
8024 /* Set up the state for the next instruction. */
8025 *hilo_delay += ninsns;
8027 if (INSN_CODE (insn) >= 0)
8028 switch (get_attr_hazard (insn))
8038 set = single_set (insn);
8041 *delayed_reg = SET_DEST (set);
8047 /* Go through the instruction stream and insert nops where necessary.
8048 See if the whole function can then be put into .set noreorder &
/* Walk the insn stream inserting hazard nops via mips_avoid_hazard,
   and decide whether the whole function can be emitted inside
   .set noreorder / .set nomacro (see the comment above).
   NOTE(review): the initializations of last_insn/hilo_delay/delayed_reg,
   the INSN_P guard, and the last_insn update are elided from this
   extract -- verify against the full file.  */
8052 mips_avoid_hazards (void)
8054 rtx insn, last_insn, lo_reg, delayed_reg;
8057 /* Force all instructions to be split into their final form. */
8058 split_all_insns_noflow ();
8060 /* Recalculate instruction lengths without taking nops into account. */
8061 cfun->machine->ignore_hazard_length_p = true;
8062 shorten_branches (get_insns ());
8064 /* The profiler code uses assembler macros. -mfix-vr4120 relies on
8065 assembler nop insertion. */
8066 cfun->machine->all_noreorder_p = (!current_function_profile
8067 && !TARGET_FIX_VR4120);
8072 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
8074 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
/* Process each member of a SEQUENCE (branch + delay slot) separately.  */
8077 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8078 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8079 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
8080 &hilo_delay, &delayed_reg, lo_reg);
8082 mips_avoid_hazard (last_insn, insn, &hilo_delay,
8083 &delayed_reg, lo_reg);
8090 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
/* NOTE(review): the function header (presumably `mips_reorg`) and the
   TARGET_MIPS16 condition guarding the first branch are elided from
   this extract -- verify against the full file.  MIPS16 compilations
   lay out constant pools; otherwise delayed-branch scheduling and
   hazard avoidance run, then VR4130 alignment when tuned for it.  */
8096 mips16_lay_out_constants ();
8097 else if (TARGET_EXPLICIT_RELOCS)
8099 if (mips_flag_delayed_branch)
8100 dbr_schedule (get_insns (), dump_file);
8101 mips_avoid_hazards ();
8102 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
8103 vr4130_align_insns ();
8107 /* This function does three things:
8109 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
8110 - Register the mips16 hardware floating point stubs.
8111 - Register the gofast functions if selected using --enable-gofast. */
8113 #include "config/gofast.h"
/* Implement the libfunc initialization described in the comment above:
   register the VR4120 div/mod replacements, the mips16 hard-float
   helper routines (SFmode always, DFmode when TARGET_DOUBLE_FLOAT),
   and finally the gofast routines when enabled.  */
8116 mips_init_libfuncs (void)
8118 if (TARGET_FIX_VR4120)
8120 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
8121 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
8124 if (TARGET_MIPS16 && mips16_hard_float)
/* Single-precision arithmetic helpers.  */
8126 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
8127 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
8128 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
8129 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
8131 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
8132 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
8133 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
8134 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
8135 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
8136 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
8138 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
8139 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
8141 if (TARGET_DOUBLE_FLOAT)
/* Double-precision helpers, plus SF<->DF conversions.  */
8143 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
8144 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
8145 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
8146 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
8148 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
8149 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
8150 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
8151 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
8152 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
8153 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
8155 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
8156 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
8158 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
8159 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
8163 gofast_maybe_init_libfuncs ();
8166 /* Return a number assessing the cost of moving a register in class
8167 FROM to class TO. The classes are expressed using the enumeration
8168 values such as `GENERAL_REGS'. A value of 2 is the default; other
8169 values are interpreted relative to that.
8171 It is not required that the cost always equal 2 when FROM is the
8172 same as TO; on some machines it is expensive to move between
8173 registers if they are not general registers.
8175 If reload sees an insn consisting of a single `set' between two
8176 hard registers, and if `REGISTER_MOVE_COST' applied to their
8177 classes returns a value of 2, reload does not check to ensure that
8178 the constraints of the insn are met. Setting a cost of other than
8179 2 will allow reload to verify that the constraints are met. You
8180 should do this if the `movM' pattern's constraints do not allow
8183 ??? We make the cost of moving from HI/LO into general
8184 registers the same as for one of moving general registers to
8185 HI/LO for TARGET_MIPS16 in order to prevent allocating a
8186 pseudo to HI/LO. This might hurt optimizations though, it
8187 isn't clear if it is wise. And it might not work in all cases. We
8188 could solve the DImode LO reg problem by using a multiply, just
8189 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8190 problem by using divide instructions. divu puts the remainder in
8191 the HI reg, so doing a divide by -1 will move the value in the HI
8192 reg for all values except -1. We could handle that case by using a
8193 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8194 a compare/branch to test the input value to see which instruction
8195 we need to use. This gets pretty messy, but it is feasible. */
/* NOTE(review): the `return <cost>;` statements for each class pair are
   elided from this excerpt, as are the function's return type and braces;
   only the class-dispatch skeleton is visible.  Confirm the concrete cost
   values against the full source.  */
8198 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
8199 enum reg_class to, enum reg_class from)
/* mips16 register classes first: moves out of M16_REGS/M16_NA_REGS.  */
8201 if (from == M16_REGS && GR_REG_CLASS_P (to))
8203 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
/* Moves whose source is a general register, dispatched on TO.  */
8205 else if (GR_REG_CLASS_P (from))
8209 else if (to == M16_NA_REGS)
8211 else if (GR_REG_CLASS_P (to))
8218 else if (to == FP_REGS)
8220 else if (to == HI_REG || to == LO_REG || to == MD_REGS)
8227 else if (COP_REG_CLASS_P (to))
8231 } /* GR_REG_CLASS_P (from) */
/* Moves out of the floating-point register file.  */
8232 else if (from == FP_REGS)
8234 if (GR_REG_CLASS_P (to))
8236 else if (to == FP_REGS)
8238 else if (to == ST_REGS)
8240 } /* from == FP_REGS */
/* Moves out of the HI/LO accumulator registers.  */
8241 else if (from == HI_REG || from == LO_REG || from == MD_REGS)
8243 if (GR_REG_CLASS_P (to))
8250 } /* from == HI_REG, etc. */
/* Condition-code (ST) and coprocessor register sources.  */
8251 else if (from == ST_REGS && GR_REG_CLASS_P (to))
8253 else if (COP_REG_CLASS_P (from))
8256 } /* COP_REG_CLASS_P (from) */
8263 /* Return the length of INSN. LENGTH is the initial length computed by
8264 attributes in the machine-description file. */
/* NOTE(review): the return type, braces, the statements that actually
   adjust LENGTH, the switch cases, and the final `return length;` are
   elided from this excerpt.  */
8267 mips_adjust_insn_length (rtx insn, int length)
8269 /* A unconditional jump has an unfilled delay slot if it is not part
8270 of a sequence. A conditional jump normally has a delay slot, but
8271 does not on MIPS16. */
8272 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
8275 /* See how many nops might be needed to avoid hardware hazards. */
/* Hazard lengths are skipped entirely when mips_avoid_hazards set
   cfun->machine->ignore_hazard_length_p (see shorten_branches call).  */
8276 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
8277 switch (get_attr_hazard (insn))
8291 /* All MIPS16 instructions are a measly two bytes. */
8299 /* Return an asm sequence to start a noat block and load the address
8300 of a label into $1. */
/* NOTE(review): the return type, braces, and the conditions selecting
   between the 32-bit and 64-bit GOT sequences are elided here.  In the
   template strings, %[ starts a noat block, %@ is the AT register, %+
   is presumably the GOT pointer register, and %# emits a needed nop —
   confirm against mips_print_operand in the full source.  */
8303 mips_output_load_label (void)
8305 if (TARGET_EXPLICIT_RELOCS)
/* Explicit relocs: page-plus-offset GOT access, word or doubleword.  */
8309 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
8312 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
/* Implicit relocs with a load delay slot need a nop (%#) after the lw.  */
8315 if (ISA_HAS_LOAD_DELAY)
8316 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
8317 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Non-PIC case: a (d)la macro loads the label address directly.  */
8321 if (Pmode == DImode)
8322 return "%[dla\t%@,%0";
8324 return "%[la\t%@,%0";
8329 /* Output assembly instructions to peform a conditional branch.
8331 INSN is the branch instruction. OPERANDS[0] is the condition.
8332 OPERANDS[1] is the target of the branch. OPERANDS[2] is the target
8333 of the first operand to the condition. If TWO_OPERANDS_P is
8334 nonzero the comparison takes two operands; OPERANDS[3] will be the
8337 If INVERTED_P is nonzero we are to branch if the condition does
8338 not hold. If FLOAT_P is nonzero this is a floating-point comparison.
8340 LENGTH is the length (in bytes) of the sequence we are to generate.
8341 That tells us whether to generate a simple conditional branch, or a
8342 reversed conditional branch around a `jr' instruction. */
/* NOTE(review): many lines of this function are elided from this excerpt
   (the return type, braces, the switch over CODE in the !two_operands_p
   path, several sprintf branches, and the final returns).  The comments
   below describe only what the visible lines establish.  */
8344 mips_output_conditional_branch (rtx insn, rtx *operands, int two_operands_p,
8345 int float_p, int inverted_p, int length)
8347 static char buffer[200];
8348 /* The kind of comparison we are doing. */
8349 enum rtx_code code = GET_CODE (operands[0]);
8350 /* Nonzero if the opcode for the comparison needs a `z' indicating
8351 that it is a comparison against zero. */
8353 /* A string to use in the assembly output to represent the first
8355 const char *op1 = "%z2";
8356 /* A string to use in the assembly output to represent the second
8357 operand. Use the hard-wired zero register if there's no second
8359 const char *op2 = (two_operands_p ? ",%z3" : ",%.");
8360 /* The operand-printing string for the comparison. */
8361 const char *const comp = (float_p ? "%F0" : "%C0");
8362 /* The operand-printing string for the inverted comparison. */
8363 const char *const inverted_comp = (float_p ? "%W0" : "%N0");
8365 /* The MIPS processors (for levels of the ISA at least two), have
8366 "likely" variants of each branch instruction. These instructions
8367 annul the instruction in the delay slot if the branch is not
/* A branch is "likely" only if final is processing a delay-slot
   SEQUENCE and the branch is marked as annulling.  */
8369 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
8371 if (!two_operands_p)
8373 /* To compute whether than A > B, for example, we normally
8374 subtract B from A and then look at the sign bit. But, if we
8375 are doing an unsigned comparison, and B is zero, we don't
8376 have to do the subtraction. Instead, we can just check to
8377 see if A is nonzero. Thus, we change the CODE here to
8378 reflect the simpler comparison operation. */
8390 /* A condition which will always be true. */
8396 /* A condition which will always be false. */
8402 /* Not a special case. */
8407 /* Relative comparisons are always done against zero. But
8408 equality comparisons are done between two operands, and therefore
8409 do not require a `z' in the assembly language output. */
8410 need_z_p = (!float_p && code != EQ && code != NE);
8411 /* For comparisons against zero, the zero is not provided
8416 /* Begin by terminating the buffer. That way we can always use
8417 strcat to add to it. */
8424 /* Just a simple conditional branch. */
/* Short-branch forms: float comparisons use the %Z2 condition-code
   operand; integer ones append "z" to the opcode when needed.  */
8426 sprintf (buffer, "%%*b%s%%?\t%%Z2%%1%%/",
8427 inverted_p ? inverted_comp : comp);
8429 sprintf (buffer, "%%*b%s%s%%?\t%s%s,%%1%%/",
8430 inverted_p ? inverted_comp : comp,
8431 need_z_p ? "z" : "",
8441 /* Generate a reversed conditional branch around ` j'
8454 If the original branch was a likely branch, the delay slot
8455 must be executed only if the branch is taken, so generate:
8467 When generating PIC, instead of:
/* Long-branch form: branch on the REVERSED condition around an
   unconditional jump to the real target.  */
8480 rtx target = gen_label_rtx ();
8482 orig_target = operands[1];
8483 operands[1] = target;
8484 /* Generate the reversed comparison. This takes four
8487 sprintf (buffer, "%%*b%s\t%%Z2%%1",
8488 inverted_p ? comp : inverted_comp);
8490 sprintf (buffer, "%%*b%s%s\t%s%s,%%1",
8491 inverted_p ? comp : inverted_comp,
8492 need_z_p ? "z" : "",
8495 output_asm_insn (buffer, operands);
8497 if (length != 16 && length != 28 && ! mips_branch_likely)
8499 /* Output delay slot instruction. */
/* Emit the delay-slot insn from the SEQUENCE ourselves, then mark it
   deleted so final does not emit it a second time.  */
8500 rtx insn = final_sequence;
8501 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8502 optimize, 0, 1, NULL);
8503 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8506 output_asm_insn ("%#", 0);
8509 output_asm_insn ("j\t%0", &orig_target);
/* PIC: load the target address through mips_output_load_label and
   jump through AT ($1) instead of a direct `j'.  */
8512 output_asm_insn (mips_output_load_label (), &orig_target);
8513 output_asm_insn ("jr\t%@%]", 0);
8516 if (length != 16 && length != 28 && mips_branch_likely)
8518 /* Output delay slot instruction. */
8519 rtx insn = final_sequence;
8520 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8521 optimize, 0, 1, NULL);
8522 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8525 output_asm_insn ("%#", 0);
/* Emit the local label the reversed branch skips to.  */
8527 (*targetm.asm_out.internal_label) (asm_out_file, "L",
8528 CODE_LABEL_NUMBER (target));
8541 /* Used to output div or ddiv instruction DIVISION, which has the operands
8542 given by OPERANDS. Add in a divide-by-zero check if needed.
8544 When working around R4000 and R4400 errata, we need to make sure that
8545 the division is not immediately followed by a shift[1][2]. We also
8546 need to stop the division from being put into a branch delay slot[3].
8547 The easiest way to avoid both problems is to add a nop after the
8548 division. When a divide-by-zero check is needed, this nop can be
8549 used to fill the branch delay slot.
8551 [1] If a double-word or a variable shift executes immediately
8552 after starting an integer division, the shift may give an
8553 incorrect result. See quotations of errata #16 and #28 from
8554 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8555 in mips.md for details.
8557 [2] A similar bug to [1] exists for all revisions of the
8558 R4000 and the R4400 when run in an MC configuration.
8559 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
8561 "19. In this following sequence:
8563 ddiv (or ddivu or div or divu)
8564 dsll32 (or dsrl32, dsra32)
8566 if an MPT stall occurs, while the divide is slipping the cpu
8567 pipeline, then the following double shift would end up with an
8570 Workaround: The compiler needs to avoid generating any
8571 sequence with divide followed by extended double shift."
8573 This erratum is also present in "MIPS R4400MC Errata, Processor
8574 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
8575 & 3.0" as errata #10 and #4, respectively.
8577 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8578 (also valid for MIPS R4000MC processors):
8580 "52. R4000SC: This bug does not apply for the R4000PC.
8582 There are two flavors of this bug:
8584 1) If the instruction just after divide takes an RF exception
8585 (tlb-refill, tlb-invalid) and gets an instruction cache
8586 miss (both primary and secondary) and the line which is
8587 currently in secondary cache at this index had the first
8588 data word, where the bits 5..2 are set, then R4000 would
8589 get a wrong result for the div.
8594 ------------------- # end-of page. -tlb-refill
8599 ------------------- # end-of page. -tlb-invalid
8602 2) If the divide is in the taken branch delay slot, where the
8603 target takes RF exception and gets an I-cache miss for the
8604 exception vector or where I-cache miss occurs for the
8605 target address, under the above mentioned scenarios, the
8606 div would get wrong results.
8609 j r2 # to next page mapped or unmapped
8610 div r8,r9 # this bug would be there as long
8611 # as there is an ICache miss and
8612 nop # the "data pattern" is present
8615 beq r0, r0, NextPage # to Next page
8619 This bug is present for div, divu, ddiv, and ddivu
8622 Workaround: For item 1), OS could make sure that the next page
8623 after the divide instruction is also mapped. For item 2), the
8624 compiler could make sure that the divide instruction is not in
8625 the branch delay slot."
8627 These processors have PRId values of 0x00004220 and 0x00004300 for
8628 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
/* NOTE(review): the return type, braces, the declaration/initialization
   of `s' (presumably DIVISION initially), and the final `return s;' are
   elided from this excerpt.  */
8631 mips_output_division (const char *division, rtx *operands)
/* Errata workaround: emit the division now and leave `s' as the nop
   (or zero-check) that must follow it — see comment [1]-[3] above.  */
8636 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
8638 output_asm_insn (s, operands);
8641 if (TARGET_CHECK_ZERO_DIV)
/* -mcheck-zero-division: trap with `break 7' when the divisor is 0.
   Two forms are visible; the second wraps the check in %( ... %),
   which presumably brackets a noreorder region — confirm against
   mips_print_operand punctuation handling.  */
8645 output_asm_insn (s, operands);
8646 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
8650 output_asm_insn ("%(bne\t%2,%.,1f", operands);
8651 output_asm_insn (s, operands);
8652 s = "break\t7%)\n1:";
8658 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
8659 with a final "000" replaced by "k". Ignore case.
8661 Note: this function is shared between GCC and GAS. */
/* NOTE(review): the return type line (presumably `static bool') and the
   function braces are elided from this excerpt.  */
8664 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
/* Advance both pointers over the common case-insensitive prefix.  */
8666 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
8667 given++, canonical++;
/* Match if both strings ended together, or if CANONICAL has "000" left
   where GIVEN has "k" — e.g. "r4k" matches canonical "r4000".  */
8669 return ((*given == 0 && *canonical == 0)
8670 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
8674 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
8675 CPU name. We've traditionally allowed a lot of variation here.
8677 Note: this function is shared between GCC and GAS. */
/* NOTE(review): the return type, braces, `return true;' after the strict
   match, `given++;' after the 'r' test, and the pointer advances after
   the prefix tests are elided from this excerpt.  */
8680 mips_matching_cpu_name_p (const char *canonical, const char *given)
8682 /* First see if the name matches exactly, or with a final "000"
8684 if (mips_strict_matching_cpu_name_p (canonical, given))
8687 /* If not, try comparing based on numerical designation alone.
8688 See if GIVEN is an unadorned number, or 'r' followed by a number. */
8689 if (TOLOWER (*given) == 'r')
/* GIVEN must have a digit here, else it cannot be a bare designation.  */
8691 if (!ISDIGIT (*given))
8694 /* Skip over some well-known prefixes in the canonical name,
8695 hoping to find a number there too. */
8696 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
8698 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
8700 else if (TOLOWER (canonical[0]) == 'r')
/* Compare the numeric remainders with the strict matcher, so the
   "000" -> "k" equivalence still applies.  */
8703 return mips_strict_matching_cpu_name_p (canonical, given);
8707 /* Parse an option that takes the name of a processor as its argument.
8708 OPTION is the name of the option and CPU_STRING is the argument.
8709 Return the corresponding processor enumeration if the CPU_STRING is
8710 recognized, otherwise report an error and return null.
8712 A similar function exists in GAS. */
8714 static const struct mips_cpu_info *
8715 mips_parse_cpu (const char *option, const char *cpu_string)
8717 const struct mips_cpu_info *p;
/* NOTE(review): the declaration of `s', the ISUPPER test inside the
   warning loop, the `return 0;' statements after "default" and the
   error, and the `return p;' on a table match are elided here.  */
8720 /* In the past, we allowed upper-case CPU names, but it doesn't
8721 work well with the multilib machinery. */
8722 for (s = cpu_string; *s != 0; s++)
8725 warning ("the cpu name must be lower case");
8729 /* 'from-abi' selects the most compatible architecture for the given
8730 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
8731 EABIs, we have to decide whether we're using the 32-bit or 64-bit
8732 version. Look first at the -mgp options, if given, otherwise base
8733 the choice on MASK_64BIT in TARGET_DEFAULT. */
8734 if (strcasecmp (cpu_string, "from-abi") == 0)
8735 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
8736 : ABI_NEEDS_64BIT_REGS ? 3
8737 : (TARGET_64BIT ? 3 : 1));
8739 /* 'default' has traditionally been a no-op. Probably not very useful. */
8740 if (strcasecmp (cpu_string, "default") == 0)
/* Fall back to a linear search of the CPU table using the permissive
   name matcher above.  */
8743 for (p = mips_cpu_info_table; p->name != 0; p++)
8744 if (mips_matching_cpu_name_p (p->name, cpu_string))
8747 error ("bad value (%s) for %s", cpu_string, option);
8752 /* Return the processor associated with the given ISA level, or null
8753 if the ISA isn't valid. */
8755 static const struct mips_cpu_info *
8756 mips_cpu_info_from_isa (int isa)
8758 const struct mips_cpu_info *p;
/* NOTE(review): the loop body (presumably an `isa' comparison returning
   `p'), the fallback `return 0;', and the braces are elided here.  */
8760 for (p = mips_cpu_info_table; p->name != 0; p++)
8767 /* Implement HARD_REGNO_NREGS. The size of FP registers are controlled
8768 by UNITS_PER_FPREG. All other registers are word sized. */
/* NOTE(review): the return type line and the function braces are elided
   from this excerpt.  */
8771 mips_hard_regno_nregs (int regno, enum machine_mode mode)
/* Non-FP registers: round the mode size up to whole words.  */
8773 if (! FP_REG_P (regno))
8774 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
/* FP registers: round up to whole FP-register units instead.  */
8776 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
8779 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
8780 all BLKmode objects are returned in memory. Under the new (N32 and
8781 64-bit MIPS ABIs) small structures are returned in a register.
8782 Objects with varying size must still be returned in memory, of
/* NOTE(review): the return type, braces, and the ABI test selecting
   between the two return expressions (presumably TARGET_OLDABI) are
   elided from this excerpt.  */
8786 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
/* Old ABIs: any BLKmode value goes in memory.  */
8789 return (TYPE_MODE (type) == BLKmode);
/* New ABIs: memory only for values wider than two words or of
   variable size (int_size_in_bytes == -1).  */
8791 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
8792 || (int_size_in_bytes (type) == -1));
/* Implement TARGET_STRICT_ARGUMENT_NAMING: the new ABIs require strict
   argument naming; the old ABIs do not.  (Return type and braces are
   elided from this excerpt.)  */
8796 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
8798 return !TARGET_OLDABI;
8801 /* Return true if INSN is a multiply-add or multiply-subtract
8802 instruction and PREV assigns to the accumulator operand. */
/* NOTE(review): the return type, braces, the narrowing of `x' to
   SET_SRC, the early-exit when single_set fails, the `return true;'
   bodies, and the final `return false;' are elided from this excerpt.  */
8805 mips_linked_madd_p (rtx prev, rtx insn)
8809 x = single_set (insn);
/* Multiply-add: (plus (mult ...) acc) where PREV sets the accumulator.  */
8815 if (GET_CODE (x) == PLUS
8816 && GET_CODE (XEXP (x, 0)) == MULT
8817 && reg_set_p (XEXP (x, 1), prev))
/* Multiply-subtract: (minus acc (mult ...)) likewise.  */
8820 if (GET_CODE (x) == MINUS
8821 && GET_CODE (XEXP (x, 1)) == MULT
8822 && reg_set_p (XEXP (x, 0), prev))
8828 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
8829 that may clobber hi or lo. */
8831 static rtx mips_macc_chains_last_hilo;
8833 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
8834 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
/* (Return type and braces elided from this excerpt.)  */
8837 mips_macc_chains_record (rtx insn)
8839 if (get_attr_may_clobber_hilo (insn))
8840 mips_macc_chains_last_hilo = insn;
8843 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
8844 has NREADY elements, looking for a multiply-add or multiply-subtract
8845 instruction that is cumulative with mips_macc_chains_last_hilo.
8846 If there is one, promote it ahead of anything else that might
8847 clobber hi or lo. */
/* NOTE(review): declarations of i/j, braces, and the loop-exit control
   after mips_promote_ready are elided from this excerpt.  */
8850 mips_macc_chains_reorder (rtx *ready, int nready)
8854 if (mips_macc_chains_last_hilo != 0)
/* READY is ordered with the highest-priority insn last; scan from the
   end for a madd/msub chained to the last hi/lo writer.  */
8855 for (i = nready - 1; i >= 0; i--)
8856 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
/* Promote it past every earlier-issuing insn that could clobber
   hi/lo, so the accumulator chain is not broken.  */
8858 for (j = nready - 1; j > i; j--)
8859 if (recog_memoized (ready[j]) >= 0
8860 && get_attr_may_clobber_hilo (ready[j]))
8862 mips_promote_ready (ready, i, j);
8869 /* The last instruction to be scheduled. */
8871 static rtx vr4130_last_insn;
8873 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
8874 points to an rtx that is initially an instruction. Nullify the rtx
8875 if the instruction uses the value of register X. */
/* (Return type, braces, the REG_P test on X, and the `*insn_ptr = 0;'
   assignment are elided from this excerpt.)  */
8878 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8880 rtx *insn_ptr = data;
8883 && reg_referenced_p (x, PATTERN (*insn_ptr)))
8887 /* Return true if there is true register dependence between vr4130_last_insn
/* Walk every store performed by vr4130_last_insn; the callback clears
   `insn' if INSN reads a stored register.  (The `return insn == 0;'
   and braces are elided from this excerpt.)  */
8891 vr4130_true_reg_dependence_p (rtx insn)
8893 note_stores (PATTERN (vr4130_last_insn),
8894 vr4130_true_reg_dependence_p_1, &insn);
8898 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
8899 the ready queue and that INSN2 is the instruction after it, return
8900 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
8901 in which INSN1 and INSN2 can probably issue in parallel, but for
8902 which (INSN2, INSN1) should be less sensitive to instruction
8903 alignment than (INSN1, INSN2). See 4130.md for more details. */
/* NOTE(review): the return type, braces, the declaration of `dep', the
   `return false;' bodies, and the final return statements are elided
   from this excerpt.  */
8906 vr4130_swap_insns_p (rtx insn1, rtx insn2)
8910 /* Check for the following case:
8912 1) there is some other instruction X with an anti dependence on INSN1;
8913 2) X has a higher priority than INSN2; and
8914 3) X is an arithmetic instruction (and thus has no unit restrictions).
8916 If INSN1 is the last instruction blocking X, it would better to
8917 choose (INSN1, X) over (INSN2, INSN1). */
8918 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
8919 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
8920 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
8921 && recog_memoized (XEXP (dep, 0)) >= 0
8922 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
8925 if (vr4130_last_insn != 0
8926 && recog_memoized (insn1) >= 0
8927 && recog_memoized (insn2) >= 0)
8929 /* See whether INSN1 and INSN2 use different execution units,
8930 or if they are both ALU-type instructions. If so, they can
8931 probably execute in parallel. */
8932 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
8933 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
8934 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
8936 /* If only one of the instructions has a dependence on
8937 vr4130_last_insn, prefer to schedule the other one first. */
8938 bool dep1 = vr4130_true_reg_dependence_p (insn1);
8939 bool dep2 = vr4130_true_reg_dependence_p (insn2);
/* (The comparison of dep1/dep2 that implements the preference is
   elided from this excerpt.)  */
8943 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
8944 is not an ALU-type instruction and if INSN1 uses the same
8945 execution unit. (Note that if this condition holds, we already
8946 know that INSN2 uses a different execution unit.) */
8947 if (class1 != VR4130_CLASS_ALU
8948 && recog_memoized (vr4130_last_insn) >= 0
8949 && class1 == get_attr_vr4130_class (vr4130_last_insn))
8956 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
8957 queue with at least two instructions. Swap the first two if
8958 vr4130_swap_insns_p says that it could be worthwhile. */
/* (Return type and braces elided.)  The scheduler keeps the ready queue
   with the next-to-issue insn at index nready - 1.  */
8961 vr4130_reorder (rtx *ready, int nready)
8963 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
8964 mips_promote_ready (ready, nready - 2, nready - 1);
8967 /* Remove the instruction at index LOWER from ready queue READY and
8968 reinsert it in front of the instruction at index HIGHER. LOWER must
/* NOTE(review): the rest of the comment ("...be <= HIGHER"), the return
   type, braces, and the declarations of `new_head' and `i' are elided
   from this excerpt.  */
8972 mips_promote_ready (rtx *ready, int lower, int higher)
/* Rotate READY[lower..higher] left by one, moving READY[lower] to
   index HIGHER (the position that issues soonest).  */
8977 new_head = ready[lower];
8978 for (i = lower; i < higher; i++)
8979 ready[i] = ready[i + 1];
8980 ready[i] = new_head;
8983 /* Implement TARGET_SCHED_REORDER. */
/* NOTE(review): the return type (presumably `static int'), braces, the
   `cycle == 0' resets guarding the two `= 0' assignments, and the
   `*nreadyp > 0' / `*nreadyp > 1' guards are partially elided here.  */
8986 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
8987 rtx *ready, int *nreadyp, int cycle)
/* First scheduling pass on macc-chain targets: keep multiply-accumulate
   chains together (see mips_macc_chains_reorder).  */
8989 if (!reload_completed && TUNE_MACC_CHAINS)
8992 mips_macc_chains_last_hilo = 0;
8994 mips_macc_chains_reorder (ready, *nreadyp);
/* Second pass on VR4130 without -mvr4130-align: consider swapping the
   two head-of-queue insns (see vr4130_reorder).  */
8996 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
8999 vr4130_last_insn = 0;
9001 vr4130_reorder (ready, *nreadyp);
/* The hook's return value is the number of insns that can issue
   this cycle.  */
9003 return mips_issue_rate ();
9006 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
/* NOTE(review): the return type, the `more' parameter, braces, the
   USE/CLOBBER case labels, the default branch structure, and the final
   `return more;`-style statement are elided from this excerpt.  */
9009 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9012 switch (GET_CODE (PATTERN (insn)))
9016 /* Don't count USEs and CLOBBERs against the issue rate. */
/* Real instructions: update the macc-chain tracker on the first
   scheduling pass and remember this insn for the VR4130 heuristics.  */
9021 if (!reload_completed && TUNE_MACC_CHAINS)
9022 mips_macc_chains_record (insn);
9023 vr4130_last_insn = insn;
9029 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9030 dependencies have no cost. */
/* (Return type, braces, the `return 0;' for non-true dependencies, and
   the final `return cost;' are elided from this excerpt.)  A zero
   REG_NOTE_KIND denotes a true dependence; anything else is anti/output.  */
9033 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9034 rtx dep ATTRIBUTE_UNUSED, int cost)
9036 if (REG_NOTE_KIND (link) != 0)
9041 /* Return the number of instructions that can be issued per cycle. */
/* NOTE(review): the return type, braces, the `switch (mips_tune)' line,
   the `return' values for each case group, and the default case are
   elided from this excerpt.  */
9044 mips_issue_rate (void)
/* Dual-issue processors (grouped to share one return).  */
9048 case PROCESSOR_R4130:
9049 case PROCESSOR_R5400:
9050 case PROCESSOR_R5500:
9051 case PROCESSOR_R7000:
9052 case PROCESSOR_R9000:
9056 /* This is actually 4, but we get better performance if we claim 3.
9057 This is partly because of unwanted speculative code motion with the
9058 larger number, and partly because in most common cases we can't
9059 reach the theoretical max of 4. */
9070 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9071 be as wide as the scheduling freedom in the DFA. */
/* (Return type, braces, the SB1 return value, and the default return
   are elided from this excerpt.)  */
9074 mips_multipass_dfa_lookahead (void)
9076 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9077 if (mips_tune == PROCESSOR_SB1)
9083 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9084 return the first operand of the associated "pref" or "prefx" insn. */
/* NOTE(review): the return type (an rtx), braces, and the return for the
   middle "normal" locality band (1..2) are elided from this excerpt.
   WRITE selects the store/load flavor of each hint; the +4/+6 offsets
   map it into the streamed/retained hint encodings respectively.  */
9087 mips_prefetch_cookie (rtx write, rtx locality)
9089 /* store_streamed / load_streamed. */
9090 if (INTVAL (locality) <= 0)
9091 return GEN_INT (INTVAL (write) + 4);
9094 if (INTVAL (locality) <= 2)
9097 /* store_retained / load_retained. */
9098 return GEN_INT (INTVAL (write) + 6);
9101 #include "gt-mips.h"