1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 59 Temple Place - Suite 330,
24 Boston, MA 02111-1307, USA. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
61 #define UNSPEC_ADDRESS_P(X) \
62 (GET_CODE (X) == UNSPEC \
63 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
64 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66 /* Extract the symbol or label from UNSPEC wrapper X. */
67 #define UNSPEC_ADDRESS(X) \
70 /* Extract the symbol type from UNSPEC wrapper X. */
71 #define UNSPEC_ADDRESS_TYPE(X) \
72 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
74 /* The maximum distance between the top of the stack frame and the
75 value $sp has when we save & restore registers.
77 Use a maximum gap of 0x100 in the mips16 case. We can then use
78 unextended instructions to save and restore registers, and to
79 allocate and deallocate the top part of the frame.
81 The value in the !mips16 case must be a SMALL_OPERAND and must
82 preserve the maximum stack alignment. */
83 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
85 /* True if INSN is a mips.md pattern or asm statement. */
86 #define USEFUL_INSN_P(INSN) \
88 && GET_CODE (PATTERN (INSN)) != USE \
89 && GET_CODE (PATTERN (INSN)) != CLOBBER \
90 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
91 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93 /* If INSN is a delayed branch sequence, return the first instruction
94 in the sequence, otherwise return INSN itself. */
95 #define SEQ_BEGIN(INSN) \
96 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
97 ? XVECEXP (PATTERN (INSN), 0, 0) \
100 /* Likewise for the last instruction in a delayed branch sequence. */
101 #define SEQ_END(INSN) \
102 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
103 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
106 /* Execute the following loop body with SUBINSN set to each instruction
107 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
108 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
109 for ((SUBINSN) = SEQ_BEGIN (INSN); \
110 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
111 (SUBINSN) = NEXT_INSN (SUBINSN))
113 /* Classifies an address.
116 A natural register + offset address. The register satisfies
117 mips_valid_base_register_p and the offset is a const_arith_operand.
120 A LO_SUM rtx. The first operand is a valid base register and
121 the second operand is a symbolic address.
124 A signed 16-bit constant address.
127 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
128 enum mips_address_type {
135 /* Classifies the prototype of a builtin function. */
136 enum mips_function_type
138 MIPS_V2SF_FTYPE_V2SF,
139 MIPS_V2SF_FTYPE_V2SF_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
142 MIPS_V2SF_FTYPE_SF_SF,
143 MIPS_INT_FTYPE_V2SF_V2SF,
144 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
145 MIPS_INT_FTYPE_SF_SF,
146 MIPS_INT_FTYPE_DF_DF,
157 /* Specifies how a builtin function should be converted into rtl. */
158 enum mips_builtin_type
160 /* The builtin corresponds directly to an .md pattern. The return
161 value is mapped to operand 0 and the arguments are mapped to
162 operands 1 and above. */
165 /* The builtin corresponds to a comparison instruction followed by
166 a mips_cond_move_tf_ps pattern. The first two arguments are the
167 values to compare and the second two arguments are the vector
168 operands for the movt.ps or movf.ps instruction (in assembly order). */
172 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
173 of this instruction is the result of the comparison, which has mode
174 CCV2 or CCV4. The function arguments are mapped to operands 1 and
175 above. The function's return value is an SImode boolean that is
176 true under the following conditions:
178 MIPS_BUILTIN_CMP_ANY: one of the registers is true
179 MIPS_BUILTIN_CMP_ALL: all of the registers are true
180 MIPS_BUILTIN_CMP_LOWER: the first register is true
181 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
182 MIPS_BUILTIN_CMP_ANY,
183 MIPS_BUILTIN_CMP_ALL,
184 MIPS_BUILTIN_CMP_UPPER,
185 MIPS_BUILTIN_CMP_LOWER,
187 /* As above, but the instruction only sets a single $fcc register. */
188 MIPS_BUILTIN_CMP_SINGLE
191 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
192 #define MIPS_FP_CONDITIONS(MACRO) \
210 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
211 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
212 enum mips_fp_condition {
213 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
216 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
217 #define STRINGIFY(X) #X
218 static const char *const mips_fp_conditions[] = {
219 MIPS_FP_CONDITIONS (STRINGIFY)
222 /* A function to save or store a register. The first argument is the
223 register and the second is the stack slot. */
224 typedef void (*mips_save_restore_fn) (rtx, rtx);
226 struct mips16_constant;
227 struct mips_arg_info;
228 struct mips_address_info;
229 struct mips_integer_op;
232 static enum mips_symbol_type mips_classify_symbol (rtx);
233 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
234 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
235 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
236 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
237 static bool mips_classify_address (struct mips_address_info *, rtx,
238 enum machine_mode, int);
239 static int mips_symbol_insns (enum mips_symbol_type);
240 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
241 static rtx mips_force_temporary (rtx, rtx);
242 static rtx mips_split_symbol (rtx, rtx);
243 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
244 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
245 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
246 static unsigned int mips_build_lower (struct mips_integer_op *,
247 unsigned HOST_WIDE_INT);
248 static unsigned int mips_build_integer (struct mips_integer_op *,
249 unsigned HOST_WIDE_INT);
250 static void mips_move_integer (rtx, unsigned HOST_WIDE_INT);
251 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
252 static int m16_check_op (rtx, int, int, int);
253 static bool mips_rtx_costs (rtx, int, int, int *);
254 static int mips_address_cost (rtx);
255 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
256 static void mips_load_call_address (rtx, rtx, int);
257 static bool mips_function_ok_for_sibcall (tree, tree);
258 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
259 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
260 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
261 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
262 tree, int, struct mips_arg_info *);
263 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
264 static void mips_set_architecture (const struct mips_cpu_info *);
265 static void mips_set_tune (const struct mips_cpu_info *);
266 static struct machine_function *mips_init_machine_status (void);
267 static void print_operand_reloc (FILE *, rtx, const char **);
269 static void irix_output_external_libcall (rtx);
271 static void mips_file_start (void);
272 static void mips_file_end (void);
273 static bool mips_rewrite_small_data_p (rtx);
274 static int mips_small_data_pattern_1 (rtx *, void *);
275 static int mips_rewrite_small_data_1 (rtx *, void *);
276 static bool mips_function_has_gp_insn (void);
277 static unsigned int mips_global_pointer (void);
278 static bool mips_save_reg_p (unsigned int);
279 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
280 mips_save_restore_fn);
281 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
282 static void mips_output_cplocal (void);
283 static void mips_emit_loadgp (void);
284 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
285 static void mips_set_frame_expr (rtx);
286 static rtx mips_frame_set (rtx, rtx);
287 static void mips_save_reg (rtx, rtx);
288 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
289 static void mips_restore_reg (rtx, rtx);
290 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
291 HOST_WIDE_INT, tree);
292 static int symbolic_expression_p (rtx);
293 static void mips_select_rtx_section (enum machine_mode, rtx,
294 unsigned HOST_WIDE_INT);
295 static void mips_function_rodata_section (tree);
296 static bool mips_in_small_data_p (tree);
297 static int mips_fpr_return_fields (tree, tree *);
298 static bool mips_return_in_msb (tree);
299 static rtx mips_return_fpr_pair (enum machine_mode mode,
300 enum machine_mode mode1, HOST_WIDE_INT,
301 enum machine_mode mode2, HOST_WIDE_INT);
302 static rtx mips16_gp_pseudo_reg (void);
303 static void mips16_fp_args (FILE *, int, int);
304 static void build_mips16_function_stub (FILE *);
305 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
306 static void dump_constants (struct mips16_constant *, rtx);
307 static int mips16_insn_length (rtx);
308 static int mips16_rewrite_pool_refs (rtx *, void *);
309 static void mips16_lay_out_constants (void);
310 static void mips_sim_reset (struct mips_sim *);
311 static void mips_sim_init (struct mips_sim *, state_t);
312 static void mips_sim_next_cycle (struct mips_sim *);
313 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
314 static int mips_sim_wait_regs_2 (rtx *, void *);
315 static void mips_sim_wait_regs_1 (rtx *, void *);
316 static void mips_sim_wait_regs (struct mips_sim *, rtx);
317 static void mips_sim_wait_units (struct mips_sim *, rtx);
318 static void mips_sim_wait_insn (struct mips_sim *, rtx);
319 static void mips_sim_record_set (rtx, rtx, void *);
320 static void mips_sim_issue_insn (struct mips_sim *, rtx);
321 static void mips_sim_issue_nop (struct mips_sim *);
322 static void mips_sim_finish_insn (struct mips_sim *, rtx);
323 static void vr4130_avoid_branch_rt_conflict (rtx);
324 static void vr4130_align_insns (void);
325 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
326 static void mips_avoid_hazards (void);
327 static void mips_reorg (void);
328 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
329 static bool mips_matching_cpu_name_p (const char *, const char *);
330 static const struct mips_cpu_info *mips_parse_cpu (const char *, const char *);
331 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
332 static bool mips_return_in_memory (tree, tree);
333 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
334 static void mips_macc_chains_record (rtx);
335 static void mips_macc_chains_reorder (rtx *, int);
336 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
337 static bool vr4130_true_reg_dependence_p (rtx);
338 static bool vr4130_swap_insns_p (rtx, rtx);
339 static void vr4130_reorder (rtx *, int);
340 static void mips_promote_ready (rtx *, int, int);
341 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
342 static int mips_variable_issue (FILE *, int, rtx, int);
343 static int mips_adjust_cost (rtx, rtx, rtx, int);
344 static int mips_issue_rate (void);
345 static int mips_multipass_dfa_lookahead (void);
346 static void mips_init_libfuncs (void);
347 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
349 static tree mips_build_builtin_va_list (void);
350 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
351 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
353 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
355 static bool mips_vector_mode_supported_p (enum machine_mode);
356 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
357 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
358 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
359 static void mips_init_builtins (void);
360 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree);
361 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
362 enum insn_code, enum mips_fp_condition,
364 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
365 enum insn_code, enum mips_fp_condition,
368 /* Structure to be filled in by compute_frame_size with register
369 save masks, and offsets for the current function. */
371 struct mips_frame_info GTY(())
373 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
374 HOST_WIDE_INT var_size; /* # bytes that variables take up */
375 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
376 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
377 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
378 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
379 unsigned int mask; /* mask of saved gp registers */
380 unsigned int fmask; /* mask of saved fp registers */
381 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
382 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
383 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
384 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
385 bool initialized; /* true if frame size already calculated */
386 int num_gp; /* number of gp registers saved */
387 int num_fp; /* number of fp registers saved */
390 struct machine_function GTY(()) {
391 /* Pseudo-reg holding the value of $28 in a mips16 function which
392 refers to GP relative global variables. */
393 rtx mips16_gp_pseudo_rtx;
395 /* Current frame information, calculated by compute_frame_size. */
396 struct mips_frame_info frame;
398 /* The register to use as the global pointer within this function. */
399 unsigned int global_pointer;
401 /* True if mips_adjust_insn_length should ignore an instruction's
403 bool ignore_hazard_length_p;
405 /* True if the whole function is suitable for .set noreorder and
407 bool all_noreorder_p;
409 /* True if the function is known to have an instruction that needs $gp. */
413 /* Information about a single argument. */
416 /* True if the argument is passed in a floating-point register, or
417 would have been if we hadn't run out of registers. */
420 /* The number of words passed in registers, rounded up. */
421 unsigned int reg_words;
423 /* For EABI, the offset of the first register from GP_ARG_FIRST or
424 FP_ARG_FIRST. For other ABIs, the offset of the first register from
425 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
426 comment for details).
428 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
430 unsigned int reg_offset;
432 /* The number of words that must be passed on the stack, rounded up. */
433 unsigned int stack_words;
435 /* The offset from the start of the stack overflow area of the argument's
436 first stack word. Only meaningful when STACK_WORDS is nonzero. */
437 unsigned int stack_offset;
441 /* Information about an address described by mips_address_type.
447 REG is the base register and OFFSET is the constant offset.
450 REG is the register that contains the high part of the address,
451 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
452 is the type of OFFSET's symbol.
455 SYMBOL_TYPE is the type of symbol being referenced. */
457 struct mips_address_info
459 enum mips_address_type type;
462 enum mips_symbol_type symbol_type;
466 /* One stage in a constant building sequence. These sequences have
470 A = A CODE[1] VALUE[1]
471 A = A CODE[2] VALUE[2]
474 where A is an accumulator, each CODE[i] is a binary rtl operation
475 and each VALUE[i] is a constant integer. */
476 struct mips_integer_op {
478 unsigned HOST_WIDE_INT value;
482 /* The largest number of operations needed to load an integer constant.
483 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
484 When the lowest bit is clear, we can try, but reject a sequence with
485 an extra SLL at the end. */
486 #define MIPS_MAX_INTEGER_OPS 7
489 /* Global variables for machine-dependent things. */
491 /* Threshold for data being put into the small data/bss area, instead
492 of the normal data area. */
493 int mips_section_threshold = -1;
495 /* Count the number of .file directives, so that .loc is up to date. */
496 int num_source_filenames = 0;
498 /* Count the number of sdb related labels are generated (to find block
499 start and end boundaries). */
500 int sdb_label_count = 0;
502 /* Next label # for each statement for Silicon Graphics IRIS systems. */
505 /* Linked list of all externals that are to be emitted when optimizing
506 for the global pointer if they haven't been declared by the end of
507 the program with an appropriate .comm or initialization. */
509 struct extern_list GTY (())
511 struct extern_list *next; /* next external */
512 const char *name; /* name of the external */
513 int size; /* size in bytes */
516 static GTY (()) struct extern_list *extern_head = 0;
518 /* Name of the file containing the current function. */
519 const char *current_function_file = "";
521 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
527 /* The next branch instruction is a branch likely, not branch normal. */
528 int mips_branch_likely;
530 /* The operands passed to the last cmpMM expander. */
533 /* The target cpu for code generation. */
534 enum processor_type mips_arch;
535 const struct mips_cpu_info *mips_arch_info;
537 /* The target cpu for optimization and scheduling. */
538 enum processor_type mips_tune;
539 const struct mips_cpu_info *mips_tune_info;
541 /* Which instruction set architecture to use. */
544 /* Which ABI to use. */
547 /* Strings to hold which cpu and instruction set architecture to use. */
548 const char *mips_arch_string; /* for -march=<xxx> */
549 const char *mips_tune_string; /* for -mtune=<xxx> */
550 const char *mips_isa_string; /* for -mips{1,2,3,4} */
551 const char *mips_abi_string; /* for -mabi={32,n32,64,eabi} */
553 /* Whether we are generating mips16 hard float code. In mips16 mode
554 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
555 -msoft-float was not specified by the user, which means that we
556 should arrange to call mips32 hard floating point code. */
557 int mips16_hard_float;
559 const char *mips_cache_flush_func = CACHE_FLUSH_FUNC;
561 /* If TRUE, we split addresses into their high and low parts in the RTL. */
562 int mips_split_addresses;
564 /* Mode used for saving/restoring general purpose registers. */
565 static enum machine_mode gpr_mode;
567 /* Array giving truth value on whether or not a given hard register
568 can support a given mode. */
569 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
571 /* List of all MIPS punctuation characters used by print_operand. */
572 char mips_print_operand_punct[256];
574 /* Map GCC register number to debugger register number. */
575 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
577 /* A copy of the original flag_delayed_branch: see override_options. */
578 static int mips_flag_delayed_branch;
580 static GTY (()) int mips_output_filename_first_time = 1;
582 /* mips_split_p[X] is true if symbols of type X can be split by
583 mips_split_symbol(). */
584 static bool mips_split_p[NUM_SYMBOL_TYPES];
586 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
587 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
588 if they are matched by a special .md file pattern. */
589 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
591 /* Likewise for HIGHs. */
592 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
594 /* Map hard register number to register class */
595 const enum reg_class mips_regno_to_class[] =
597 LEA_REGS, LEA_REGS, M16_NA_REGS, M16_NA_REGS,
598 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
599 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
600 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
601 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
602 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
603 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
604 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
605 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
606 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
607 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
608 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
609 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
610 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
611 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
612 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
613 HI_REG, LO_REG, NO_REGS, ST_REGS,
614 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
615 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
616 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
617 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
618 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
619 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
620 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
621 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
622 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
623 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
624 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
625 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
626 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
627 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
628 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
629 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
630 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
631 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
632 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
633 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
634 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
635 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
636 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
637 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
638 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
639 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
640 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS
643 /* Map register constraint character to register class. */
644 enum reg_class mips_char_to_class[256];
646 /* A table describing all the processors gcc knows about. Names are
647 matched in the order listed. The first mention of an ISA level is
648 taken as the canonical name for that ISA.
650 To ease comparison, please keep this table in the same order as
651 gas's mips_cpu_info_table[]. */
652 const struct mips_cpu_info mips_cpu_info_table[] = {
653 /* Entries for generic ISAs */
654 { "mips1", PROCESSOR_R3000, 1 },
655 { "mips2", PROCESSOR_R6000, 2 },
656 { "mips3", PROCESSOR_R4000, 3 },
657 { "mips4", PROCESSOR_R8000, 4 },
658 { "mips32", PROCESSOR_4KC, 32 },
659 { "mips32r2", PROCESSOR_M4K, 33 },
660 { "mips64", PROCESSOR_5KC, 64 },
663 { "r3000", PROCESSOR_R3000, 1 },
664 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
665 { "r3900", PROCESSOR_R3900, 1 },
668 { "r6000", PROCESSOR_R6000, 2 },
671 { "r4000", PROCESSOR_R4000, 3 },
672 { "vr4100", PROCESSOR_R4100, 3 },
673 { "vr4111", PROCESSOR_R4111, 3 },
674 { "vr4120", PROCESSOR_R4120, 3 },
675 { "vr4130", PROCESSOR_R4130, 3 },
676 { "vr4300", PROCESSOR_R4300, 3 },
677 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
678 { "r4600", PROCESSOR_R4600, 3 },
679 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
680 { "r4650", PROCESSOR_R4650, 3 },
683 { "r8000", PROCESSOR_R8000, 4 },
684 { "vr5000", PROCESSOR_R5000, 4 },
685 { "vr5400", PROCESSOR_R5400, 4 },
686 { "vr5500", PROCESSOR_R5500, 4 },
687 { "rm7000", PROCESSOR_R7000, 4 },
688 { "rm9000", PROCESSOR_R9000, 4 },
691 { "4kc", PROCESSOR_4KC, 32 },
692 { "4kp", PROCESSOR_4KC, 32 }, /* = 4kc */
694 /* MIPS32 Release 2 */
695 { "m4k", PROCESSOR_M4K, 33 },
698 { "5kc", PROCESSOR_5KC, 64 },
699 { "20kc", PROCESSOR_20KC, 64 },
700 { "sb1", PROCESSOR_SB1, 64 },
701 { "sr71000", PROCESSOR_SR71000, 64 },
707 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
708 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
709 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
712 /* Initialize the GCC target structure. */
713 #undef TARGET_ASM_ALIGNED_HI_OP
714 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
715 #undef TARGET_ASM_ALIGNED_SI_OP
716 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
717 #undef TARGET_ASM_ALIGNED_DI_OP
718 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
720 #undef TARGET_ASM_FUNCTION_PROLOGUE
721 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
722 #undef TARGET_ASM_FUNCTION_EPILOGUE
723 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
724 #undef TARGET_ASM_SELECT_RTX_SECTION
725 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
726 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
727 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
729 #undef TARGET_SCHED_REORDER
730 #define TARGET_SCHED_REORDER mips_sched_reorder
731 #undef TARGET_SCHED_VARIABLE_ISSUE
732 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
733 #undef TARGET_SCHED_ADJUST_COST
734 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
735 #undef TARGET_SCHED_ISSUE_RATE
736 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
737 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
738 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
739 mips_multipass_dfa_lookahead
741 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
742 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
744 #undef TARGET_VALID_POINTER_MODE
745 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
746 #undef TARGET_RTX_COSTS
747 #define TARGET_RTX_COSTS mips_rtx_costs
748 #undef TARGET_ADDRESS_COST
749 #define TARGET_ADDRESS_COST mips_address_cost
751 #undef TARGET_IN_SMALL_DATA_P
752 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
754 #undef TARGET_MACHINE_DEPENDENT_REORG
755 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
757 #undef TARGET_ASM_FILE_START
758 #undef TARGET_ASM_FILE_END
759 #define TARGET_ASM_FILE_START mips_file_start
760 #define TARGET_ASM_FILE_END mips_file_end
761 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
762 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
764 #undef TARGET_INIT_LIBFUNCS
765 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
767 #undef TARGET_BUILD_BUILTIN_VA_LIST
768 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
769 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
770 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
772 #undef TARGET_PROMOTE_FUNCTION_ARGS
773 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
774 #undef TARGET_PROMOTE_FUNCTION_RETURN
775 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
776 #undef TARGET_PROMOTE_PROTOTYPES
777 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
779 #undef TARGET_RETURN_IN_MEMORY
780 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
781 #undef TARGET_RETURN_IN_MSB
782 #define TARGET_RETURN_IN_MSB mips_return_in_msb
784 #undef TARGET_ASM_OUTPUT_MI_THUNK
785 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
786 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
787 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
789 #undef TARGET_SETUP_INCOMING_VARARGS
790 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
791 #undef TARGET_STRICT_ARGUMENT_NAMING
792 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
793 #undef TARGET_MUST_PASS_IN_STACK
794 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
795 #undef TARGET_PASS_BY_REFERENCE
796 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
797 #undef TARGET_CALLEE_COPIES
798 #define TARGET_CALLEE_COPIES mips_callee_copies
800 #undef TARGET_VECTOR_MODE_SUPPORTED_P
801 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
803 #undef TARGET_INIT_BUILTINS
804 #define TARGET_INIT_BUILTINS mips_init_builtins
805 #undef TARGET_EXPAND_BUILTIN
806 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
808 struct gcc_target targetm = TARGET_INITIALIZER;
810 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
812 static enum mips_symbol_type
813 mips_classify_symbol (rtx x)
815 if (GET_CODE (x) == LABEL_REF)
818 return SYMBOL_CONSTANT_POOL;
820 return SYMBOL_GOT_LOCAL;
821 return SYMBOL_GENERAL;
824 gcc_assert (GET_CODE (x) == SYMBOL_REF);
826 if (CONSTANT_POOL_ADDRESS_P (x))
829 return SYMBOL_CONSTANT_POOL;
832 return SYMBOL_GOT_LOCAL;
834 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
835 return SYMBOL_SMALL_DATA;
837 return SYMBOL_GENERAL;
840 if (SYMBOL_REF_SMALL_P (x))
841 return SYMBOL_SMALL_DATA;
845 if (SYMBOL_REF_DECL (x) == 0)
846 return SYMBOL_REF_LOCAL_P (x) ? SYMBOL_GOT_LOCAL : SYMBOL_GOT_GLOBAL;
848 /* There are three cases to consider:
850 - o32 PIC (either with or without explicit relocs)
851 - n32/n64 PIC without explicit relocs
852 - n32/n64 PIC with explicit relocs
854 In the first case, both local and global accesses will use an
855 R_MIPS_GOT16 relocation. We must correctly predict which of
856 the two semantics (local or global) the assembler and linker
857 will apply. The choice doesn't depend on the symbol's
858 visibility, so we deliberately ignore decl_visibility and
861 In the second case, the assembler will not use R_MIPS_GOT16
862 relocations, but it chooses between local and global accesses
863 in the same way as for o32 PIC.
865 In the third case we have more freedom since both forms of
866 access will work for any kind of symbol. However, there seems
867 little point in doing things differently. */
868 if (DECL_P (SYMBOL_REF_DECL (x)) && TREE_PUBLIC (SYMBOL_REF_DECL (x)))
869 return SYMBOL_GOT_GLOBAL;
871 return SYMBOL_GOT_LOCAL;
874 return SYMBOL_GENERAL;
878 /* Split X into a base and a constant offset, storing them in *BASE
879 and *OFFSET respectively. */
882 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
886 if (GET_CODE (x) == CONST)
889 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
891 *offset += INTVAL (XEXP (x, 1));
898 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
899 to the same object as SYMBOL. */
902 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
904 if (GET_CODE (symbol) != SYMBOL_REF)
907 if (CONSTANT_POOL_ADDRESS_P (symbol)
909 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
912 if (SYMBOL_REF_DECL (symbol) != 0
914 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
921 /* Return true if X is a symbolic constant that can be calculated in
922 the same way as a bare symbol. If it is, store the type of the
923 symbol in *SYMBOL_TYPE. */
926 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
928 HOST_WIDE_INT offset;
930 mips_split_const (x, &x, &offset);
931 if (UNSPEC_ADDRESS_P (x))
932 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
933 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
934 *symbol_type = mips_classify_symbol (x);
941 /* Check whether a nonzero offset is valid for the underlying
943 switch (*symbol_type)
949 /* If the target has 64-bit pointers and the object file only
950 supports 32-bit symbols, the values of those symbols will be
951 sign-extended. In this case we can't allow an arbitrary offset
952 in case the 32-bit value X + OFFSET has a different sign from X. */
953 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
954 return mips_offset_within_object_p (x, offset);
956 /* In other cases the relocations can handle any offset. */
959 case SYMBOL_CONSTANT_POOL:
960 /* Allow constant pool references to be converted to LABEL+CONSTANT.
961 In this case, we no longer have access to the underlying constant,
962 but the original symbol-based access was known to be valid. */
963 if (GET_CODE (x) == LABEL_REF)
968 case SYMBOL_SMALL_DATA:
969 /* Make sure that the offset refers to something within the
970 underlying object. This should guarantee that the final
971 PC- or GP-relative offset is within the 16-bit limit. */
972 return mips_offset_within_object_p (x, offset);
974 case SYMBOL_GOT_LOCAL:
975 case SYMBOL_GOTOFF_PAGE:
976 /* The linker should provide enough local GOT entries for a
977 16-bit offset. Larger offsets may lead to GOT overflow. */
978 return SMALL_OPERAND (offset);
980 case SYMBOL_GOT_GLOBAL:
981 case SYMBOL_GOTOFF_GLOBAL:
982 case SYMBOL_GOTOFF_CALL:
983 case SYMBOL_GOTOFF_LOADGP:
990 /* Return true if X is a symbolic constant whose value is not split
991 into separate relocations. */
994 mips_atomic_symbolic_constant_p (rtx x)
996 enum mips_symbol_type type;
997 return mips_symbolic_constant_p (x, &type) && !mips_split_p[type];
1001 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1004 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1006 if (regno >= FIRST_PSEUDO_REGISTER)
1010 regno = reg_renumber[regno];
1013 /* These fake registers will be eliminated to either the stack or
1014 hard frame pointer, both of which are usually valid base registers.
1015 Reload deals with the cases where the eliminated form isn't valid. */
1016 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1019 /* In mips16 mode, the stack pointer can only address word and doubleword
1020 values, nothing smaller. There are two problems here:
1022 (a) Instantiating virtual registers can introduce new uses of the
1023 stack pointer. If these virtual registers are valid addresses,
1024 the stack pointer should be too.
1026 (b) Most uses of the stack pointer are not made explicit until
1027 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1028 We don't know until that stage whether we'll be eliminating to the
1029 stack pointer (which needs the restriction) or the hard frame
1030 pointer (which doesn't).
1032 All in all, it seems more consistent to only enforce this restriction
1033 during and after reload. */
1034 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1035 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1037 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1041 /* Return true if X is a valid base register for the given mode.
1042 Allow only hard registers if STRICT. */
1045 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1047 if (!strict && GET_CODE (x) == SUBREG)
1050 return (GET_CODE (x) == REG
1051 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1055 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1056 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1059 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1060 enum machine_mode mode)
1062 switch (symbol_type)
1064 case SYMBOL_GENERAL:
1065 return !TARGET_MIPS16;
1067 case SYMBOL_SMALL_DATA:
1070 case SYMBOL_CONSTANT_POOL:
1071 /* PC-relative addressing is only available for lw and ld. */
1072 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1074 case SYMBOL_GOT_LOCAL:
1077 case SYMBOL_GOT_GLOBAL:
1078 /* The address will have to be loaded from the GOT first. */
1081 case SYMBOL_GOTOFF_PAGE:
1082 case SYMBOL_GOTOFF_GLOBAL:
1083 case SYMBOL_GOTOFF_CALL:
1084 case SYMBOL_GOTOFF_LOADGP:
1085 case SYMBOL_64_HIGH:
1094 /* Return true if X is a valid address for machine mode MODE. If it is,
1095 fill in INFO appropriately. STRICT is true if we should only accept
1096 hard base registers. */
1099 mips_classify_address (struct mips_address_info *info, rtx x,
1100 enum machine_mode mode, int strict)
1102 switch (GET_CODE (x))
1106 info->type = ADDRESS_REG;
1108 info->offset = const0_rtx;
1109 return mips_valid_base_register_p (info->reg, mode, strict);
1112 info->type = ADDRESS_REG;
1113 info->reg = XEXP (x, 0);
1114 info->offset = XEXP (x, 1);
1115 return (mips_valid_base_register_p (info->reg, mode, strict)
1116 && const_arith_operand (info->offset, VOIDmode));
1119 info->type = ADDRESS_LO_SUM;
1120 info->reg = XEXP (x, 0);
1121 info->offset = XEXP (x, 1);
1122 return (mips_valid_base_register_p (info->reg, mode, strict)
1123 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1124 && mips_symbolic_address_p (info->symbol_type, mode)
1125 && mips_lo_relocs[info->symbol_type] != 0);
1128 /* Small-integer addresses don't occur very often, but they
1129 are legitimate if $0 is a valid base register. */
1130 info->type = ADDRESS_CONST_INT;
1131 return !TARGET_MIPS16 && SMALL_INT (x);
1136 info->type = ADDRESS_SYMBOLIC;
1137 return (mips_symbolic_constant_p (x, &info->symbol_type)
1138 && mips_symbolic_address_p (info->symbol_type, mode)
1139 && !mips_split_p[info->symbol_type]);
1146 /* Return the number of instructions needed to load a symbol of the
1147 given type into a register. If valid in an address, the same number
1148 of instructions are needed for loads and stores. Treat extended
1149 mips16 instructions as two instructions. */
1152 mips_symbol_insns (enum mips_symbol_type type)
1156 case SYMBOL_GENERAL:
1157 /* In mips16 code, general symbols must be fetched from the
1162 /* When using 64-bit symbols, we need 5 preparatory instructions,
1165 lui $at,%highest(symbol)
1166 daddiu $at,$at,%higher(symbol)
1168 daddiu $at,$at,%hi(symbol)
1171 The final address is then $at + %lo(symbol). With 32-bit
1172 symbols we just need a preparatory lui. */
1173 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1175 case SYMBOL_SMALL_DATA:
1178 case SYMBOL_CONSTANT_POOL:
1179 /* This case is for mips16 only. Assume we'll need an
1180 extended instruction. */
1183 case SYMBOL_GOT_LOCAL:
1184 case SYMBOL_GOT_GLOBAL:
1185 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1186 the local/global classification is accurate. See override_options
1189 The worst cases are:
1191 (1) For local symbols when generating o32 or o64 code. The assembler
1197 ...and the final address will be $at + %lo(symbol).
1199 (2) For global symbols when -mxgot. The assembler will use:
1201 lui $at,%got_hi(symbol)
1204 ...and the final address will be $at + %got_lo(symbol). */
1207 case SYMBOL_GOTOFF_PAGE:
1208 case SYMBOL_GOTOFF_GLOBAL:
1209 case SYMBOL_GOTOFF_CALL:
1210 case SYMBOL_GOTOFF_LOADGP:
1211 case SYMBOL_64_HIGH:
1214 /* Check whether the offset is a 16- or 32-bit value. */
1215 return mips_split_p[type] ? 2 : 1;
1220 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1223 mips_stack_address_p (rtx x, enum machine_mode mode)
1225 struct mips_address_info addr;
1227 return (mips_classify_address (&addr, x, mode, false)
1228 && addr.type == ADDRESS_REG
1229 && addr.reg == stack_pointer_rtx);
1232 /* Return true if a value at OFFSET bytes from BASE can be accessed
1233 using an unextended mips16 instruction. MODE is the mode of the
1236 Usually the offset in an unextended instruction is a 5-bit field.
1237 The offset is unsigned and shifted left once for HIs, twice
1238 for SIs, and so on. An exception is SImode accesses off the
1239 stack pointer, which have an 8-bit immediate field. */
1242 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1245 && GET_CODE (offset) == CONST_INT
1246 && INTVAL (offset) >= 0
1247 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1249 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1250 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1251 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1257 /* Return the number of instructions needed to load or store a value
1258 of mode MODE at X. Return 0 if X isn't valid for MODE.
1260 For mips16 code, count extended instructions as two instructions. */
1263 mips_address_insns (rtx x, enum machine_mode mode)
1265 struct mips_address_info addr;
1268 if (mode == BLKmode)
1269 /* BLKmode is used for single unaligned loads and stores. */
1272 /* Each word of a multi-word value will be accessed individually. */
1273 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1275 if (mips_classify_address (&addr, x, mode, false))
1280 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1284 case ADDRESS_LO_SUM:
1285 return (TARGET_MIPS16 ? factor * 2 : factor);
1287 case ADDRESS_CONST_INT:
1290 case ADDRESS_SYMBOLIC:
1291 return factor * mips_symbol_insns (addr.symbol_type);
1297 /* Likewise for constant X. */
1300 mips_const_insns (rtx x)
1302 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1303 enum mips_symbol_type symbol_type;
1304 HOST_WIDE_INT offset;
1306 switch (GET_CODE (x))
1310 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1311 || !mips_split_p[symbol_type])
1318 /* Unsigned 8-bit constants can be loaded using an unextended
1319 LI instruction. Unsigned 16-bit constants can be loaded
1320 using an extended LI. Negative constants must be loaded
1321 using LI and then negated. */
1322 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1323 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1324 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1325 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1328 return mips_build_integer (codes, INTVAL (x));
1332 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1338 /* See if we can refer to X directly. */
1339 if (mips_symbolic_constant_p (x, &symbol_type))
1340 return mips_symbol_insns (symbol_type);
1342 /* Otherwise try splitting the constant into a base and offset.
1343 16-bit offsets can be added using an extra addiu. Larger offsets
1344 must be calculated separately and then added to the base. */
1345 mips_split_const (x, &x, &offset);
1348 int n = mips_const_insns (x);
1351 if (SMALL_OPERAND (offset))
1354 return n + 1 + mips_build_integer (codes, offset);
1361 return mips_symbol_insns (mips_classify_symbol (x));
1369 /* Return the number of instructions needed for memory reference X.
1370 Count extended mips16 instructions as two instructions. */
1373 mips_fetch_insns (rtx x)
1375 gcc_assert (GET_CODE (x) == MEM);
1376 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1380 /* Return the number of instructions needed for an integer division. */
1383 mips_idiv_insns (void)
1388 if (TARGET_CHECK_ZERO_DIV)
1390 if (GENERATE_DIVIDE_TRAPS)
1396 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1401 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1402 returns a nonzero value if X is a legitimate address for a memory
1403 operand of the indicated MODE. STRICT is nonzero if this function
1404 is called during reload. */
1407 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1409 struct mips_address_info addr;
1411 return mips_classify_address (&addr, x, mode, strict);
1415 /* Copy VALUE to a register and return that register. If new psuedos
1416 are allowed, copy it into a new register, otherwise use DEST. */
1419 mips_force_temporary (rtx dest, rtx value)
1421 if (!no_new_pseudos)
1422 return force_reg (Pmode, value);
1425 emit_move_insn (copy_rtx (dest), value);
1431 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1432 and is used to load the high part into a register. */
1435 mips_split_symbol (rtx temp, rtx addr)
1440 high = mips16_gp_pseudo_reg ();
1442 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1443 return gen_rtx_LO_SUM (Pmode, high, addr);
1447 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1448 type SYMBOL_TYPE. */
1451 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1454 HOST_WIDE_INT offset;
1456 mips_split_const (address, &base, &offset);
1457 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1458 UNSPEC_ADDRESS_FIRST + symbol_type);
1459 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1463 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1464 high part to BASE and return the result. Just return BASE otherwise.
1465 TEMP is available as a temporary register if needed.
1467 The returned expression can be used as the first operand to a LO_SUM. */
1470 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1471 enum mips_symbol_type symbol_type)
1473 if (mips_split_p[symbol_type])
1475 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1476 addr = mips_force_temporary (temp, addr);
1477 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1483 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1484 mips_force_temporary; it is only needed when OFFSET is not a
1488 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1490 if (!SMALL_OPERAND (offset))
1495 /* Load the full offset into a register so that we can use
1496 an unextended instruction for the address itself. */
1497 high = GEN_INT (offset);
1502 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1503 high = GEN_INT (CONST_HIGH_PART (offset));
1504 offset = CONST_LOW_PART (offset);
1506 high = mips_force_temporary (temp, high);
1507 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1509 return plus_constant (reg, offset);
1513 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
1514 be legitimized in a way that the generic machinery might not expect,
1515 put the new address in *XLOC and return true. MODE is the mode of
1516 the memory being accessed. */
1519 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
1521 enum mips_symbol_type symbol_type;
1523 /* See if the address can split into a high part and a LO_SUM. */
1524 if (mips_symbolic_constant_p (*xloc, &symbol_type)
1525 && mips_symbolic_address_p (symbol_type, mode)
1526 && mips_split_p[symbol_type])
1528 *xloc = mips_split_symbol (0, *xloc);
1532 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
1534 /* Handle REG + CONSTANT using mips_add_offset. */
1537 reg = XEXP (*xloc, 0);
1538 if (!mips_valid_base_register_p (reg, mode, 0))
1539 reg = copy_to_mode_reg (Pmode, reg);
1540 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
1548 /* Subroutine of mips_build_integer (with the same interface).
1549 Assume that the final action in the sequence should be a left shift. */
1552 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1554 unsigned int i, shift;
1556 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1557 since signed numbers are easier to load than unsigned ones. */
1559 while ((value & 1) == 0)
1560 value /= 2, shift++;
1562 i = mips_build_integer (codes, value);
1563 codes[i].code = ASHIFT;
1564 codes[i].value = shift;
1569 /* As for mips_build_shift, but assume that the final action will be
1570 an IOR or PLUS operation. */
1573 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1575 unsigned HOST_WIDE_INT high;
1578 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1579 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1581 /* The constant is too complex to load with a simple lui/ori pair
1582 so our goal is to clear as many trailing zeros as possible.
1583 In this case, we know bit 16 is set and that the low 16 bits
1584 form a negative number. If we subtract that number from VALUE,
1585 we will clear at least the lowest 17 bits, maybe more. */
1586 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1587 codes[i].code = PLUS;
1588 codes[i].value = CONST_LOW_PART (value);
1592 i = mips_build_integer (codes, high);
1593 codes[i].code = IOR;
1594 codes[i].value = value & 0xffff;
1600 /* Fill CODES with a sequence of rtl operations to load VALUE.
1601 Return the number of operations needed. */
1604 mips_build_integer (struct mips_integer_op *codes,
1605 unsigned HOST_WIDE_INT value)
1607 if (SMALL_OPERAND (value)
1608 || SMALL_OPERAND_UNSIGNED (value)
1609 || LUI_OPERAND (value))
1611 /* The value can be loaded with a single instruction. */
1612 codes[0].code = UNKNOWN;
1613 codes[0].value = value;
1616 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1618 /* Either the constant is a simple LUI/ORI combination or its
1619 lowest bit is set. We don't want to shift in this case. */
1620 return mips_build_lower (codes, value);
1622 else if ((value & 0xffff) == 0)
1624 /* The constant will need at least three actions. The lowest
1625 16 bits are clear, so the final action will be a shift. */
1626 return mips_build_shift (codes, value);
1630 /* The final action could be a shift, add or inclusive OR.
1631 Rather than use a complex condition to select the best
1632 approach, try both mips_build_shift and mips_build_lower
1633 and pick the one that gives the shortest sequence.
1634 Note that this case is only used once per constant. */
1635 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1636 unsigned int cost, alt_cost;
1638 cost = mips_build_shift (codes, value);
1639 alt_cost = mips_build_lower (alt_codes, value);
1640 if (alt_cost < cost)
1642 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1650 /* Move VALUE into register DEST. */
1653 mips_move_integer (rtx dest, unsigned HOST_WIDE_INT value)
1655 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1656 enum machine_mode mode;
1657 unsigned int i, cost;
1660 mode = GET_MODE (dest);
1661 cost = mips_build_integer (codes, value);
1663 /* Apply each binary operation to X. Invariant: X is a legitimate
1664 source operand for a SET pattern. */
1665 x = GEN_INT (codes[0].value);
1666 for (i = 1; i < cost; i++)
1669 emit_move_insn (dest, x), x = dest;
1671 x = force_reg (mode, x);
1672 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
1675 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1679 /* Subroutine of mips_legitimize_move. Move constant SRC into register
1680 DEST given that SRC satisfies immediate_operand but doesn't satisfy
1684 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
1687 HOST_WIDE_INT offset;
1688 enum mips_symbol_type symbol_type;
1690 /* Split moves of big integers into smaller pieces. In mips16 code,
1691 it's better to force the constant into memory instead. */
1692 if (GET_CODE (src) == CONST_INT && !TARGET_MIPS16)
1694 mips_move_integer (dest, INTVAL (src));
1698 /* See if the symbol can be split. For mips16, this is often worse than
1699 forcing it in the constant pool since it needs the single-register form
1700 of addiu or daddiu. */
1702 && mips_symbolic_constant_p (src, &symbol_type)
1703 && mips_split_p[symbol_type])
1705 emit_move_insn (dest, mips_split_symbol (dest, src));
1709 /* If we have (const (plus symbol offset)), load the symbol first
1710 and then add in the offset. This is usually better than forcing
1711 the constant into memory, at least in non-mips16 code. */
1712 mips_split_const (src, &base, &offset);
1715 && (!no_new_pseudos || SMALL_OPERAND (offset)))
1717 base = mips_force_temporary (dest, base);
1718 emit_move_insn (dest, mips_add_offset (0, base, offset));
1722 src = force_const_mem (mode, src);
1724 /* When using explicit relocs, constant pool references are sometimes
1725 not legitimate addresses. */
1726 if (!memory_operand (src, VOIDmode))
1727 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
1728 emit_move_insn (dest, src);
1732 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
1733 sequence that is valid. */
1736 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
1738 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
1740 emit_move_insn (dest, force_reg (mode, src));
1744 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
1745 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1746 && REG_P (src) && MD_REG_P (REGNO (src))
1747 && REG_P (dest) && GP_REG_P (REGNO (dest)))
1749 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
1750 if (GET_MODE_SIZE (mode) <= 4)
1751 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
1752 gen_rtx_REG (SImode, REGNO (src)),
1753 gen_rtx_REG (SImode, other_regno)));
1755 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
1756 gen_rtx_REG (DImode, REGNO (src)),
1757 gen_rtx_REG (DImode, other_regno)));
1761 /* We need to deal with constants that would be legitimate
1762 immediate_operands but not legitimate move_operands. */
1763 if (CONSTANT_P (src) && !move_operand (src, mode))
1765 mips_legitimize_const_move (mode, dest, src);
1766 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
1772 /* We need a lot of little routines to check constant values on the
1773 mips16. These are used to figure out how long the instruction will
1774 be. It would be much better to do this using constraints, but
1775 there aren't nearly enough letters available. */
1778 m16_check_op (rtx op, int low, int high, int mask)
1780 return (GET_CODE (op) == CONST_INT
1781 && INTVAL (op) >= low
1782 && INTVAL (op) <= high
1783 && (INTVAL (op) & mask) == 0);
1787 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1789 return m16_check_op (op, 0x1, 0x8, 0);
1793 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1795 return m16_check_op (op, - 0x8, 0x7, 0);
1799 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1801 return m16_check_op (op, - 0x7, 0x8, 0);
1805 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1807 return m16_check_op (op, - 0x10, 0xf, 0);
1811 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1813 return m16_check_op (op, - 0xf, 0x10, 0);
1817 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1819 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
1823 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1825 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
1829 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1831 return m16_check_op (op, - 0x80, 0x7f, 0);
1835 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1837 return m16_check_op (op, - 0x7f, 0x80, 0);
1841 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1843 return m16_check_op (op, 0x0, 0xff, 0);
1847 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1849 return m16_check_op (op, - 0xff, 0x0, 0);
1853 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1855 return m16_check_op (op, - 0x1, 0xfe, 0);
1859 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1861 return m16_check_op (op, 0x0, 0xff << 2, 3);
1865 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1867 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
1871 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1873 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
1877 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1879 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
1883 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
1885 enum machine_mode mode = GET_MODE (x);
1892 /* Always return 0, since we don't have different sized
1893 instructions, hence different costs according to Richard
1899 /* A number between 1 and 8 inclusive is efficient for a shift.
1900 Otherwise, we will need an extended instruction. */
1901 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
1902 || (outer_code) == LSHIFTRT)
1904 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
1907 *total = COSTS_N_INSNS (1);
1911 /* We can use cmpi for an xor with an unsigned 16 bit value. */
1912 if ((outer_code) == XOR
1913 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
1919 /* We may be able to use slt or sltu for a comparison with a
1920 signed 16 bit value. (The boundary conditions aren't quite
1921 right, but this is just a heuristic anyhow.) */
1922 if (((outer_code) == LT || (outer_code) == LE
1923 || (outer_code) == GE || (outer_code) == GT
1924 || (outer_code) == LTU || (outer_code) == LEU
1925 || (outer_code) == GEU || (outer_code) == GTU)
1926 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
1932 /* Equality comparisons with 0 are cheap. */
1933 if (((outer_code) == EQ || (outer_code) == NE)
1940 /* Constants in the range 0...255 can be loaded with an unextended
1941 instruction. They are therefore as cheap as a register move.
1943 Given the choice between "li R1,0...255" and "move R1,R2"
1944 (where R2 is a known constant), it is usually better to use "li",
1945 since we do not want to unnecessarily extend the lifetime of R2. */
1946 if (outer_code == SET
1948 && INTVAL (x) < 256)
1954 /* Otherwise fall through to the handling below. */
1960 if (LEGITIMATE_CONSTANT_P (x))
1962 *total = COSTS_N_INSNS (1);
1967 /* The value will need to be fetched from the constant pool. */
1968 *total = CONSTANT_POOL_COST;
1974 /* If the address is legitimate, return the number of
1975 instructions it needs, otherwise use the default handling. */
1976 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
1979 *total = COSTS_N_INSNS (1 + n);
1986 *total = COSTS_N_INSNS (6);
1990 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
1996 if (mode == DImode && !TARGET_64BIT)
1998 *total = COSTS_N_INSNS (2);
2006 if (mode == DImode && !TARGET_64BIT)
2008 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2015 if (mode == SFmode || mode == DFmode)
2016 *total = COSTS_N_INSNS (1);
2018 *total = COSTS_N_INSNS (4);
2022 *total = COSTS_N_INSNS (1);
2027 if (mode == SFmode || mode == DFmode)
2029 if (TUNE_MIPS3000 || TUNE_MIPS3900)
2030 *total = COSTS_N_INSNS (2);
2031 else if (TUNE_MIPS6000)
2032 *total = COSTS_N_INSNS (3);
2034 *total = COSTS_N_INSNS (4);
2036 *total = COSTS_N_INSNS (6);
2039 if (mode == DImode && !TARGET_64BIT)
2041 *total = COSTS_N_INSNS (4);
2047 if (mode == DImode && !TARGET_64BIT)
2061 *total = COSTS_N_INSNS (4);
2062 else if (TUNE_MIPS6000
2065 *total = COSTS_N_INSNS (5);
2067 *total = COSTS_N_INSNS (7);
2074 *total = COSTS_N_INSNS (4);
2075 else if (TUNE_MIPS3000
2078 *total = COSTS_N_INSNS (5);
2079 else if (TUNE_MIPS6000
2082 *total = COSTS_N_INSNS (6);
2084 *total = COSTS_N_INSNS (8);
2089 *total = COSTS_N_INSNS (12);
2090 else if (TUNE_MIPS3900)
2091 *total = COSTS_N_INSNS (2);
2092 else if (TUNE_MIPS4130)
2093 *total = COSTS_N_INSNS (mode == DImode ? 6 : 4);
2094 else if (TUNE_MIPS5400 || TUNE_SB1)
2095 *total = COSTS_N_INSNS (mode == DImode ? 4 : 3);
2096 else if (TUNE_MIPS5500 || TUNE_MIPS7000)
2097 *total = COSTS_N_INSNS (mode == DImode ? 9 : 5);
2098 else if (TUNE_MIPS9000)
2099 *total = COSTS_N_INSNS (mode == DImode ? 8 : 3);
2100 else if (TUNE_MIPS6000)
2101 *total = COSTS_N_INSNS (17);
2102 else if (TUNE_MIPS5000)
2103 *total = COSTS_N_INSNS (5);
2105 *total = COSTS_N_INSNS (10);
2114 *total = COSTS_N_INSNS (12);
2115 else if (TUNE_MIPS6000)
2116 *total = COSTS_N_INSNS (15);
2118 *total = COSTS_N_INSNS (24);
2119 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2120 *total = COSTS_N_INSNS (30);
2122 *total = COSTS_N_INSNS (23);
2130 *total = COSTS_N_INSNS (19);
2131 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2132 *total = COSTS_N_INSNS (59);
2133 else if (TUNE_MIPS6000)
2134 *total = COSTS_N_INSNS (16);
2136 *total = COSTS_N_INSNS (32);
2138 *total = COSTS_N_INSNS (36);
2147 *total = COSTS_N_INSNS (35);
2148 else if (TUNE_MIPS6000)
2149 *total = COSTS_N_INSNS (38);
2150 else if (TUNE_MIPS5000)
2151 *total = COSTS_N_INSNS (36);
2153 *total = COSTS_N_INSNS ((mode == SImode) ? 36 : 68);
2154 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2155 *total = COSTS_N_INSNS ((mode == SImode) ? 42 : 74);
2157 *total = COSTS_N_INSNS (69);
2161 /* A sign extend from SImode to DImode in 64 bit mode is often
2162 zero instructions, because the result can often be used
2163 directly by another instruction; we'll call it one. */
2164 if (TARGET_64BIT && mode == DImode
2165 && GET_MODE (XEXP (x, 0)) == SImode)
2166 *total = COSTS_N_INSNS (1);
2168 *total = COSTS_N_INSNS (2);
2172 if (TARGET_64BIT && mode == DImode
2173 && GET_MODE (XEXP (x, 0)) == SImode)
2174 *total = COSTS_N_INSNS (2);
2176 *total = COSTS_N_INSNS (1);
2184 /* Provide the costs of an addressing mode that contains ADDR.
2185 If ADDR is not a valid address, its cost is irrelevant. */
2188 mips_address_cost (rtx addr)
2190 return mips_address_insns (addr, SImode);
2193 /* Return one word of double-word value OP, taking into account the fixed
2194 endianness of certain registers. HIGH_P is true to select the high part,
2195 false to select the low part. */
2198 mips_subword (rtx op, int high_p)
2201 enum machine_mode mode;
2203 mode = GET_MODE (op);
2204 if (mode == VOIDmode)
2207 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2208 byte = UNITS_PER_WORD;
2212 if (GET_CODE (op) == REG)
2214 if (FP_REG_P (REGNO (op)))
2215 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2216 if (REGNO (op) == HI_REGNUM)
2217 return gen_rtx_REG (word_mode, high_p ? HI_REGNUM : LO_REGNUM);
2220 if (GET_CODE (op) == MEM)
2221 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2223 return simplify_gen_subreg (word_mode, op, mode, byte);
2227 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2230 mips_split_64bit_move_p (rtx dest, rtx src)
2235 /* FP->FP moves can be done in a single instruction. */
2236 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2239 /* Check for floating-point loads and stores. They can be done using
2240 ldc1 and sdc1 on MIPS II and above. */
2243 if (FP_REG_RTX_P (dest) && GET_CODE (src) == MEM)
2245 if (FP_REG_RTX_P (src) && GET_CODE (dest) == MEM)
2252 /* Split a 64-bit move from SRC to DEST assuming that
2253 mips_split_64bit_move_p holds.
2255 Moves into and out of FPRs cause some difficulty here. Such moves
2256 will always be DFmode, since paired FPRs are not allowed to store
2257 DImode values. The most natural representation would be two separate
2258 32-bit moves, such as:
2260 (set (reg:SI $f0) (mem:SI ...))
2261 (set (reg:SI $f1) (mem:SI ...))
2263 However, the second insn is invalid because odd-numbered FPRs are
2264 not allowed to store independent values. Use the patterns load_df_low,
2265 load_df_high and store_df_high instead. */
2268 mips_split_64bit_move (rtx dest, rtx src)
2270 if (FP_REG_RTX_P (dest))
2272 /* Loading an FPR from memory or from GPRs. */
2273 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2274 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2277 else if (FP_REG_RTX_P (src))
2279 /* Storing an FPR into memory or GPRs. */
2280 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2281 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2285 /* The operation can be split into two normal moves. Decide in
2286 which order to do them. */
2289 low_dest = mips_subword (dest, 0);
2290 if (GET_CODE (low_dest) == REG
2291 && reg_overlap_mentioned_p (low_dest, src))
2293 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2294 emit_move_insn (low_dest, mips_subword (src, 0));
2298 emit_move_insn (low_dest, mips_subword (src, 0));
2299 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2304 /* Return the appropriate instructions to move SRC into DEST. Assume
2305 that SRC is operand 1 and DEST is operand 0. */
2308 mips_output_move (rtx dest, rtx src)
2310 enum rtx_code dest_code, src_code;
2313 dest_code = GET_CODE (dest);
2314 src_code = GET_CODE (src);
2315 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2317 if (dbl_p && mips_split_64bit_move_p (dest, src))
2320 if ((src_code == REG && GP_REG_P (REGNO (src)))
2321 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2323 if (dest_code == REG)
2325 if (GP_REG_P (REGNO (dest)))
2326 return "move\t%0,%z1";
2328 if (MD_REG_P (REGNO (dest)))
2331 if (FP_REG_P (REGNO (dest)))
2332 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2334 if (ALL_COP_REG_P (REGNO (dest)))
2336 static char retval[] = "dmtc_\t%z1,%0";
2338 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2339 return (dbl_p ? retval : retval + 1);
2342 if (dest_code == MEM)
2343 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2345 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2347 if (src_code == REG)
2349 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2350 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2352 if (FP_REG_P (REGNO (src)))
2353 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2355 if (ALL_COP_REG_P (REGNO (src)))
2357 static char retval[] = "dmfc_\t%0,%1";
2359 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2360 return (dbl_p ? retval : retval + 1);
2364 if (src_code == MEM)
2365 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2367 if (src_code == CONST_INT)
2369 /* Don't use the X format, because that will give out of
2370 range numbers for 64 bit hosts and 32 bit targets. */
2372 return "li\t%0,%1\t\t\t# %X1";
2374 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2377 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2381 if (src_code == HIGH)
2382 return "lui\t%0,%h1";
2384 if (CONST_GP_P (src))
2385 return "move\t%0,%1";
2387 if (symbolic_operand (src, VOIDmode))
2388 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2390 if (src_code == REG && FP_REG_P (REGNO (src)))
2392 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2394 if (GET_MODE (dest) == V2SFmode)
2395 return "mov.ps\t%0,%1";
2397 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2400 if (dest_code == MEM)
2401 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2403 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2405 if (src_code == MEM)
2406 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
2408 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2410 static char retval[] = "l_c_\t%0,%1";
2412 retval[1] = (dbl_p ? 'd' : 'w');
2413 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2416 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2418 static char retval[] = "s_c_\t%1,%0";
2420 retval[1] = (dbl_p ? 'd' : 'w');
2421 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2427 /* Restore $gp from its save slot. Valid only when using o32 or
2431 mips_restore_gp (void)
2435 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
2437 address = mips_add_offset (pic_offset_table_rtx,
2438 frame_pointer_needed
2439 ? hard_frame_pointer_rtx
2440 : stack_pointer_rtx,
2441 current_function_outgoing_args_size);
2442 slot = gen_rtx_MEM (Pmode, address);
2444 emit_move_insn (pic_offset_table_rtx, slot);
2445 if (!TARGET_EXPLICIT_RELOCS)
2446 emit_insn (gen_blockage ());
2449 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2452 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2454 emit_insn (gen_rtx_SET (VOIDmode, target,
2455 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2458 /* Return true if CMP1 is a suitable second operand for relational
2459 operator CODE. See also the *sCC patterns in mips.md. */
2462 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
2468 return reg_or_0_operand (cmp1, VOIDmode);
2472 return !TARGET_MIPS16 && cmp1 == const1_rtx;
2476 return arith_operand (cmp1, VOIDmode);
2479 return sle_operand (cmp1, VOIDmode);
2482 return sleu_operand (cmp1, VOIDmode);
2489 /* Compare CMP0 and CMP1 using relational operator CODE and store the
2490 result in TARGET. CMP0 and TARGET are register_operands that have
2491 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
2492 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
2495 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
2496 rtx target, rtx cmp0, rtx cmp1)
2498 /* First see if there is a MIPS instruction that can do this operation
2499 with CMP1 in its current form. If not, try doing the same for the
2500 inverse operation. If that also fails, force CMP1 into a register
2502 if (mips_relational_operand_ok_p (code, cmp1))
2503 mips_emit_binary (code, target, cmp0, cmp1);
2506 enum rtx_code inv_code = reverse_condition (code);
2507 if (!mips_relational_operand_ok_p (inv_code, cmp1))
2509 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
2510 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
2512 else if (invert_ptr == 0)
2514 rtx inv_target = gen_reg_rtx (GET_MODE (target));
2515 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
2516 mips_emit_binary (XOR, target, inv_target, const1_rtx);
2520 *invert_ptr = !*invert_ptr;
2521 mips_emit_binary (inv_code, target, cmp0, cmp1);
2526 /* Return a register that is zero iff CMP0 and CMP1 are equal.
2527 The register will have the same mode as CMP0. */
2530 mips_zero_if_equal (rtx cmp0, rtx cmp1)
2532 if (cmp1 == const0_rtx)
2535 if (uns_arith_operand (cmp1, VOIDmode))
2536 return expand_binop (GET_MODE (cmp0), xor_optab,
2537 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2539 return expand_binop (GET_MODE (cmp0), sub_optab,
2540 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2543 /* Convert a comparison into something that can be used in a branch or
2544 conditional move. cmp_operands[0] and cmp_operands[1] are the values
2545 being compared and *CODE is the code used to compare them.
2547 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
2548 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
2549 otherwise any standard branch condition can be used. The standard branch
2552 - EQ/NE between two registers.
2553 - any comparison between a register and zero. */
2556 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
2558 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
2560 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
2562 *op0 = cmp_operands[0];
2563 *op1 = cmp_operands[1];
2565 else if (*code == EQ || *code == NE)
2569 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2574 *op0 = cmp_operands[0];
2575 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
2580 /* The comparison needs a separate scc instruction. Store the
2581 result of the scc in *OP0 and compare it against zero. */
2582 bool invert = false;
2583 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
2585 mips_emit_int_relational (*code, &invert, *op0,
2586 cmp_operands[0], cmp_operands[1]);
2587 *code = (invert ? EQ : NE);
2592 enum rtx_code cmp_code;
2594 /* Floating-point tests use a separate c.cond.fmt comparison to
2595 set a condition code register. The branch or conditional move
2596 will then compare that register against zero.
2598 Set CMP_CODE to the code of the comparison instruction and
2599 *CODE to the code that the branch or move should use. */
2607 cmp_code = reverse_condition_maybe_unordered (*code);
2617 ? gen_reg_rtx (CCmode)
2618 : gen_rtx_REG (CCmode, FPSW_REGNUM));
2620 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
2624 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
2625 Store the result in TARGET and return true if successful.
2627 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
2630 mips_emit_scc (enum rtx_code code, rtx target)
2632 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
2635 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
2636 if (code == EQ || code == NE)
2638 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2639 mips_emit_binary (code, target, zie, const0_rtx);
2642 mips_emit_int_relational (code, 0, target,
2643 cmp_operands[0], cmp_operands[1]);
2647 /* Emit the common code for doing conditional branches.
2648 operand[0] is the label to jump to.
2649 The comparison operands are saved away by cmp{si,di,sf,df}. */
2652 gen_conditional_branch (rtx *operands, enum rtx_code code)
2654 rtx op0, op1, target;
2656 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
2657 target = gen_rtx_IF_THEN_ELSE (VOIDmode,
2658 gen_rtx_fmt_ee (code, GET_MODE (op0),
2660 gen_rtx_LABEL_REF (VOIDmode, operands[0]),
2662 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, target));
2665 /* Emit the common code for conditional moves. OPERANDS is the array
2666 of operands passed to the conditional move define_expand. */
2669 gen_conditional_move (rtx *operands)
2674 code = GET_CODE (operands[1]);
2675 mips_emit_compare (&code, &op0, &op1, true);
2676 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2677 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2678 gen_rtx_fmt_ee (code,
2681 operands[2], operands[3])));
2684 /* Emit a conditional trap. OPERANDS is the array of operands passed to
2685 the conditional_trap expander. */
2688 mips_gen_conditional_trap (rtx *operands)
2691 enum rtx_code cmp_code = GET_CODE (operands[0]);
2692 enum machine_mode mode = GET_MODE (cmp_operands[0]);
2694 /* MIPS conditional trap machine instructions don't have GT or LE
2695 flavors, so we must invert the comparison and convert to LT and
2696 GE, respectively. */
2699 case GT: cmp_code = LT; break;
2700 case LE: cmp_code = GE; break;
2701 case GTU: cmp_code = LTU; break;
2702 case LEU: cmp_code = GEU; break;
2705 if (cmp_code == GET_CODE (operands[0]))
2707 op0 = cmp_operands[0];
2708 op1 = cmp_operands[1];
2712 op0 = cmp_operands[1];
2713 op1 = cmp_operands[0];
2715 op0 = force_reg (mode, op0);
2716 if (!arith_operand (op1, mode))
2717 op1 = force_reg (mode, op1);
2719 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
2720 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
2724 /* Load function address ADDR into register DEST. SIBCALL_P is true
2725 if the address is needed for a sibling call. */
2728 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
2730 /* If we're generating PIC, and this call is to a global function,
2731 try to allow its address to be resolved lazily. This isn't
2732 possible for NewABI sibcalls since the value of $gp on entry
2733 to the stub would be our caller's gp, not ours. */
2734 if (TARGET_EXPLICIT_RELOCS
2735 && !(sibcall_p && TARGET_NEWABI)
2736 && global_got_operand (addr, VOIDmode))
2738 rtx high, lo_sum_symbol;
2740 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
2741 addr, SYMBOL_GOTOFF_CALL);
2742 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
2743 if (Pmode == SImode)
2744 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
2746 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
2749 emit_move_insn (dest, addr);
2753 /* Expand a call or call_value instruction. RESULT is where the
2754 result will go (null for calls), ADDR is the address of the
2755 function, ARGS_SIZE is the size of the arguments and AUX is
2756 the value passed to us by mips_function_arg. SIBCALL_P is true
2757 if we are expanding a sibling call, false if we're expanding
2761 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
2763 rtx orig_addr, pattern, insn;
2766 if (!call_insn_operand (addr, VOIDmode))
2768 addr = gen_reg_rtx (Pmode);
2769 mips_load_call_address (addr, orig_addr, sibcall_p);
2773 && mips16_hard_float
2774 && build_mips16_call_stub (result, addr, args_size,
2775 aux == 0 ? 0 : (int) GET_MODE (aux)))
2779 pattern = (sibcall_p
2780 ? gen_sibcall_internal (addr, args_size)
2781 : gen_call_internal (addr, args_size));
2782 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
2786 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
2787 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
2790 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
2791 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
2794 pattern = (sibcall_p
2795 ? gen_sibcall_value_internal (result, addr, args_size)
2796 : gen_call_value_internal (result, addr, args_size));
2798 insn = emit_call_insn (pattern);
2800 /* Lazy-binding stubs require $gp to be valid on entry. */
2801 if (global_got_operand (orig_addr, VOIDmode))
2802 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2806 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
2809 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
2810 tree exp ATTRIBUTE_UNUSED)
2812 return TARGET_SIBCALLS;
2815 /* Emit code to move general operand SRC into condition-code
2816 register DEST. SCRATCH is a scratch TFmode float register.
2823 where FP1 and FP2 are single-precision float registers
2824 taken from SCRATCH. */
2827 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
2831 /* Change the source to SFmode. */
2832 if (GET_CODE (src) == MEM)
2833 src = adjust_address (src, SFmode, 0);
2834 else if (GET_CODE (src) == REG || GET_CODE (src) == SUBREG)
2835 src = gen_rtx_REG (SFmode, true_regnum (src));
2837 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
2838 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
2840 emit_move_insn (copy_rtx (fp1), src);
2841 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
2842 emit_insn (gen_slt_sf (dest, fp2, fp1));
2845 /* Emit code to change the current function's return address to
2846 ADDRESS. SCRATCH is available as a scratch register, if needed.
2847 ADDRESS and SCRATCH are both word-mode GPRs. */
2850 mips_set_return_address (rtx address, rtx scratch)
2854 compute_frame_size (get_frame_size ());
2855 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
2856 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
2857 cfun->machine->frame.gp_sp_offset);
2859 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
2862 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
2863 Assume that the areas do not overlap. */
2866 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
2868 HOST_WIDE_INT offset, delta;
2869 unsigned HOST_WIDE_INT bits;
2871 enum machine_mode mode;
2874 /* Work out how many bits to move at a time. If both operands have
2875 half-word alignment, it is usually better to move in half words.
2876 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
2877 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
2878 Otherwise move word-sized chunks. */
2879 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
2880 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
2881 bits = BITS_PER_WORD / 2;
2883 bits = BITS_PER_WORD;
2885 mode = mode_for_size (bits, MODE_INT, 0);
2886 delta = bits / BITS_PER_UNIT;
2888 /* Allocate a buffer for the temporary registers. */
2889 regs = alloca (sizeof (rtx) * length / delta);
2891 /* Load as many BITS-sized chunks as possible. Use a normal load if
2892 the source has enough alignment, otherwise use left/right pairs. */
2893 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2895 regs[i] = gen_reg_rtx (mode);
2896 if (MEM_ALIGN (src) >= bits)
2897 emit_move_insn (regs[i], adjust_address (src, mode, offset));
2900 rtx part = adjust_address (src, BLKmode, offset);
2901 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
2906 /* Copy the chunks to the destination. */
2907 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2908 if (MEM_ALIGN (dest) >= bits)
2909 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
2912 rtx part = adjust_address (dest, BLKmode, offset);
2913 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
2917 /* Mop up any left-over bytes. */
2918 if (offset < length)
2920 src = adjust_address (src, BLKmode, offset);
2921 dest = adjust_address (dest, BLKmode, offset);
2922 move_by_pieces (dest, src, length - offset,
2923 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
2927 #define MAX_MOVE_REGS 4
2928 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
2931 /* Helper function for doing a loop-based block operation on memory
2932 reference MEM. Each iteration of the loop will operate on LENGTH
2935 Create a new base register for use within the loop and point it to
2936 the start of MEM. Create a new memory reference that uses this
2937 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
2940 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
2941 rtx *loop_reg, rtx *loop_mem)
2943 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
2945 /* Although the new mem does not refer to a known location,
2946 it does keep up to LENGTH bytes of alignment. */
2947 *loop_mem = change_address (mem, BLKmode, *loop_reg);
2948 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
2952 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
2953 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
2954 memory regions do not overlap. */
2957 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
2959 rtx label, src_reg, dest_reg, final_src;
2960 HOST_WIDE_INT leftover;
2962 leftover = length % MAX_MOVE_BYTES;
2965 /* Create registers and memory references for use within the loop. */
2966 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
2967 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
2969 /* Calculate the value that SRC_REG should have after the last iteration
2971 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
2974 /* Emit the start of the loop. */
2975 label = gen_label_rtx ();
2978 /* Emit the loop body. */
2979 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
2981 /* Move on to the next block. */
2982 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
2983 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
2985 /* Emit the loop condition. */
2986 if (Pmode == DImode)
2987 emit_insn (gen_cmpdi (src_reg, final_src));
2989 emit_insn (gen_cmpsi (src_reg, final_src));
2990 emit_jump_insn (gen_bne (label));
2992 /* Mop up any left-over bytes. */
2994 mips_block_move_straight (dest, src, leftover);
2997 /* Expand a movmemsi instruction. */
3000 mips_expand_block_move (rtx dest, rtx src, rtx length)
3002 if (GET_CODE (length) == CONST_INT)
3004 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3006 mips_block_move_straight (dest, src, INTVAL (length));
3011 mips_block_move_loop (dest, src, INTVAL (length));
3018 /* Argument support functions. */
3020 /* Initialize CUMULATIVE_ARGS for a function. */
3023 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3024 rtx libname ATTRIBUTE_UNUSED)
3026 static CUMULATIVE_ARGS zero_cum;
3027 tree param, next_param;
3030 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3032 /* Determine if this function has variable arguments. This is
3033 indicated by the last argument being 'void_type_mode' if there
3034 are no variable arguments. The standard MIPS calling sequence
3035 passes all arguments in the general purpose registers in this case. */
3037 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3038 param != 0; param = next_param)
3040 next_param = TREE_CHAIN (param);
3041 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3042 cum->gp_reg_found = 1;
3047 /* Fill INFO with information about a single argument. CUM is the
3048 cumulative state for earlier arguments. MODE is the mode of this
3049 argument and TYPE is its type (if known). NAMED is true if this
3050 is a named (fixed) argument rather than a variable one. */
3053 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3054 tree type, int named, struct mips_arg_info *info)
3056 bool doubleword_aligned_p;
3057 unsigned int num_bytes, num_words, max_regs;
3059 /* Work out the size of the argument. */
3060 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3061 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3063 /* Decide whether it should go in a floating-point register, assuming
3064 one is free. Later code checks for availability.
3066 The checks against UNITS_PER_FPVALUE handle the soft-float and
3067 single-float cases. */
3071 /* The EABI conventions have traditionally been defined in terms
3072 of TYPE_MODE, regardless of the actual type. */
3073 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3074 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3075 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3080 /* Only leading floating-point scalars are passed in
3081 floating-point registers. We also handle vector floats the same
3082 say, which is OK because they are not covered by the standard ABI. */
3083 info->fpr_p = (!cum->gp_reg_found
3084 && cum->arg_number < 2
3085 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3086 || VECTOR_FLOAT_TYPE_P (type))
3087 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3088 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3089 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3094 /* Scalar and complex floating-point types are passed in
3095 floating-point registers. */
3096 info->fpr_p = (named
3097 && (type == 0 || FLOAT_TYPE_P (type))
3098 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3099 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3100 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3101 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3103 /* ??? According to the ABI documentation, the real and imaginary
3104 parts of complex floats should be passed in individual registers.
3105 The real and imaginary parts of stack arguments are supposed
3106 to be contiguous and there should be an extra word of padding
3109 This has two problems. First, it makes it impossible to use a
3110 single "void *" va_list type, since register and stack arguments
3111 are passed differently. (At the time of writing, MIPSpro cannot
3112 handle complex float varargs correctly.) Second, it's unclear
3113 what should happen when there is only one register free.
3115 For now, we assume that named complex floats should go into FPRs
3116 if there are two FPRs free, otherwise they should be passed in the
3117 same way as a struct containing two floats. */
3119 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3120 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3122 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3123 info->fpr_p = false;
3133 /* See whether the argument has doubleword alignment. */
3134 doubleword_aligned_p = (type
3135 ? TYPE_ALIGN (type) > BITS_PER_WORD
3136 : GET_MODE_UNIT_SIZE (mode) > UNITS_PER_WORD);
3138 /* Set REG_OFFSET to the register count we're interested in.
3139 The EABI allocates the floating-point registers separately,
3140 but the other ABIs allocate them like integer registers. */
3141 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3145 /* Advance to an even register if the argument is doubleword-aligned. */
3146 if (doubleword_aligned_p)
3147 info->reg_offset += info->reg_offset & 1;
3149 /* Work out the offset of a stack argument. */
3150 info->stack_offset = cum->stack_words;
3151 if (doubleword_aligned_p)
3152 info->stack_offset += info->stack_offset & 1;
3154 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3156 /* Partition the argument between registers and stack. */
3157 info->reg_words = MIN (num_words, max_regs);
3158 info->stack_words = num_words - info->reg_words;
3162 /* Implement FUNCTION_ARG_ADVANCE. */
3165 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3166 tree type, int named)
3168 struct mips_arg_info info;
3170 mips_arg_info (cum, mode, type, named, &info);
3173 cum->gp_reg_found = true;
3175 /* See the comment above the cumulative args structure in mips.h
3176 for an explanation of what this code does. It assumes the O32
3177 ABI, which passes at most 2 arguments in float registers. */
3178 if (cum->arg_number < 2 && info.fpr_p)
3179 cum->fp_code += (mode == SFmode ? 1 : 2) << ((cum->arg_number - 1) * 2);
3181 if (mips_abi != ABI_EABI || !info.fpr_p)
3182 cum->num_gprs = info.reg_offset + info.reg_words;
3183 else if (info.reg_words > 0)
3184 cum->num_fprs += FP_INC;
3186 if (info.stack_words > 0)
3187 cum->stack_words = info.stack_offset + info.stack_words;
3192 /* Implement FUNCTION_ARG. */
3195 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3196 tree type, int named)
3198 struct mips_arg_info info;
3200 /* We will be called with a mode of VOIDmode after the last argument
3201 has been seen. Whatever we return will be passed to the call
3202 insn. If we need a mips16 fp_code, return a REG with the code
3203 stored as the mode. */
3204 if (mode == VOIDmode)
3206 if (TARGET_MIPS16 && cum->fp_code != 0)
3207 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3213 mips_arg_info (cum, mode, type, named, &info);
3215 /* Return straight away if the whole argument is passed on the stack. */
3216 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3220 && TREE_CODE (type) == RECORD_TYPE
3222 && TYPE_SIZE_UNIT (type)
3223 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3226 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3227 structure contains a double in its entirety, then that 64 bit
3228 chunk is passed in a floating point register. */
3231 /* First check to see if there is any such field. */
3232 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3233 if (TREE_CODE (field) == FIELD_DECL
3234 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3235 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3236 && host_integerp (bit_position (field), 0)
3237 && int_bit_position (field) % BITS_PER_WORD == 0)
3242 /* Now handle the special case by returning a PARALLEL
3243 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3244 chunks are passed in registers. */
3246 HOST_WIDE_INT bitpos;
3249 /* assign_parms checks the mode of ENTRY_PARM, so we must
3250 use the actual mode here. */
3251 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3254 field = TYPE_FIELDS (type);
3255 for (i = 0; i < info.reg_words; i++)
3259 for (; field; field = TREE_CHAIN (field))
3260 if (TREE_CODE (field) == FIELD_DECL
3261 && int_bit_position (field) >= bitpos)
3265 && int_bit_position (field) == bitpos
3266 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3267 && !TARGET_SOFT_FLOAT
3268 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3269 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3271 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3274 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3275 GEN_INT (bitpos / BITS_PER_UNIT));
3277 bitpos += BITS_PER_WORD;
3283 /* Handle the n32/n64 conventions for passing complex floating-point
3284 arguments in FPR pairs. The real part goes in the lower register
3285 and the imaginary part goes in the upper register. */
3288 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3291 enum machine_mode inner;
3294 inner = GET_MODE_INNER (mode);
3295 reg = FP_ARG_FIRST + info.reg_offset;
3296 real = gen_rtx_EXPR_LIST (VOIDmode,
3297 gen_rtx_REG (inner, reg),
3299 imag = gen_rtx_EXPR_LIST (VOIDmode,
3300 gen_rtx_REG (inner, reg + info.reg_words / 2),
3301 GEN_INT (GET_MODE_SIZE (inner)));
3302 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3306 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3307 else if (info.reg_offset == 1)
3308 /* This code handles the special o32 case in which the second word
3309 of the argument structure is passed in floating-point registers. */
3310 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3312 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3316 /* Implement FUNCTION_ARG_PARTIAL_NREGS. */
3319 function_arg_partial_nregs (const CUMULATIVE_ARGS *cum,
3320 enum machine_mode mode, tree type, int named)
3322 struct mips_arg_info info;
3324 mips_arg_info (cum, mode, type, named, &info);
3325 return info.stack_words > 0 ? info.reg_words : 0;
3329 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3330 upward rather than downward. In other words, return true if the
3331 first byte of the stack slot has useful data, false if the last
3335 mips_pad_arg_upward (enum machine_mode mode, tree type)
3337 /* On little-endian targets, the first byte of every stack argument
3338 is passed in the first byte of the stack slot. */
3339 if (!BYTES_BIG_ENDIAN)
3342 /* Otherwise, integral types are padded downward: the last byte of a
3343 stack argument is passed in the last byte of the stack slot. */
3345 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3346 : GET_MODE_CLASS (mode) == MODE_INT)
3349 /* Big-endian o64 pads floating-point arguments downward. */
3350 if (mips_abi == ABI_O64)
3351 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3354 /* Other types are padded upward for o32, o64, n32 and n64. */
3355 if (mips_abi != ABI_EABI)
3358 /* Arguments smaller than a stack slot are padded downward. */
3359 if (mode != BLKmode)
3360 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
3362 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
3366 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
3367 if the least significant byte of the register has useful data. Return
3368 the opposite if the most significant byte does. */
3371 mips_pad_reg_upward (enum machine_mode mode, tree type)
3373 /* No shifting is required for floating-point arguments. */
3374 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3375 return !BYTES_BIG_ENDIAN;
3377 /* Otherwise, apply the same padding to register arguments as we do
3378 to stack arguments. */
3379 return mips_pad_arg_upward (mode, type);
3383 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3384 tree type, int *pretend_size, int no_rtl)
3386 CUMULATIVE_ARGS local_cum;
3387 int gp_saved, fp_saved;
3389 /* The caller has advanced CUM up to, but not beyond, the last named
3390 argument. Advance a local copy of CUM past the last "real" named
3391 argument, to find out how many registers are left over. */
3394 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
3396 /* Found out how many registers we need to save. */
3397 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3398 fp_saved = (EABI_FLOAT_VARARGS_P
3399 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
3408 ptr = virtual_incoming_args_rtx;
3413 ptr = plus_constant (ptr, local_cum.num_gprs * UNITS_PER_WORD);
3417 ptr = plus_constant (ptr, -gp_saved * UNITS_PER_WORD);
3420 mem = gen_rtx_MEM (BLKmode, ptr);
3421 set_mem_alias_set (mem, get_varargs_alias_set ());
3423 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
3428 /* We can't use move_block_from_reg, because it will use
3430 enum machine_mode mode;
3433 /* Set OFF to the offset from virtual_incoming_args_rtx of
3434 the first float register. The FP save area lies below
3435 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
3436 off = -gp_saved * UNITS_PER_WORD;
3437 off &= ~(UNITS_PER_FPVALUE - 1);
3438 off -= fp_saved * UNITS_PER_FPREG;
3440 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
3442 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
3446 ptr = plus_constant (virtual_incoming_args_rtx, off);
3447 mem = gen_rtx_MEM (mode, ptr);
3448 set_mem_alias_set (mem, get_varargs_alias_set ());
3449 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
3450 off += UNITS_PER_HWFPVALUE;
3456 /* No need for pretend arguments: the register parameter area was
3457 allocated by the caller. */
3461 *pretend_size = (gp_saved * UNITS_PER_WORD) + (fp_saved * UNITS_PER_FPREG);
3464 /* Create the va_list data type.
3465 We keep 3 pointers, and two offsets.
3466 Two pointers are to the overflow area, which starts at the CFA.
3467 One of these is constant, for addressing into the GPR save area below it.
3468 The other is advanced up the stack through the overflow region.
3469 The third pointer is to the GPR save area. Since the FPR save area
3470 is just below it, we can address FPR slots off this pointer.
3471 We also keep two one-byte offsets, which are to be subtracted from the
3472 constant pointers to yield addresses in the GPR and FPR save areas.
3473 These are downcounted as float or non-float arguments are used,
3474 and when they get to zero, the argument must be obtained from the
3476 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
3477 pointer is enough. It's started at the GPR save area, and is
3479 Note that the GPR save area is not constant size, due to optimization
3480 in the prologue. Hence, we can't use a design with two pointers
3481 and two offsets, although we could have designed this with two pointers
3482 and three offsets. */
3485 mips_build_builtin_va_list (void)
3487 if (EABI_FLOAT_VARARGS_P)
3489 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
3492 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3494 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
3496 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
3498 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
3500 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
3501 unsigned_char_type_node);
3502 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
3503 unsigned_char_type_node);
3504 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
3505 warn on every user file. */
3506 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
3507 array = build_array_type (unsigned_char_type_node,
3508 build_index_type (index));
3509 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
3511 DECL_FIELD_CONTEXT (f_ovfl) = record;
3512 DECL_FIELD_CONTEXT (f_gtop) = record;
3513 DECL_FIELD_CONTEXT (f_ftop) = record;
3514 DECL_FIELD_CONTEXT (f_goff) = record;
3515 DECL_FIELD_CONTEXT (f_foff) = record;
3516 DECL_FIELD_CONTEXT (f_res) = record;
3518 TYPE_FIELDS (record) = f_ovfl;
3519 TREE_CHAIN (f_ovfl) = f_gtop;
3520 TREE_CHAIN (f_gtop) = f_ftop;
3521 TREE_CHAIN (f_ftop) = f_goff;
3522 TREE_CHAIN (f_goff) = f_foff;
3523 TREE_CHAIN (f_foff) = f_res;
3525 layout_type (record);
3528 else if (TARGET_IRIX && TARGET_IRIX6)
3529 /* On IRIX 6, this type is 'char *'. */
3530 return build_pointer_type (char_type_node);
3532 /* Otherwise, we use 'void *'. */
3533 return ptr_type_node;
3536 /* Implement va_start. */
3539 mips_va_start (tree valist, rtx nextarg)
3541 const CUMULATIVE_ARGS *cum = ¤t_function_args_info;
3543 /* ARG_POINTER_REGNUM is initialized to STACK_POINTER_BOUNDARY, but
3544 since the stack is aligned for a pair of argument-passing slots,
3545 and the beginning of a variable argument list may be an odd slot,
3546 we have to decrease its alignment. */
3547 if (cfun && cfun->emit->regno_pointer_align)
3548 while (((current_function_pretend_args_size * BITS_PER_UNIT)
3549 & (REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) - 1)) != 0)
3550 REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) /= 2;
3552 if (mips_abi == ABI_EABI)
3554 int gpr_save_area_size;
3557 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
3559 if (EABI_FLOAT_VARARGS_P)
3561 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3562 tree ovfl, gtop, ftop, goff, foff;
3565 int fpr_save_area_size;
3567 f_ovfl = TYPE_FIELDS (va_list_type_node);
3568 f_gtop = TREE_CHAIN (f_ovfl);
3569 f_ftop = TREE_CHAIN (f_gtop);
3570 f_goff = TREE_CHAIN (f_ftop);
3571 f_foff = TREE_CHAIN (f_goff);
3573 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3575 gtop = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3577 ftop = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3579 goff = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3581 foff = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3584 /* Emit code to initialize OVFL, which points to the next varargs
3585 stack argument. CUM->STACK_WORDS gives the number of stack
3586 words used by named arguments. */
3587 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
3588 if (cum->stack_words > 0)
3589 t = build (PLUS_EXPR, TREE_TYPE (ovfl), t,
3590 build_int_cst (NULL_TREE,
3591 cum->stack_words * UNITS_PER_WORD));
3592 t = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3593 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3595 /* Emit code to initialize GTOP, the top of the GPR save area. */
3596 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
3597 t = build (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
3598 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3600 /* Emit code to initialize FTOP, the top of the FPR save area.
3601 This address is gpr_save_area_bytes below GTOP, rounded
3602 down to the next fp-aligned boundary. */
3603 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
3604 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
3605 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
3607 t = build (PLUS_EXPR, TREE_TYPE (ftop), t,
3608 build_int_cst (NULL_TREE, -fpr_offset));
3609 t = build (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
3610 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3612 /* Emit code to initialize GOFF, the offset from GTOP of the
3613 next GPR argument. */
3614 t = build (MODIFY_EXPR, TREE_TYPE (goff), goff,
3615 build_int_cst (NULL_TREE, gpr_save_area_size));
3616 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3618 /* Likewise emit code to initialize FOFF, the offset from FTOP
3619 of the next FPR argument. */
3621 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
3622 t = build (MODIFY_EXPR, TREE_TYPE (foff), foff,
3623 build_int_cst (NULL_TREE, fpr_save_area_size));
3624 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3628 /* Everything is in the GPR save area, or in the overflow
3629 area which is contiguous with it. */
3630 nextarg = plus_constant (nextarg, -gpr_save_area_size);
3631 std_expand_builtin_va_start (valist, nextarg);
3635 std_expand_builtin_va_start (valist, nextarg);
3638 /* Implement va_arg. */
3641 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
3643 HOST_WIDE_INT size, rsize;
3647 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
3650 type = build_pointer_type (type);
3652 size = int_size_in_bytes (type);
3653 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
3655 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
3656 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3659 /* Not a simple merged stack. */
3661 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3662 tree ovfl, top, off, align;
3663 HOST_WIDE_INT osize;
3666 f_ovfl = TYPE_FIELDS (va_list_type_node);
3667 f_gtop = TREE_CHAIN (f_ovfl);
3668 f_ftop = TREE_CHAIN (f_gtop);
3669 f_goff = TREE_CHAIN (f_ftop);
3670 f_foff = TREE_CHAIN (f_goff);
3672 /* We maintain separate pointers and offsets for floating-point
3673 and integer arguments, but we need similar code in both cases.
3676 TOP be the top of the register save area;
3677 OFF be the offset from TOP of the next register;
3678 ADDR_RTX be the address of the argument;
3679 RSIZE be the number of bytes used to store the argument
3680 when it's in the register save area;
3681 OSIZE be the number of bytes used to store it when it's
3682 in the stack overflow area; and
3683 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
3685 The code we want is:
3687 1: off &= -rsize; // round down
3690 4: addr_rtx = top - off;
3695 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
3696 10: addr_rtx = ovfl + PADDING;
3700 [1] and [9] can sometimes be optimized away. */
3702 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3705 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
3706 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
3708 top = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3710 off = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3713 /* When floating-point registers are saved to the stack,
3714 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
3715 of the float's precision. */
3716 rsize = UNITS_PER_HWFPVALUE;
3718 /* Overflow arguments are padded to UNITS_PER_WORD bytes
3719 (= PARM_BOUNDARY bits). This can be different from RSIZE
3722 (1) On 32-bit targets when TYPE is a structure such as:
3724 struct s { float f; };
3726 Such structures are passed in paired FPRs, so RSIZE
3727 will be 8 bytes. However, the structure only takes
3728 up 4 bytes of memory, so OSIZE will only be 4.
3730 (2) In combinations such as -mgp64 -msingle-float
3731 -fshort-double. Doubles passed in registers
3732 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
3733 but those passed on the stack take up
3734 UNITS_PER_WORD bytes. */
3735 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
3739 top = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3741 off = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3743 if (rsize > UNITS_PER_WORD)
3745 /* [1] Emit code for: off &= -rsize. */
3746 t = build (BIT_AND_EXPR, TREE_TYPE (off), off,
3747 build_int_cst (NULL_TREE, -rsize));
3748 t = build (MODIFY_EXPR, TREE_TYPE (off), off, t);
3749 gimplify_and_add (t, pre_p);
3754 /* [2] Emit code to branch if off == 0. */
3755 t = lang_hooks.truthvalue_conversion (off);
3756 addr = build (COND_EXPR, ptr_type_node, t, NULL, NULL);
3758 /* [5] Emit code for: off -= rsize. We do this as a form of
3759 post-increment not available to C. Also widen for the
3760 coming pointer arithmetic. */
3761 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
3762 t = build (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
3763 t = fold_convert (sizetype, t);
3764 t = fold_convert (TREE_TYPE (top), t);
3766 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
3767 the argument has RSIZE - SIZE bytes of leading padding. */
3768 t = build (MINUS_EXPR, TREE_TYPE (top), top, t);
3769 if (BYTES_BIG_ENDIAN && rsize > size)
3771 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
3773 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3775 COND_EXPR_THEN (addr) = t;
3777 if (osize > UNITS_PER_WORD)
3779 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
3780 u = fold_convert (TREE_TYPE (ovfl),
3781 build_int_cst (NULL_TREE, osize - 1));
3782 t = build (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
3783 u = fold_convert (TREE_TYPE (ovfl),
3784 build_int_cst (NULL_TREE, -osize));
3785 t = build (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
3786 align = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3791 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
3792 post-increment ovfl by osize. On big-endian machines,
3793 the argument has OSIZE - SIZE bytes of leading padding. */
3794 u = fold_convert (TREE_TYPE (ovfl),
3795 build_int_cst (NULL_TREE, osize));
3796 t = build (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
3797 if (BYTES_BIG_ENDIAN && osize > size)
3799 u = fold_convert (TREE_TYPE (t),
3800 build_int_cst (NULL_TREE, osize - size));
3801 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3804 /* String [9] and [10,11] together. */
3806 t = build (COMPOUND_EXPR, TREE_TYPE (t), align, t);
3807 COND_EXPR_ELSE (addr) = t;
3809 addr = fold_convert (build_pointer_type (type), addr);
3810 addr = build_fold_indirect_ref (addr);
3814 addr = build_fold_indirect_ref (addr);
3819 /* Return true if it is possible to use left/right accesses for a
3820 bitfield of WIDTH bits starting BITPOS bits into *OP. When
3821 returning true, update *OP, *LEFT and *RIGHT as follows:
3823 *OP is a BLKmode reference to the whole field.
3825 *LEFT is a QImode reference to the first byte if big endian or
3826 the last byte if little endian. This address can be used in the
3827 left-side instructions (lwl, swl, ldl, sdl).
3829 *RIGHT is a QImode reference to the opposite end of the field and
3830 can be used in the parterning right-side instruction. */
3833 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
3834 rtx *left, rtx *right)
3838 /* Check that the operand really is a MEM. Not all the extv and
3839 extzv predicates are checked. */
3840 if (GET_CODE (*op) != MEM)
3843 /* Check that the size is valid. */
3844 if (width != 32 && (!TARGET_64BIT || width != 64))
3847 /* We can only access byte-aligned values. Since we are always passed
3848 a reference to the first byte of the field, it is not necessary to
3849 do anything with BITPOS after this check. */
3850 if (bitpos % BITS_PER_UNIT != 0)
3853 /* Reject aligned bitfields: we want to use a normal load or store
3854 instead of a left/right pair. */
3855 if (MEM_ALIGN (*op) >= width)
3858 /* Adjust *OP to refer to the whole field. This also has the effect
3859 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
3860 *op = adjust_address (*op, BLKmode, 0);
3861 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
3863 /* Get references to both ends of the field. We deliberately don't
3864 use the original QImode *OP for FIRST since the new BLKmode one
3865 might have a simpler address. */
3866 first = adjust_address (*op, QImode, 0);
3867 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
3869 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
3870 be the upper word and RIGHT the lower word. */
3871 if (TARGET_BIG_ENDIAN)
3872 *left = first, *right = last;
3874 *left = last, *right = first;
3880 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
3881 Return true on success. We only handle cases where zero_extract is
3882 equivalent to sign_extract. */
3885 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
3887 rtx left, right, temp;
3889 /* If TARGET_64BIT, the destination of a 32-bit load will be a
3890 paradoxical word_mode subreg. This is the only case in which
3891 we allow the destination to be larger than the source. */
3892 if (GET_CODE (dest) == SUBREG
3893 && GET_MODE (dest) == DImode
3894 && SUBREG_BYTE (dest) == 0
3895 && GET_MODE (SUBREG_REG (dest)) == SImode)
3896 dest = SUBREG_REG (dest);
3898 /* After the above adjustment, the destination must be the same
3899 width as the source. */
3900 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
3903 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
3906 temp = gen_reg_rtx (GET_MODE (dest));
3907 if (GET_MODE (dest) == DImode)
3909 emit_insn (gen_mov_ldl (temp, src, left));
3910 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
3914 emit_insn (gen_mov_lwl (temp, src, left));
3915 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
3921 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
3925 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
3929 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
3932 src = gen_lowpart (mode_for_size (width, MODE_INT, 0), src);
3934 if (GET_MODE (src) == DImode)
3936 emit_insn (gen_mov_sdl (dest, src, left));
3937 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
3941 emit_insn (gen_mov_swl (dest, src, left));
3942 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
3947 /* Set up globals to generate code for the ISA or processor
3948 described by INFO. */
3951 mips_set_architecture (const struct mips_cpu_info *info)
3955 mips_arch_info = info;
3956 mips_arch = info->cpu;
3957 mips_isa = info->isa;
3962 /* Likewise for tuning. */
3965 mips_set_tune (const struct mips_cpu_info *info)
3969 mips_tune_info = info;
3970 mips_tune = info->cpu;
3975 /* Set up the threshold for data to go into the small data area, instead
3976 of the normal data area, and detect any conflicts in the switches. */
3979 override_options (void)
3981 int i, start, regno;
3982 enum machine_mode mode;
3984 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
3986 /* Interpret -mabi. */
3987 mips_abi = MIPS_ABI_DEFAULT;
3988 if (mips_abi_string != 0)
3990 if (strcmp (mips_abi_string, "32") == 0)
3992 else if (strcmp (mips_abi_string, "o64") == 0)
3994 else if (strcmp (mips_abi_string, "n32") == 0)
3996 else if (strcmp (mips_abi_string, "64") == 0)
3998 else if (strcmp (mips_abi_string, "eabi") == 0)
3999 mips_abi = ABI_EABI;
4001 fatal_error ("bad value (%s) for -mabi= switch", mips_abi_string);
4004 /* The following code determines the architecture and register size.
4005 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4006 The GAS and GCC code should be kept in sync as much as possible. */
4008 if (mips_arch_string != 0)
4009 mips_set_architecture (mips_parse_cpu ("-march", mips_arch_string));
4011 if (mips_isa_string != 0)
4013 /* Handle -mipsN. */
4014 char *whole_isa_str = concat ("mips", mips_isa_string, NULL);
4015 const struct mips_cpu_info *isa_info;
4017 isa_info = mips_parse_cpu ("-mips option", whole_isa_str);
4018 free (whole_isa_str);
4020 /* -march takes precedence over -mipsN, since it is more descriptive.
4021 There's no harm in specifying both as long as the ISA levels
4023 if (mips_arch_info != 0 && mips_isa != isa_info->isa)
4024 error ("-mips%s conflicts with the other architecture options, "
4025 "which specify a MIPS%d processor",
4026 mips_isa_string, mips_isa);
4028 /* Set architecture based on the given option. */
4029 mips_set_architecture (isa_info);
4032 if (mips_arch_info == 0)
4034 #ifdef MIPS_CPU_STRING_DEFAULT
4035 mips_set_architecture (mips_parse_cpu ("default CPU",
4036 MIPS_CPU_STRING_DEFAULT));
4038 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4042 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4043 error ("-march=%s is not compatible with the selected ABI",
4044 mips_arch_info->name);
4046 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4047 if (mips_tune_string != 0)
4048 mips_set_tune (mips_parse_cpu ("-mtune", mips_tune_string));
4050 if (mips_tune_info == 0)
4051 mips_set_tune (mips_arch_info);
4053 if ((target_flags_explicit & MASK_64BIT) != 0)
4055 /* The user specified the size of the integer registers. Make sure
4056 it agrees with the ABI and ISA. */
4057 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4058 error ("-mgp64 used with a 32-bit processor");
4059 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4060 error ("-mgp32 used with a 64-bit ABI");
4061 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4062 error ("-mgp64 used with a 32-bit ABI");
4066 /* Infer the integer register size from the ABI and processor.
4067 Restrict ourselves to 32-bit registers if that's all the
4068 processor has, or if the ABI cannot handle 64-bit registers. */
4069 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4070 target_flags &= ~MASK_64BIT;
4072 target_flags |= MASK_64BIT;
4075 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4077 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4078 only one right answer here. */
4079 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4080 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4081 else if (!TARGET_64BIT && TARGET_FLOAT64)
4082 error ("unsupported combination: %s", "-mgp32 -mfp64");
4083 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4084 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4088 /* -msingle-float selects 32-bit float registers. Otherwise the
4089 float registers should be the same size as the integer ones. */
4090 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4091 target_flags |= MASK_FLOAT64;
4093 target_flags &= ~MASK_FLOAT64;
4096 /* End of code shared with GAS. */
4098 if ((target_flags_explicit & MASK_LONG64) == 0)
4100 /* If no type size setting options (-mlong64,-mint64,-mlong32)
4101 were used, then set the type sizes. In the EABI in 64 bit mode,
4102 longs and pointers are 64 bits. Likewise for the SGI Irix6 N64
4104 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4105 target_flags |= MASK_LONG64;
4107 target_flags &= ~MASK_LONG64;
4110 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4111 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4113 /* For some configurations, it is useful to have -march control
4114 the default setting of MASK_SOFT_FLOAT. */
4115 switch ((int) mips_arch)
4117 case PROCESSOR_R4100:
4118 case PROCESSOR_R4111:
4119 case PROCESSOR_R4120:
4120 case PROCESSOR_R4130:
4121 target_flags |= MASK_SOFT_FLOAT;
4125 target_flags &= ~MASK_SOFT_FLOAT;
4131 flag_pcc_struct_return = 0;
4133 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4135 /* If neither -mbranch-likely nor -mno-branch-likely was given
4136 on the command line, set MASK_BRANCHLIKELY based on the target
4139 By default, we enable use of Branch Likely instructions on
4140 all architectures which support them with the following
4141 exceptions: when creating MIPS32 or MIPS64 code, and when
4142 tuning for architectures where their use tends to hurt
4145 The MIPS32 and MIPS64 architecture specifications say "Software
4146 is strongly encouraged to avoid use of Branch Likely
4147 instructions, as they will be removed from a future revision
4148 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4149 issue those instructions unless instructed to do so by
4151 if (ISA_HAS_BRANCHLIKELY
4152 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4153 && !(TUNE_MIPS5500 || TUNE_SB1))
4154 target_flags |= MASK_BRANCHLIKELY;
4156 target_flags &= ~MASK_BRANCHLIKELY;
4158 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4159 warning ("generation of Branch Likely instructions enabled, but not supported by architecture");
4161 /* The effect of -mabicalls isn't defined for the EABI. */
4162 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4164 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4165 target_flags &= ~MASK_ABICALLS;
4168 /* -fpic (-KPIC) is the default when TARGET_ABICALLS is defined. We need
4169 to set flag_pic so that the LEGITIMATE_PIC_OPERAND_P macro will work. */
4170 /* ??? -non_shared turns off pic code generation, but this is not
4172 if (TARGET_ABICALLS)
4175 if (mips_section_threshold > 0)
4176 warning ("-G is incompatible with PIC code which is the default");
4179 /* mips_split_addresses is a half-way house between explicit
4180 relocations and the traditional assembler macros. It can
4181 split absolute 32-bit symbolic constants into a high/lo_sum
4182 pair but uses macros for other sorts of access.
4184 Like explicit relocation support for REL targets, it relies
4185 on GNU extensions in the assembler and the linker.
4187 Although this code should work for -O0, it has traditionally
4188 been treated as an optimization. */
4189 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4190 && optimize && !flag_pic
4191 && !ABI_HAS_64BIT_SYMBOLS)
4192 mips_split_addresses = 1;
4194 mips_split_addresses = 0;
4196 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4197 faster code, but at the expense of more nops. Enable it at -O3 and
4199 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4200 target_flags |= MASK_VR4130_ALIGN;
4202 /* When compiling for the mips16, we cannot use floating point. We
4203 record the original hard float value in mips16_hard_float. */
4206 if (TARGET_SOFT_FLOAT)
4207 mips16_hard_float = 0;
4209 mips16_hard_float = 1;
4210 target_flags |= MASK_SOFT_FLOAT;
4212 /* Don't run the scheduler before reload, since it tends to
4213 increase register pressure. */
4214 flag_schedule_insns = 0;
4216 /* Don't do hot/cold partitioning. The constant layout code expects
4217 the whole function to be in a single section. */
4218 flag_reorder_blocks_and_partition = 0;
4220 /* Silently disable -mexplicit-relocs since it doesn't apply
4221 to mips16 code. Even so, it would overly pedantic to warn
4222 about "-mips16 -mexplicit-relocs", especially given that
4223 we use a %gprel() operator. */
4224 target_flags &= ~MASK_EXPLICIT_RELOCS;
4227 /* When using explicit relocs, we call dbr_schedule from within
4229 if (TARGET_EXPLICIT_RELOCS)
4231 mips_flag_delayed_branch = flag_delayed_branch;
4232 flag_delayed_branch = 0;
4235 #ifdef MIPS_TFMODE_FORMAT
4236 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4239 /* Make sure that the user didn't turn off paired single support when
4240 MIPS-3D support is requested. */
4241 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE)
4242 && !TARGET_PAIRED_SINGLE_FLOAT)
4243 error ("-mips3d requires -mpaired-single");
4245 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE. */
4247 target_flags |= MASK_PAIRED_SINGLE;
4249 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4250 and TARGET_HARD_FLOAT are both true. */
4251 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4252 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4254 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4256 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4257 error ("-mips3d/-mpaired-single must be used with -mips64");
4259 mips_print_operand_punct['?'] = 1;
4260 mips_print_operand_punct['#'] = 1;
4261 mips_print_operand_punct['/'] = 1;
4262 mips_print_operand_punct['&'] = 1;
4263 mips_print_operand_punct['!'] = 1;
4264 mips_print_operand_punct['*'] = 1;
4265 mips_print_operand_punct['@'] = 1;
4266 mips_print_operand_punct['.'] = 1;
4267 mips_print_operand_punct['('] = 1;
4268 mips_print_operand_punct[')'] = 1;
4269 mips_print_operand_punct['['] = 1;
4270 mips_print_operand_punct[']'] = 1;
4271 mips_print_operand_punct['<'] = 1;
4272 mips_print_operand_punct['>'] = 1;
4273 mips_print_operand_punct['{'] = 1;
4274 mips_print_operand_punct['}'] = 1;
4275 mips_print_operand_punct['^'] = 1;
4276 mips_print_operand_punct['$'] = 1;
4277 mips_print_operand_punct['+'] = 1;
4278 mips_print_operand_punct['~'] = 1;
4280 mips_char_to_class['d'] = TARGET_MIPS16 ? M16_REGS : GR_REGS;
4281 mips_char_to_class['t'] = T_REG;
4282 mips_char_to_class['f'] = (TARGET_HARD_FLOAT ? FP_REGS : NO_REGS);
4283 mips_char_to_class['h'] = HI_REG;
4284 mips_char_to_class['l'] = LO_REG;
4285 mips_char_to_class['x'] = MD_REGS;
4286 mips_char_to_class['b'] = ALL_REGS;
4287 mips_char_to_class['c'] = (TARGET_ABICALLS ? PIC_FN_ADDR_REG :
4288 TARGET_MIPS16 ? M16_NA_REGS :
4290 mips_char_to_class['e'] = LEA_REGS;
4291 mips_char_to_class['j'] = PIC_FN_ADDR_REG;
4292 mips_char_to_class['y'] = GR_REGS;
4293 mips_char_to_class['z'] = ST_REGS;
4294 mips_char_to_class['B'] = COP0_REGS;
4295 mips_char_to_class['C'] = COP2_REGS;
4296 mips_char_to_class['D'] = COP3_REGS;
4298 /* Set up array to map GCC register number to debug register number.
4299 Ignore the special purpose register numbers. */
4301 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4302 mips_dbx_regno[i] = -1;
4304 start = GP_DBX_FIRST - GP_REG_FIRST;
4305 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4306 mips_dbx_regno[i] = i + start;
4308 start = FP_DBX_FIRST - FP_REG_FIRST;
4309 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4310 mips_dbx_regno[i] = i + start;
4312 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4313 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4315 /* Set up array giving whether a given register can hold a given mode. */
4317 for (mode = VOIDmode;
4318 mode != MAX_MACHINE_MODE;
4319 mode = (enum machine_mode) ((int)mode + 1))
4321 register int size = GET_MODE_SIZE (mode);
4322 register enum mode_class class = GET_MODE_CLASS (mode);
4324 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4328 if (mode == CCV2mode)
4331 && (regno - ST_REG_FIRST) % 2 == 0);
4333 else if (mode == CCV4mode)
4336 && (regno - ST_REG_FIRST) % 4 == 0);
4338 else if (mode == CCmode)
4341 temp = (regno == FPSW_REGNUM);
4343 temp = (ST_REG_P (regno) || GP_REG_P (regno)
4344 || FP_REG_P (regno));
4347 else if (GP_REG_P (regno))
4348 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
4350 else if (FP_REG_P (regno))
4351 temp = ((regno % FP_INC) == 0)
4352 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
4353 || class == MODE_VECTOR_FLOAT)
4354 && size <= UNITS_PER_FPVALUE)
4355 /* Allow integer modes that fit into a single
4356 register. We need to put integers into FPRs
4357 when using instructions like cvt and trunc. */
4358 || (class == MODE_INT && size <= UNITS_PER_FPREG)
4359 /* Allow TFmode for CCmode reloads. */
4360 || (ISA_HAS_8CC && mode == TFmode));
4362 else if (MD_REG_P (regno))
4363 temp = (INTEGRAL_MODE_P (mode)
4364 && (size <= UNITS_PER_WORD
4365 || (regno == MD_REG_FIRST
4366 && size == 2 * UNITS_PER_WORD)));
4368 else if (ALL_COP_REG_P (regno))
4369 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
4373 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
4377 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
4378 initialized yet, so we can't use that here. */
4379 gpr_mode = TARGET_64BIT ? DImode : SImode;
4381 /* Provide default values for align_* for 64-bit targets. */
4382 if (TARGET_64BIT && !TARGET_MIPS16)
4384 if (align_loops == 0)
4386 if (align_jumps == 0)
4388 if (align_functions == 0)
4389 align_functions = 8;
4392 /* Function to allocate machine-dependent function status. */
4393 init_machine_status = &mips_init_machine_status;
4395 if (ABI_HAS_64BIT_SYMBOLS)
4397 if (TARGET_EXPLICIT_RELOCS)
4399 mips_split_p[SYMBOL_64_HIGH] = true;
4400 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
4401 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
4403 mips_split_p[SYMBOL_64_MID] = true;
4404 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
4405 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
4407 mips_split_p[SYMBOL_64_LOW] = true;
4408 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
4409 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
4411 mips_split_p[SYMBOL_GENERAL] = true;
4412 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4417 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
4419 mips_split_p[SYMBOL_GENERAL] = true;
4420 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
4421 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4427 /* The high part is provided by a pseudo copy of $gp. */
4428 mips_split_p[SYMBOL_SMALL_DATA] = true;
4429 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
4432 if (TARGET_EXPLICIT_RELOCS)
4434 /* Small data constants are kept whole until after reload,
4435 then lowered by mips_rewrite_small_data. */
4436 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
4438 mips_split_p[SYMBOL_GOT_LOCAL] = true;
4441 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
4442 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
4446 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
4447 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
4452 /* The HIGH and LO_SUM are matched by special .md patterns. */
4453 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
4455 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
4456 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
4457 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
4459 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
4460 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
4461 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
4466 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
4468 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
4469 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
4475 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
4476 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
4477 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
4480 /* Default to working around R4000 errata only if the processor
4481 was selected explicitly. */
4482 if ((target_flags_explicit & MASK_FIX_R4000) == 0
4483 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
4484 target_flags |= MASK_FIX_R4000;
4486 /* Default to working around R4400 errata only if the processor
4487 was selected explicitly. */
4488 if ((target_flags_explicit & MASK_FIX_R4400) == 0
4489 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
4490 target_flags |= MASK_FIX_R4400;
4493 /* Implement CONDITIONAL_REGISTER_USAGE. */
4496 mips_conditional_register_usage (void)
4498 if (!TARGET_HARD_FLOAT)
4502 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4503 fixed_regs[regno] = call_used_regs[regno] = 1;
4504 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4505 fixed_regs[regno] = call_used_regs[regno] = 1;
4507 else if (! ISA_HAS_8CC)
4511 /* We only have a single condition code register. We
4512 implement this by hiding all the condition code registers,
4513 and generating RTL that refers directly to ST_REG_FIRST. */
4514 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4515 fixed_regs[regno] = call_used_regs[regno] = 1;
4517 /* In mips16 mode, we permit the $t temporary registers to be used
4518 for reload. We prohibit the unused $s registers, since they
4519 are caller saved, and saving them via a mips16 register would
4520 probably waste more time than just reloading the value. */
4523 fixed_regs[18] = call_used_regs[18] = 1;
4524 fixed_regs[19] = call_used_regs[19] = 1;
4525 fixed_regs[20] = call_used_regs[20] = 1;
4526 fixed_regs[21] = call_used_regs[21] = 1;
4527 fixed_regs[22] = call_used_regs[22] = 1;
4528 fixed_regs[23] = call_used_regs[23] = 1;
4529 fixed_regs[26] = call_used_regs[26] = 1;
4530 fixed_regs[27] = call_used_regs[27] = 1;
4531 fixed_regs[30] = call_used_regs[30] = 1;
4533 /* fp20-23 are now caller saved. */
4534 if (mips_abi == ABI_64)
4537 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
4538 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4540 /* Odd registers from fp21 to fp31 are now caller saved. */
4541 if (mips_abi == ABI_N32)
4544 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
4545 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4549 /* Allocate a chunk of memory for per-function machine-dependent data. */
4550 static struct machine_function *
4551 mips_init_machine_status (void)
4553 return ((struct machine_function *)
4554 ggc_alloc_cleared (sizeof (struct machine_function)));
4557 /* On the mips16, we want to allocate $24 (T_REG) before other
4558 registers for instructions for which it is possible. This helps
4559 avoid shuffling registers around in order to set up for an xor,
4560 encouraging the compiler to use a cmp instead. */
4563 mips_order_regs_for_local_alloc (void)
4567 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4568 reg_alloc_order[i] = i;
4572 /* It really doesn't matter where we put register 0, since it is
4573 a fixed register anyhow. */
4574 reg_alloc_order[0] = 24;
4575 reg_alloc_order[24] = 0;
4580 /* The MIPS debug format wants all automatic variables and arguments
4581 to be in terms of the virtual frame pointer (stack pointer before
4582 any adjustment in the function), while the MIPS 3.0 linker wants
4583 the frame pointer to be the stack pointer after the initial
4584 adjustment. So, we do the adjustment here. The arg pointer (which
4585 is eliminated) points to the virtual frame pointer, while the frame
4586 pointer (which may be eliminated) points to the stack pointer after
4587 the initial adjustments. */
4590 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
4592 rtx offset2 = const0_rtx;
4593 rtx reg = eliminate_constant_term (addr, &offset2);
4596 offset = INTVAL (offset2);
4598 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
4599 || reg == hard_frame_pointer_rtx)
4601 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
4602 ? compute_frame_size (get_frame_size ())
4603 : cfun->machine->frame.total_size;
4605 /* MIPS16 frame is smaller */
4606 if (frame_pointer_needed && TARGET_MIPS16)
4607 frame_size -= cfun->machine->frame.args_size;
4609 offset = offset - frame_size;
4612 /* sdbout_parms does not want this to crash for unrecognized cases. */
4614 else if (reg != arg_pointer_rtx)
4615 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
4622 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
4624 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
4625 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
4626 'h' OP is HIGH, prints %hi(X),
4627 'd' output integer constant in decimal,
4628 'z' if the operand is 0, use $0 instead of normal operand.
4629 'D' print second part of double-word register or memory operand.
4630 'L' print low-order register of double-word register operand.
4631 'M' print high-order register of double-word register operand.
4632 'C' print part of opcode for a branch condition.
4633 'F' print part of opcode for a floating-point branch condition.
4634 'N' print part of opcode for a branch condition, inverted.
4635 'W' print part of opcode for a floating-point branch condition, inverted.
4636 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
4637 'z' for (eq:?I ...), 'n' for (ne:?I ...).
4638 't' like 'T', but with the EQ/NE cases reversed
4639 'Y' for a CONST_INT X, print mips_fp_conditions[X]
4640 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
4641 'R' print the reloc associated with LO_SUM
4643 The punctuation characters are:
4645 '(' Turn on .set noreorder
4646 ')' Turn on .set reorder
4647 '[' Turn on .set noat
4649 '<' Turn on .set nomacro
4650 '>' Turn on .set macro
4651 '{' Turn on .set volatile (not GAS)
4652 '}' Turn on .set novolatile (not GAS)
4653 '&' Turn on .set noreorder if filling delay slots
4654 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
4655 '!' Turn on .set nomacro if filling delay slots
4656 '#' Print nop if in a .set noreorder section.
4657 '/' Like '#', but does nothing within a delayed branch sequence
4658 '?' Print 'l' if we are to use a branch likely instead of normal branch.
4659 '@' Print the name of the assembler temporary register (at or $1).
4660 '.' Print the name of the register with a hard-wired zero (zero or $0).
4661 '^' Print the name of the pic call-through register (t9 or $25).
4662 '$' Print the name of the stack pointer register (sp or $29).
4663 '+' Print the name of the gp register (usually gp or $28).
4664 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
4667 print_operand (FILE *file, rtx op, int letter)
4669 register enum rtx_code code;
4671 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
4676 if (mips_branch_likely)
4681 fputs (reg_names [GP_REG_FIRST + 1], file);
4685 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
4689 fputs (reg_names [GP_REG_FIRST + 0], file);
4693 fputs (reg_names[STACK_POINTER_REGNUM], file);
4697 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
4701 if (final_sequence != 0 && set_noreorder++ == 0)
4702 fputs (".set\tnoreorder\n\t", file);
4706 if (final_sequence != 0)
4708 if (set_noreorder++ == 0)
4709 fputs (".set\tnoreorder\n\t", file);
4711 if (set_nomacro++ == 0)
4712 fputs (".set\tnomacro\n\t", file);
4717 if (final_sequence != 0 && set_nomacro++ == 0)
4718 fputs ("\n\t.set\tnomacro", file);
4722 if (set_noreorder != 0)
4723 fputs ("\n\tnop", file);
4727 /* Print an extra newline so that the delayed insn is separated
4728 from the following ones. This looks neater and is consistent
4729 with non-nop delayed sequences. */
4730 if (set_noreorder != 0 && final_sequence == 0)
4731 fputs ("\n\tnop\n", file);
4735 if (set_noreorder++ == 0)
4736 fputs (".set\tnoreorder\n\t", file);
4740 if (set_noreorder == 0)
4741 error ("internal error: %%) found without a %%( in assembler pattern");
4743 else if (--set_noreorder == 0)
4744 fputs ("\n\t.set\treorder", file);
4749 if (set_noat++ == 0)
4750 fputs (".set\tnoat\n\t", file);
4755 error ("internal error: %%] found without a %%[ in assembler pattern");
4756 else if (--set_noat == 0)
4757 fputs ("\n\t.set\tat", file);
4762 if (set_nomacro++ == 0)
4763 fputs (".set\tnomacro\n\t", file);
4767 if (set_nomacro == 0)
4768 error ("internal error: %%> found without a %%< in assembler pattern");
4769 else if (--set_nomacro == 0)
4770 fputs ("\n\t.set\tmacro", file);
4775 if (set_volatile++ == 0)
4776 fputs ("#.set\tvolatile\n\t", file);
4780 if (set_volatile == 0)
4781 error ("internal error: %%} found without a %%{ in assembler pattern");
4782 else if (--set_volatile == 0)
4783 fputs ("\n\t#.set\tnovolatile", file);
4789 if (align_labels_log > 0)
4790 ASM_OUTPUT_ALIGN (file, align_labels_log);
4795 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
4804 error ("PRINT_OPERAND null pointer");
4808 code = GET_CODE (op);
4813 case EQ: fputs ("eq", file); break;
4814 case NE: fputs ("ne", file); break;
4815 case GT: fputs ("gt", file); break;
4816 case GE: fputs ("ge", file); break;
4817 case LT: fputs ("lt", file); break;
4818 case LE: fputs ("le", file); break;
4819 case GTU: fputs ("gtu", file); break;
4820 case GEU: fputs ("geu", file); break;
4821 case LTU: fputs ("ltu", file); break;
4822 case LEU: fputs ("leu", file); break;
4824 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
4827 else if (letter == 'N')
4830 case EQ: fputs ("ne", file); break;
4831 case NE: fputs ("eq", file); break;
4832 case GT: fputs ("le", file); break;
4833 case GE: fputs ("lt", file); break;
4834 case LT: fputs ("ge", file); break;
4835 case LE: fputs ("gt", file); break;
4836 case GTU: fputs ("leu", file); break;
4837 case GEU: fputs ("ltu", file); break;
4838 case LTU: fputs ("geu", file); break;
4839 case LEU: fputs ("gtu", file); break;
4841 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
4844 else if (letter == 'F')
4847 case EQ: fputs ("c1f", file); break;
4848 case NE: fputs ("c1t", file); break;
4850 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
4853 else if (letter == 'W')
4856 case EQ: fputs ("c1t", file); break;
4857 case NE: fputs ("c1f", file); break;
4859 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
4862 else if (letter == 'h')
4864 if (GET_CODE (op) == HIGH)
4867 print_operand_reloc (file, op, mips_hi_relocs);
4870 else if (letter == 'R')
4871 print_operand_reloc (file, op, mips_lo_relocs);
4873 else if (letter == 'Y')
4875 if (GET_CODE (op) == CONST_INT
4876 && ((unsigned HOST_WIDE_INT) INTVAL (op)
4877 < ARRAY_SIZE (mips_fp_conditions)))
4878 fputs (mips_fp_conditions[INTVAL (op)], file);
4880 output_operand_lossage ("invalid %%Y value");
4883 else if (letter == 'Z')
4887 print_operand (file, op, 0);
4892 else if (code == REG || code == SUBREG)
4894 register int regnum;
4897 regnum = REGNO (op);
4899 regnum = true_regnum (op);
4901 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
4902 || (letter == 'L' && WORDS_BIG_ENDIAN)
4906 fprintf (file, "%s", reg_names[regnum]);
4909 else if (code == MEM)
4912 output_address (plus_constant (XEXP (op, 0), 4));
4914 output_address (XEXP (op, 0));
4917 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
4918 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
4920 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
4921 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
4923 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
4924 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
4926 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
4927 fputs (reg_names[GP_REG_FIRST], file);
4929 else if (letter == 'd' || letter == 'x' || letter == 'X')
4930 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
4932 else if (letter == 'T' || letter == 't')
4934 int truth = (code == NE) == (letter == 'T');
4935 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
4938 else if (CONST_GP_P (op))
4939 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
4942 output_addr_const (file, op);
4946 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
4947 RELOCS is the array of relocations to use. */
4950 print_operand_reloc (FILE *file, rtx op, const char **relocs)
4952 enum mips_symbol_type symbol_type;
4955 HOST_WIDE_INT offset;
4957 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
4958 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
4960 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
4961 mips_split_const (op, &base, &offset);
4962 if (UNSPEC_ADDRESS_P (base))
4963 op = plus_constant (UNSPEC_ADDRESS (base), offset);
4965 fputs (relocs[symbol_type], file);
4966 output_addr_const (file, op);
4967 for (p = relocs[symbol_type]; *p != 0; p++)
4972 /* Output address operand X to FILE. */
4975 print_operand_address (FILE *file, rtx x)
4977 struct mips_address_info addr;
4979 if (mips_classify_address (&addr, x, word_mode, true))
4983 print_operand (file, addr.offset, 0);
4984 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
4987 case ADDRESS_LO_SUM:
4988 print_operand (file, addr.offset, 'R');
4989 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
4992 case ADDRESS_CONST_INT:
4993 output_addr_const (file, x);
4994 fprintf (file, "(%s)", reg_names[0]);
4997 case ADDRESS_SYMBOLIC:
4998 output_addr_const (file, x);
5004 /* When using assembler macros, keep track of all of small-data externs
5005 so that mips_file_end can emit the appropriate declarations for them.
5007 In most cases it would be safe (though pointless) to emit .externs
5008 for other symbols too. One exception is when an object is within
5009 the -G limit but declared by the user to be in a section other
5010 than .sbss or .sdata. */
5013 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5015 register struct extern_list *p;
5017 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5019 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5020 p->next = extern_head;
5022 p->size = int_size_in_bytes (TREE_TYPE (decl));
5026 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5028 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5029 p->next = extern_head;
5040 irix_output_external_libcall (rtx fun)
5042 register struct extern_list *p;
5044 if (mips_abi == ABI_32)
5046 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5047 p->next = extern_head;
5048 p->name = XSTR (fun, 0);
5055 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5056 put out a MIPS ECOFF file and a stab. */
5059 mips_output_filename (FILE *stream, const char *name)
5061 char ltext_label_name[100];
5063 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5065 if (write_symbols == DWARF2_DEBUG)
5067 else if (mips_output_filename_first_time)
5069 mips_output_filename_first_time = 0;
5070 num_source_filenames += 1;
5071 current_function_file = name;
5072 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5073 output_quoted_string (stream, name);
5074 putc ('\n', stream);
5077 /* If we are emitting stabs, let dbxout.c handle this (except for
5078 the mips_output_filename_first_time case). */
5079 else if (write_symbols == DBX_DEBUG)
5082 else if (name != current_function_file
5083 && strcmp (name, current_function_file) != 0)
5085 num_source_filenames += 1;
5086 current_function_file = name;
5087 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5088 output_quoted_string (stream, name);
5089 putc ('\n', stream);
5093 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5094 that should be written before the opening quote, such as "\t.ascii\t"
5095 for real string data or "\t# " for a comment. */
5098 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5103 register const unsigned char *string =
5104 (const unsigned char *)string_param;
5106 fprintf (stream, "%s\"", prefix);
5107 for (i = 0; i < len; i++)
5109 register int c = string[i];
5115 putc ('\\', stream);
5120 case TARGET_NEWLINE:
5121 fputs ("\\n", stream);
5123 && (((c = string[i+1]) >= '\040' && c <= '~')
5124 || c == TARGET_TAB))
5125 cur_pos = 32767; /* break right here */
5131 fputs ("\\t", stream);
5136 fputs ("\\f", stream);
5141 fputs ("\\b", stream);
5146 fputs ("\\r", stream);
5151 if (c >= ' ' && c < 0177)
5158 fprintf (stream, "\\%03o", c);
5163 if (cur_pos > 72 && i+1 < len)
5166 fprintf (stream, "\"\n%s\"", prefix);
5169 fprintf (stream, "\"\n");
5172 /* Implement TARGET_ASM_FILE_START. */
5175 mips_file_start (void)
5177 default_file_start ();
5181 /* Generate a special section to describe the ABI switches used to
5182 produce the resultant binary. This used to be done by the assembler
5183 setting bits in the ELF header's flags field, but we have run out of
5184 bits. GDB needs this information in order to be able to correctly
5185 debug these binaries. See the function mips_gdbarch_init() in
5186 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5187 causes unnecessary IRIX 6 ld warnings. */
5188 const char * abi_string = NULL;
5192 case ABI_32: abi_string = "abi32"; break;
5193 case ABI_N32: abi_string = "abiN32"; break;
5194 case ABI_64: abi_string = "abi64"; break;
5195 case ABI_O64: abi_string = "abiO64"; break;
5196 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5200 /* Note - we use fprintf directly rather than called named_section()
5201 because in this way we can avoid creating an allocated section. We
5202 do not want this section to take up any space in the running
5204 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5206 /* There is no ELF header flag to distinguish long32 forms of the
5207 EABI from long64 forms. Emit a special section to help tools
5209 if (mips_abi == ABI_EABI)
5210 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5211 TARGET_LONG64 ? 64 : 32);
5213 /* Restore the default section. */
5214 fprintf (asm_out_file, "\t.previous\n");
5217 /* Generate the pseudo ops that System V.4 wants. */
5218 if (TARGET_ABICALLS)
5219 /* ??? but do not want this (or want pic0) if -non-shared? */
5220 fprintf (asm_out_file, "\t.abicalls\n");
5223 fprintf (asm_out_file, "\t.set\tmips16\n");
5225 if (flag_verbose_asm)
5226 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5228 mips_section_threshold, mips_arch_info->name, mips_isa);
5231 #ifdef BSS_SECTION_ASM_OP
5232 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5233 in the use of sbss. */
5236 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5237 unsigned HOST_WIDE_INT size, int align)
5239 extern tree last_assemble_variable_decl;
5241 if (mips_in_small_data_p (decl))
5242 named_section (0, ".sbss", 0);
5245 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5246 last_assemble_variable_decl = decl;
5247 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5248 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5252 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5253 .externs for any small-data variables that turned out to be external. */
5256 mips_file_end (void)
5259 struct extern_list *p;
5263 fputs ("\n", asm_out_file);
5265 for (p = extern_head; p != 0; p = p->next)
5267 name_tree = get_identifier (p->name);
5269 /* Positively ensure only one .extern for any given symbol. */
5270 if (!TREE_ASM_WRITTEN (name_tree)
5271 && TREE_SYMBOL_REFERENCED (name_tree))
5273 TREE_ASM_WRITTEN (name_tree) = 1;
5274 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5275 `.global name .text' directive for every used but
5276 undefined function. If we don't, the linker may perform
5277 an optimization (skipping over the insns that set $gp)
5278 when it is unsafe. */
5279 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5281 fputs ("\t.globl ", asm_out_file);
5282 assemble_name (asm_out_file, p->name);
5283 fputs (" .text\n", asm_out_file);
5287 fputs ("\t.extern\t", asm_out_file);
5288 assemble_name (asm_out_file, p->name);
5289 fprintf (asm_out_file, ", %d\n", p->size);
5296 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5297 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
5300 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5301 unsigned HOST_WIDE_INT size,
5304 /* If the target wants uninitialized const declarations in
5305 .rdata then don't put them in .comm. */
5306 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5307 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5308 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5310 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5311 targetm.asm_out.globalize_label (stream, name);
5313 readonly_data_section ();
5314 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5315 mips_declare_object (stream, name, "",
5316 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
5320 mips_declare_common_object (stream, name, "\n\t.comm\t",
5324 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5325 NAME is the name of the object and ALIGN is the required alignment
5326 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
5327 alignment argument. */
5330 mips_declare_common_object (FILE *stream, const char *name,
5331 const char *init_string,
5332 unsigned HOST_WIDE_INT size,
5333 unsigned int align, bool takes_alignment_p)
5335 if (!takes_alignment_p)
5337 size += (align / BITS_PER_UNIT) - 1;
5338 size -= size % (align / BITS_PER_UNIT);
5339 mips_declare_object (stream, name, init_string,
5340 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
5343 mips_declare_object (stream, name, init_string,
5344 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5345 size, align / BITS_PER_UNIT);
5348 /* Emit either a label, .comm, or .lcomm directive. When using assembler
5349 macros, mark the symbol as written so that mips_file_end won't emit an
5350 .extern for it. STREAM is the output file, NAME is the name of the
5351 symbol, INIT_STRING is the string that should be written before the
5352 symbol and FINAL_STRING is the string that should be written after it.
5353 FINAL_STRING is a printf() format that consumes the remaining arguments. */
5356 mips_declare_object (FILE *stream, const char *name, const char *init_string,
5357 const char *final_string, ...)
5361 fputs (init_string, stream);
5362 assemble_name (stream, name);
5363 va_start (ap, final_string);
5364 vfprintf (stream, final_string, ap);
5367 if (!TARGET_EXPLICIT_RELOCS)
5369 tree name_tree = get_identifier (name);
5370 TREE_ASM_WRITTEN (name_tree) = 1;
5374 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
5375 extern int size_directive_output;
5377 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
5378 definitions except that it uses mips_declare_object() to emit the label. */
5381 mips_declare_object_name (FILE *stream, const char *name,
5382 tree decl ATTRIBUTE_UNUSED)
5384 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5385 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
5388 size_directive_output = 0;
5389 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
5393 size_directive_output = 1;
5394 size = int_size_in_bytes (TREE_TYPE (decl));
5395 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5398 mips_declare_object (stream, name, "", ":\n", 0);
5401 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
5404 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
5408 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
5409 if (!flag_inhibit_size_directive
5410 && DECL_SIZE (decl) != 0
5411 && !at_end && top_level
5412 && DECL_INITIAL (decl) == error_mark_node
5413 && !size_directive_output)
5417 size_directive_output = 1;
5418 size = int_size_in_bytes (TREE_TYPE (decl));
5419 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5424 /* Return true if X is a small data address that can be rewritten
5428 mips_rewrite_small_data_p (rtx x)
5430 enum mips_symbol_type symbol_type;
5432 return (TARGET_EXPLICIT_RELOCS
5433 && mips_symbolic_constant_p (x, &symbol_type)
5434 && symbol_type == SYMBOL_SMALL_DATA);
5438 /* A for_each_rtx callback for mips_small_data_pattern_p. */
5441 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5443 if (GET_CODE (*loc) == LO_SUM)
5446 return mips_rewrite_small_data_p (*loc);
5449 /* Return true if OP refers to small data symbols directly, not through
5453 mips_small_data_pattern_p (rtx op)
5455 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
5458 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
5461 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5463 if (mips_rewrite_small_data_p (*loc))
5464 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
5466 if (GET_CODE (*loc) == LO_SUM)
5472 /* If possible, rewrite OP so that it refers to small data using
5473 explicit relocations. */
5476 mips_rewrite_small_data (rtx op)
5478 op = copy_insn (op);
5479 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
5483 /* Return true if the current function has an insn that implicitly
5487 mips_function_has_gp_insn (void)
5489 /* Don't bother rechecking if we found one last time. */
5490 if (!cfun->machine->has_gp_insn_p)
5494 push_topmost_sequence ();
5495 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5497 && GET_CODE (PATTERN (insn)) != USE
5498 && GET_CODE (PATTERN (insn)) != CLOBBER
5499 && (get_attr_got (insn) != GOT_UNSET
5500 || small_data_pattern (PATTERN (insn), VOIDmode)))
5502 pop_topmost_sequence ();
5504 cfun->machine->has_gp_insn_p = (insn != 0);
5506 return cfun->machine->has_gp_insn_p;
5510 /* Return the register that should be used as the global pointer
5511 within this function. Return 0 if the function doesn't need
5512 a global pointer. */
5515 mips_global_pointer (void)
5519 /* $gp is always available in non-abicalls code. */
5520 if (!TARGET_ABICALLS)
5521 return GLOBAL_POINTER_REGNUM;
5523 /* We must always provide $gp when it is used implicitly. */
5524 if (!TARGET_EXPLICIT_RELOCS)
5525 return GLOBAL_POINTER_REGNUM;
5527 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
5529 if (current_function_profile)
5530 return GLOBAL_POINTER_REGNUM;
5532 /* If the function has a nonlocal goto, $gp must hold the correct
5533 global pointer for the target function. */
5534 if (current_function_has_nonlocal_goto)
5535 return GLOBAL_POINTER_REGNUM;
5537 /* If the gp is never referenced, there's no need to initialize it.
5538 Note that reload can sometimes introduce constant pool references
5539 into a function that otherwise didn't need them. For example,
5540 suppose we have an instruction like:
5542 (set (reg:DF R1) (float:DF (reg:SI R2)))
5544 If R2 turns out to be constant such as 1, the instruction may have a
5545 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
5546 using this constant if R2 doesn't get allocated to a register.
5548 In cases like these, reload will have added the constant to the pool
5549 but no instruction will yet refer to it. */
5550 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
5551 && !current_function_uses_const_pool
5552 && !mips_function_has_gp_insn ())
5555 /* We need a global pointer, but perhaps we can use a call-clobbered
5556 register instead of $gp. */
5557 if (TARGET_NEWABI && current_function_is_leaf)
5558 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5559 if (!regs_ever_live[regno]
5560 && call_used_regs[regno]
5561 && !fixed_regs[regno]
5562 && regno != PIC_FUNCTION_ADDR_REGNUM)
5565 return GLOBAL_POINTER_REGNUM;
5569 /* Return true if the current function must save REGNO. */
5572 mips_save_reg_p (unsigned int regno)
5574 /* We only need to save $gp for NewABI PIC. */
5575 if (regno == GLOBAL_POINTER_REGNUM)
5576 return (TARGET_ABICALLS && TARGET_NEWABI
5577 && cfun->machine->global_pointer == regno);
5579 /* Check call-saved registers. */
5580 if (regs_ever_live[regno] && !call_used_regs[regno])
5583 /* We need to save the old frame pointer before setting up a new one. */
5584 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5587 /* We need to save the incoming return address if it is ever clobbered
5588 within the function. */
5589 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
5596 return_type = DECL_RESULT (current_function_decl);
5598 /* $18 is a special case in mips16 code. It may be used to call
5599 a function which returns a floating point value, but it is
5600 marked in call_used_regs. */
5601 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
5604 /* $31 is also a special case. It will be used to copy a return
5605 value into the floating point registers if the return value is
5607 if (regno == GP_REG_FIRST + 31
5608 && mips16_hard_float
5609 && !aggregate_value_p (return_type, current_function_decl)
5610 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
5611 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
5619 /* Return the bytes needed to compute the frame pointer from the current
5620 stack pointer. SIZE is the size (in bytes) of the local variables.
5622 Mips stack frames look like:
5624 Before call After call
5625 +-----------------------+ +-----------------------+
5628 | caller's temps. | | caller's temps. |
5630 +-----------------------+ +-----------------------+
5632 | arguments on stack. | | arguments on stack. |
5634 +-----------------------+ +-----------------------+
5635 | 4 words to save | | 4 words to save |
5636 | arguments passed | | arguments passed |
5637 | in registers, even | | in registers, even |
5638 SP->| if not passed. | VFP->| if not passed. |
5639 +-----------------------+ +-----------------------+
5641 | fp register save |
5643 +-----------------------+
5645 | gp register save |
5647 +-----------------------+
5651 +-----------------------+
5653 | alloca allocations |
5655 +-----------------------+
5657 | GP save for V.4 abi |
5659 +-----------------------+
5661 | arguments on stack |
5663 +-----------------------+
5665 | arguments passed |
5666 | in registers, even |
5667 low SP->| if not passed. |
5668 memory +-----------------------+
5673 compute_frame_size (HOST_WIDE_INT size)
5676 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
5677 HOST_WIDE_INT var_size; /* # bytes that variables take up */
5678 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
5679 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
5680 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
5681 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
5682 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
5683 unsigned int mask; /* mask of saved gp registers */
5684 unsigned int fmask; /* mask of saved fp registers */
5686 cfun->machine->global_pointer = mips_global_pointer ();
5692 var_size = MIPS_STACK_ALIGN (size);
5693 args_size = current_function_outgoing_args_size;
5694 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
5696 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
5697 functions. If the function has local variables, we're committed
5698 to allocating it anyway. Otherwise reclaim it here. */
5699 if (var_size == 0 && current_function_is_leaf)
5700 cprestore_size = args_size = 0;
5702 /* The MIPS 3.0 linker does not like functions that dynamically
5703 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
5704 looks like we are trying to create a second frame pointer to the
5705 function, so allocate some stack space to make it happy. */
5707 if (args_size == 0 && current_function_calls_alloca)
5708 args_size = 4 * UNITS_PER_WORD;
5710 total_size = var_size + args_size + cprestore_size;
5712 /* Calculate space needed for gp registers. */
5713 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5714 if (mips_save_reg_p (regno))
5716 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5717 mask |= 1 << (regno - GP_REG_FIRST);
5720 /* We need to restore these for the handler. */
5721 if (current_function_calls_eh_return)
5726 regno = EH_RETURN_DATA_REGNO (i);
5727 if (regno == INVALID_REGNUM)
5729 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5730 mask |= 1 << (regno - GP_REG_FIRST);
5734 /* This loop must iterate over the same space as its companion in
5735 save_restore_insns. */
5736 for (regno = (FP_REG_LAST - FP_INC + 1);
5737 regno >= FP_REG_FIRST;
5740 if (mips_save_reg_p (regno))
5742 fp_reg_size += FP_INC * UNITS_PER_FPREG;
5743 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
5747 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
5748 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
5750 /* Add in space reserved on the stack by the callee for storing arguments
5751 passed in registers. */
5753 total_size += MIPS_STACK_ALIGN (current_function_pretend_args_size);
5755 /* Save other computed information. */
5756 cfun->machine->frame.total_size = total_size;
5757 cfun->machine->frame.var_size = var_size;
5758 cfun->machine->frame.args_size = args_size;
5759 cfun->machine->frame.cprestore_size = cprestore_size;
5760 cfun->machine->frame.gp_reg_size = gp_reg_size;
5761 cfun->machine->frame.fp_reg_size = fp_reg_size;
5762 cfun->machine->frame.mask = mask;
5763 cfun->machine->frame.fmask = fmask;
5764 cfun->machine->frame.initialized = reload_completed;
5765 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
5766 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
5770 HOST_WIDE_INT offset;
5772 offset = (args_size + cprestore_size + var_size
5773 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
5774 cfun->machine->frame.gp_sp_offset = offset;
5775 cfun->machine->frame.gp_save_offset = offset - total_size;
5779 cfun->machine->frame.gp_sp_offset = 0;
5780 cfun->machine->frame.gp_save_offset = 0;
5785 HOST_WIDE_INT offset;
5787 offset = (args_size + cprestore_size + var_size
5788 + gp_reg_rounded + fp_reg_size
5789 - FP_INC * UNITS_PER_FPREG);
5790 cfun->machine->frame.fp_sp_offset = offset;
5791 cfun->machine->frame.fp_save_offset = offset - total_size;
5795 cfun->machine->frame.fp_sp_offset = 0;
5796 cfun->machine->frame.fp_save_offset = 0;
5799 /* Ok, we're done. */
5803 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
5804 pointer or argument pointer. TO is either the stack pointer or
5805 hard frame pointer. */
5808 mips_initial_elimination_offset (int from, int to)
5810 HOST_WIDE_INT offset;
5812 compute_frame_size (get_frame_size ());
5814 /* Set OFFSET to the offset from the stack pointer. */
5817 case FRAME_POINTER_REGNUM:
5821 case ARG_POINTER_REGNUM:
5822 offset = cfun->machine->frame.total_size;
5824 offset -= current_function_pretend_args_size;
5831 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
5832 offset -= cfun->machine->frame.args_size;
5837 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
5838 back to a previous frame. */
5840 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
5845 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
5848 /* Use FN to save or restore register REGNO. MODE is the register's
5849 mode and OFFSET is the offset of its save slot from the current
5853 mips_save_restore_reg (enum machine_mode mode, int regno,
5854 HOST_WIDE_INT offset, mips_save_restore_fn fn)
5858 mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
5860 fn (gen_rtx_REG (mode, regno), mem);
5864 /* Call FN for each register that is saved by the current function.
5865 SP_OFFSET is the offset of the current stack pointer from the start
5869 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
5871 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
5873 enum machine_mode fpr_mode;
5874 HOST_WIDE_INT offset;
5877 /* Save registers starting from high to low. The debuggers prefer at least
5878 the return register be stored at func+4, and also it allows us not to
5879 need a nop in the epilog if at least one register is reloaded in
5880 addition to return address. */
5881 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
5882 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
5883 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
5885 mips_save_restore_reg (gpr_mode, regno, offset, fn);
5886 offset -= GET_MODE_SIZE (gpr_mode);
5889 /* This loop must iterate over the same space as its companion in
5890 compute_frame_size. */
5891 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
5892 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
5893 for (regno = (FP_REG_LAST - FP_INC + 1);
5894 regno >= FP_REG_FIRST;
5896 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
5898 mips_save_restore_reg (fpr_mode, regno, offset, fn);
5899 offset -= GET_MODE_SIZE (fpr_mode);
5904 /* If we're generating n32 or n64 abicalls, and the current function
5905 does not use $28 as its global pointer, emit a cplocal directive.
5906 Use pic_offset_table_rtx as the argument to the directive. */
5909 mips_output_cplocal (void)
5911 if (!TARGET_EXPLICIT_RELOCS
5912 && cfun->machine->global_pointer > 0
5913 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
5914 output_asm_insn (".cplocal %+", 0);
5917 /* If we're generating n32 or n64 abicalls, emit instructions
5918 to set up the global pointer. */
5921 mips_emit_loadgp (void)
5923 if (TARGET_ABICALLS && TARGET_NEWABI && cfun->machine->global_pointer > 0)
5925 rtx addr, offset, incoming_address;
5927 addr = XEXP (DECL_RTL (current_function_decl), 0);
5928 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
5929 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
5930 emit_insn (gen_loadgp (offset, incoming_address));
5931 if (!TARGET_EXPLICIT_RELOCS)
5932 emit_insn (gen_loadgp_blockage ());
5936 /* Set up the stack and frame (if desired) for the function. */
5939 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5942 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
5944 #ifdef SDB_DEBUGGING_INFO
5945 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
5946 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
5949 /* In mips16 mode, we may need to generate a 32 bit to handle
5950 floating point arguments. The linker will arrange for any 32 bit
5951 functions to call this stub, which will then jump to the 16 bit
5953 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
5954 && current_function_args_info.fp_code != 0)
5955 build_mips16_function_stub (file);
5957 if (!FUNCTION_NAME_ALREADY_DECLARED)
5959 /* Get the function name the same way that toplev.c does before calling
5960 assemble_start_function. This is needed so that the name used here
5961 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
5962 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5964 if (!flag_inhibit_size_directive)
5966 fputs ("\t.ent\t", file);
5967 assemble_name (file, fnname);
5971 assemble_name (file, fnname);
5972 fputs (":\n", file);
5975 /* Stop mips_file_end from treating this function as external. */
5976 if (TARGET_IRIX && mips_abi == ABI_32)
5977 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
5979 if (!flag_inhibit_size_directive)
5981 /* .frame FRAMEREG, FRAMESIZE, RETREG */
5983 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
5984 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
5985 ", args= " HOST_WIDE_INT_PRINT_DEC
5986 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
5987 (reg_names[(frame_pointer_needed)
5988 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
5989 ((frame_pointer_needed && TARGET_MIPS16)
5990 ? tsize - cfun->machine->frame.args_size
5992 reg_names[GP_REG_FIRST + 31],
5993 cfun->machine->frame.var_size,
5994 cfun->machine->frame.num_gp,
5995 cfun->machine->frame.num_fp,
5996 cfun->machine->frame.args_size,
5997 cfun->machine->frame.cprestore_size);
5999 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6000 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6001 cfun->machine->frame.mask,
6002 cfun->machine->frame.gp_save_offset);
6003 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6004 cfun->machine->frame.fmask,
6005 cfun->machine->frame.fp_save_offset);
6008 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6009 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6012 if (TARGET_ABICALLS && !TARGET_NEWABI && cfun->machine->global_pointer > 0)
6014 /* Handle the initialization of $gp for SVR4 PIC. */
6015 if (!cfun->machine->all_noreorder_p)
6016 output_asm_insn ("%(.cpload\t%^%)", 0);
6018 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6020 else if (cfun->machine->all_noreorder_p)
6021 output_asm_insn ("%(%<", 0);
6023 /* Tell the assembler which register we're using as the global
6024 pointer. This is needed for thunks, since they can use either
6025 explicit relocs or assembler macros. */
6026 mips_output_cplocal ();
6029 /* Make the last instruction frame related and note that it performs
6030 the operation described by FRAME_PATTERN. */
6033 mips_set_frame_expr (rtx frame_pattern)
6037 insn = get_last_insn ();
6038 RTX_FRAME_RELATED_P (insn) = 1;
6039 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6045 /* Return a frame-related rtx that stores REG at MEM.
6046 REG must be a single register. */
6049 mips_frame_set (rtx mem, rtx reg)
6051 rtx set = gen_rtx_SET (VOIDmode, mem, reg);
6052 RTX_FRAME_RELATED_P (set) = 1;
6057 /* Save register REG to MEM. Make the instruction frame-related. */
6060 mips_save_reg (rtx reg, rtx mem)
6062 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6066 if (mips_split_64bit_move_p (mem, reg))
6067 mips_split_64bit_move (mem, reg);
6069 emit_move_insn (mem, reg);
6071 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6072 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6073 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6078 && REGNO (reg) != GP_REG_FIRST + 31
6079 && !M16_REG_P (REGNO (reg)))
6081 /* Save a non-mips16 register by moving it through a temporary.
6082 We don't need to do this for $31 since there's a special
6083 instruction for it. */
6084 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6085 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6088 emit_move_insn (mem, reg);
6090 mips_set_frame_expr (mips_frame_set (mem, reg));
6095 /* Expand the prologue into a bunch of separate insns. */
6098 mips_expand_prologue (void)
6102 if (cfun->machine->global_pointer > 0)
6103 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6105 size = compute_frame_size (get_frame_size ());
6107 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6108 bytes beforehand; this is enough to cover the register save area
6109 without going out of range. */
6110 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6112 HOST_WIDE_INT step1;
6114 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6115 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6117 GEN_INT (-step1)))) = 1;
6119 mips_for_each_saved_reg (size, mips_save_reg);
6122 /* Allocate the rest of the frame. */
6125 if (SMALL_OPERAND (-size))
6126 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6128 GEN_INT (-size)))) = 1;
6131 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6134 /* There are no instructions to add or subtract registers
6135 from the stack pointer, so use the frame pointer as a
6136 temporary. We should always be using a frame pointer
6137 in this case anyway. */
6138 gcc_assert (frame_pointer_needed);
6139 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6140 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6141 hard_frame_pointer_rtx,
6142 MIPS_PROLOGUE_TEMP (Pmode)));
6143 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6146 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6148 MIPS_PROLOGUE_TEMP (Pmode)));
6150 /* Describe the combined effect of the previous instructions. */
6152 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6153 plus_constant (stack_pointer_rtx, -size)));
6157 /* Set up the frame pointer, if we're using one. In mips16 code,
6158 we point the frame pointer ahead of the outgoing argument area.
6159 This should allow more variables & incoming arguments to be
6160 accessed with unextended instructions. */
6161 if (frame_pointer_needed)
6163 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6165 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6167 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6172 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6173 stack_pointer_rtx)) = 1;
6176 /* If generating o32/o64 abicalls, save $gp on the stack. */
6177 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6178 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6180 mips_emit_loadgp ();
6182 /* If we are profiling, make sure no instructions are scheduled before
6183 the call to mcount. */
6185 if (current_function_profile)
6186 emit_insn (gen_blockage ());
6189 /* Do any necessary cleanup after a function to restore stack, frame,
6192 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6195 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6196 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6198 /* Reinstate the normal $gp. */
6199 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6200 mips_output_cplocal ();
6202 if (cfun->machine->all_noreorder_p)
6204 /* Avoid using %>%) since it adds excess whitespace. */
6205 output_asm_insn (".set\tmacro", 0);
6206 output_asm_insn (".set\treorder", 0);
6207 set_noreorder = set_nomacro = 0;
6210 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6214 /* Get the function name the same way that toplev.c does before calling
6215 assemble_start_function. This is needed so that the name used here
6216 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6217 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6218 fputs ("\t.end\t", file);
6219 assemble_name (file, fnname);
6224 /* Emit instructions to restore register REG from slot MEM. */
6227 mips_restore_reg (rtx reg, rtx mem)
6229 /* There's no mips16 instruction to load $31 directly. Load into
6230 $7 instead and adjust the return insn appropriately. */
6231 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6232 reg = gen_rtx_REG (GET_MODE (reg), 7);
6234 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6236 /* Can't restore directly; move through a temporary. */
6237 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6238 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6241 emit_move_insn (reg, mem);
6245 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6246 if this epilogue precedes a sibling call, false if it is for a normal
6247 "epilogue" pattern. */
6250 mips_expand_epilogue (int sibcall_p)
6252 HOST_WIDE_INT step1, step2;
6255 if (!sibcall_p && mips_can_use_return_insn ())
6257 emit_jump_insn (gen_return ());
6261 /* Split the frame into two. STEP1 is the amount of stack we should
6262 deallocate before restoring the registers. STEP2 is the amount we
6263 should deallocate afterwards.
6265 Start off by assuming that no registers need to be restored. */
6266 step1 = cfun->machine->frame.total_size;
6269 /* Work out which register holds the frame address. Account for the
6270 frame pointer offset used by mips16 code. */
6271 if (!frame_pointer_needed)
6272 base = stack_pointer_rtx;
6275 base = hard_frame_pointer_rtx;
6277 step1 -= cfun->machine->frame.args_size;
6280 /* If we need to restore registers, deallocate as much stack as
6281 possible in the second step without going out of range. */
6282 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6284 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
6288 /* Set TARGET to BASE + STEP1. */
6294 /* Get an rtx for STEP1 that we can add to BASE. */
6295 adjust = GEN_INT (step1);
6296 if (!SMALL_OPERAND (step1))
6298 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
6299 adjust = MIPS_EPILOGUE_TEMP (Pmode);
6302 /* Normal mode code can copy the result straight into $sp. */
6304 target = stack_pointer_rtx;
6306 emit_insn (gen_add3_insn (target, base, adjust));
6309 /* Copy TARGET into the stack pointer. */
6310 if (target != stack_pointer_rtx)
6311 emit_move_insn (stack_pointer_rtx, target);
6313 /* If we're using addressing macros for n32/n64 abicalls, $gp is
6314 implicitly used by all SYMBOL_REFs. We must emit a blockage
6315 insn before restoring it. */
6316 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
6317 emit_insn (gen_blockage ());
6319 /* Restore the registers. */
6320 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
6323 /* Deallocate the final bit of the frame. */
6325 emit_insn (gen_add3_insn (stack_pointer_rtx,
6329 /* Add in the __builtin_eh_return stack adjustment. We need to
6330 use a temporary in mips16 code. */
6331 if (current_function_calls_eh_return)
6335 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
6336 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
6337 MIPS_EPILOGUE_TEMP (Pmode),
6338 EH_RETURN_STACKADJ_RTX));
6339 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
6342 emit_insn (gen_add3_insn (stack_pointer_rtx,
6344 EH_RETURN_STACKADJ_RTX));
6349 /* The mips16 loads the return address into $7, not $31. */
6350 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
6351 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6352 GP_REG_FIRST + 7)));
6354 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6355 GP_REG_FIRST + 31)));
6359 /* Return nonzero if this function is known to have a null epilogue.
6360 This allows the optimizer to omit jumps to jumps if no stack
6364 mips_can_use_return_insn (void)
6368 if (! reload_completed)
6371 if (regs_ever_live[31] || current_function_profile)
6374 return_type = DECL_RESULT (current_function_decl);
6376 /* In mips16 mode, a function which returns a floating point value
6377 needs to arrange to copy the return value into the floating point
6380 && mips16_hard_float
6381 && ! aggregate_value_p (return_type, current_function_decl)
6382 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6383 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6386 if (cfun->machine->frame.initialized)
6387 return cfun->machine->frame.total_size == 0;
6389 return compute_frame_size (get_frame_size ()) == 0;
6392 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
6393 in order to avoid duplicating too much logic from elsewhere. */
6396 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
6397 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
6400 rtx this, temp1, temp2, insn, fnaddr;
6402 /* Pretend to be a post-reload pass while generating rtl. */
6404 reload_completed = 1;
6405 reset_block_changes ();
6407 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
6408 for TARGET_NEWABI since the latter is a call-saved register. */
6409 if (TARGET_ABICALLS)
6410 cfun->machine->global_pointer
6411 = REGNO (pic_offset_table_rtx)
6412 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
6414 /* Set up the global pointer for n32 or n64 abicalls. */
6415 mips_emit_loadgp ();
6417 /* We need two temporary registers in some cases. */
6418 temp1 = gen_rtx_REG (Pmode, 2);
6419 temp2 = gen_rtx_REG (Pmode, 3);
6421 /* Find out which register contains the "this" pointer. */
6422 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
6423 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
6425 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
6427 /* Add DELTA to THIS. */
6430 rtx offset = GEN_INT (delta);
6431 if (!SMALL_OPERAND (delta))
6433 emit_move_insn (temp1, offset);
6436 emit_insn (gen_add3_insn (this, this, offset));
6439 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
6440 if (vcall_offset != 0)
6444 /* Set TEMP1 to *THIS. */
6445 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
6447 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
6448 addr = mips_add_offset (temp2, temp1, vcall_offset);
6450 /* Load the offset and add it to THIS. */
6451 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
6452 emit_insn (gen_add3_insn (this, this, temp1));
6455 /* Jump to the target function. Use a sibcall if direct jumps are
6456 allowed, otherwise load the address into a register first. */
6457 fnaddr = XEXP (DECL_RTL (function), 0);
6458 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
6460 /* This is messy. gas treats "la $25,foo" as part of a call
6461 sequence and may allow a global "foo" to be lazily bound.
6462 The general move patterns therefore reject this combination.
6464 In this context, lazy binding would actually be OK for o32 and o64,
6465 but it's still wrong for n32 and n64; see mips_load_call_address.
6466 We must therefore load the address via a temporary register if
6467 mips_dangerous_for_la25_p.
6469 If we jump to the temporary register rather than $25, the assembler
6470 can use the move insn to fill the jump's delay slot. */
6471 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
6472 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6473 mips_load_call_address (temp1, fnaddr, true);
6475 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
6476 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
6477 emit_jump_insn (gen_indirect_jump (temp1));
6481 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
6482 SIBLING_CALL_P (insn) = 1;
6485 /* Run just enough of rest_of_compilation. This sequence was
6486 "borrowed" from alpha.c. */
6487 insn = get_insns ();
6488 insn_locators_initialize ();
6489 split_all_insns_noflow ();
6491 mips16_lay_out_constants ();
6492 shorten_branches (insn);
6493 final_start_function (insn, file, 1);
6494 final (insn, file, 1, 0);
6495 final_end_function ();
6497 /* Clean up the vars set above. Note that final_end_function resets
6498 the global pointer for us. */
6499 reload_completed = 0;
6503 /* Returns nonzero if X contains a SYMBOL_REF. */
6506 symbolic_expression_p (rtx x)
6508 if (GET_CODE (x) == SYMBOL_REF)
6511 if (GET_CODE (x) == CONST)
6512 return symbolic_expression_p (XEXP (x, 0));
6515 return symbolic_expression_p (XEXP (x, 0));
6517 if (ARITHMETIC_P (x))
6518 return (symbolic_expression_p (XEXP (x, 0))
6519 || symbolic_expression_p (XEXP (x, 1)));
6524 /* Choose the section to use for the constant rtx expression X that has
6528 mips_select_rtx_section (enum machine_mode mode, rtx x,
6529 unsigned HOST_WIDE_INT align)
6533 /* In mips16 mode, the constant table always goes in the same section
6534 as the function, so that constants can be loaded using PC relative
6536 function_section (current_function_decl);
6538 else if (TARGET_EMBEDDED_DATA)
6540 /* For embedded applications, always put constants in read-only data,
6541 in order to reduce RAM usage. */
6542 mergeable_constant_section (mode, align, 0);
6546 /* For hosted applications, always put constants in small data if
6547 possible, as this gives the best performance. */
6548 /* ??? Consider using mergeable small data sections. */
6550 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
6551 && mips_section_threshold > 0)
6552 named_section (0, ".sdata", 0);
6553 else if (flag_pic && symbolic_expression_p (x))
6554 named_section (0, ".data.rel.ro", 3);
6556 mergeable_constant_section (mode, align, 0);
6560 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6562 The complication here is that, with the combination TARGET_ABICALLS
6563 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6564 therefore not be included in the read-only part of a DSO. Handle such
6565 cases by selecting a normal data section instead of a read-only one.
6566 The logic apes that in default_function_rodata_section. */
6569 mips_function_rodata_section (tree decl)
6571 if (!TARGET_ABICALLS || TARGET_GPWORD)
6572 default_function_rodata_section (decl);
6573 else if (decl && DECL_SECTION_NAME (decl))
6575 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6576 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6578 char *rname = ASTRDUP (name);
6580 named_section_real (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6582 else if (flag_function_sections && flag_data_sections
6583 && strncmp (name, ".text.", 6) == 0)
6585 char *rname = ASTRDUP (name);
6586 memcpy (rname + 1, "data", 4);
6587 named_section_flags (rname, SECTION_WRITE);
6596 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
6597 access DECL using %gp_rel(...)($gp). */
6600 mips_in_small_data_p (tree decl)
6604 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6607 /* We don't yet generate small-data references for -mabicalls. See related
6608 -G handling in override_options. */
6609 if (TARGET_ABICALLS)
6612 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6616 /* Reject anything that isn't in a known small-data section. */
6617 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6618 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6621 /* If a symbol is defined externally, the assembler will use the
6622 usual -G rules when deciding how to implement macros. */
6623 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
6626 else if (TARGET_EMBEDDED_DATA)
6628 /* Don't put constants into the small data section: we want them
6629 to be in ROM rather than RAM. */
6630 if (TREE_CODE (decl) != VAR_DECL)
6633 if (TREE_READONLY (decl)
6634 && !TREE_SIDE_EFFECTS (decl)
6635 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6639 size = int_size_in_bytes (TREE_TYPE (decl));
6640 return (size > 0 && size <= mips_section_threshold);
6643 /* See whether VALTYPE is a record whose fields should be returned in
6644 floating-point registers. If so, return the number of fields and
6645 list them in FIELDS (which should have two elements). Return 0
6648 For n32 & n64, a structure with one or two fields is returned in
6649 floating-point registers as long as every field has a floating-point
6653 mips_fpr_return_fields (tree valtype, tree *fields)
6661 if (TREE_CODE (valtype) != RECORD_TYPE)
6665 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
6667 if (TREE_CODE (field) != FIELD_DECL)
6670 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
6676 fields[i++] = field;
6682 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
6683 a value in the most significant part of $2/$3 if:
6685 - the target is big-endian;
6687 - the value has a structure or union type (we generalize this to
6688 cover aggregates from other languages too); and
6690 - the structure is not returned in floating-point registers. */
6693 mips_return_in_msb (tree valtype)
6697 return (TARGET_NEWABI
6698 && TARGET_BIG_ENDIAN
6699 && AGGREGATE_TYPE_P (valtype)
6700 && mips_fpr_return_fields (valtype, fields) == 0);
6704 /* Return a composite value in a pair of floating-point registers.
6705 MODE1 and OFFSET1 are the mode and byte offset for the first value,
6706 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
6709 For n32 & n64, $f0 always holds the first value and $f2 the second.
6710 Otherwise the values are packed together as closely as possible. */
6713 mips_return_fpr_pair (enum machine_mode mode,
6714 enum machine_mode mode1, HOST_WIDE_INT offset1,
6715 enum machine_mode mode2, HOST_WIDE_INT offset2)
6719 inc = (TARGET_NEWABI ? 2 : FP_INC);
6720 return gen_rtx_PARALLEL
6723 gen_rtx_EXPR_LIST (VOIDmode,
6724 gen_rtx_REG (mode1, FP_RETURN),
6726 gen_rtx_EXPR_LIST (VOIDmode,
6727 gen_rtx_REG (mode2, FP_RETURN + inc),
6728 GEN_INT (offset2))));
6733 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
6734 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
6735 VALTYPE is null and MODE is the mode of the return value. */
6738 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
6739 enum machine_mode mode)
6746 mode = TYPE_MODE (valtype);
6747 unsignedp = TYPE_UNSIGNED (valtype);
6749 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
6750 true, we must promote the mode just as PROMOTE_MODE does. */
6751 mode = promote_mode (valtype, mode, &unsignedp, 1);
6753 /* Handle structures whose fields are returned in $f0/$f2. */
6754 switch (mips_fpr_return_fields (valtype, fields))
6757 return gen_rtx_REG (mode, FP_RETURN);
6760 return mips_return_fpr_pair (mode,
6761 TYPE_MODE (TREE_TYPE (fields[0])),
6762 int_byte_position (fields[0]),
6763 TYPE_MODE (TREE_TYPE (fields[1])),
6764 int_byte_position (fields[1]));
6767 /* If a value is passed in the most significant part of a register, see
6768 whether we have to round the mode up to a whole number of words. */
6769 if (mips_return_in_msb (valtype))
6771 HOST_WIDE_INT size = int_size_in_bytes (valtype);
6772 if (size % UNITS_PER_WORD != 0)
6774 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
6775 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6779 /* For EABI, the class of return register depends entirely on MODE.
6780 For example, "struct { some_type x; }" and "union { some_type x; }"
6781 are returned in the same way as a bare "some_type" would be.
6782 Other ABIs only use FPRs for scalar, complex or vector types. */
6783 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
6784 return gen_rtx_REG (mode, GP_RETURN);
6787 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
6788 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
6789 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
6790 return gen_rtx_REG (mode, FP_RETURN);
6792 /* Handle long doubles for n32 & n64. */
6794 return mips_return_fpr_pair (mode,
6796 DImode, GET_MODE_SIZE (mode) / 2);
6798 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6799 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
6800 return mips_return_fpr_pair (mode,
6801 GET_MODE_INNER (mode), 0,
6802 GET_MODE_INNER (mode),
6803 GET_MODE_SIZE (mode) / 2);
6805 return gen_rtx_REG (mode, GP_RETURN);
6808 /* Return nonzero when an argument must be passed by reference. */
6811 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6812 enum machine_mode mode, tree type,
6813 bool named ATTRIBUTE_UNUSED)
6815 if (mips_abi == ABI_EABI)
6819 /* ??? How should SCmode be handled? */
6820 if (type == NULL_TREE || mode == DImode || mode == DFmode)
6823 size = int_size_in_bytes (type);
6824 return size == -1 || size > UNITS_PER_WORD;
6828 /* If we have a variable-sized parameter, we have no choice. */
6829 return targetm.calls.must_pass_in_stack (mode, type);
6834 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6835 enum machine_mode mode ATTRIBUTE_UNUSED,
6836 tree type ATTRIBUTE_UNUSED, bool named)
6838 return mips_abi == ABI_EABI && named;
6841 /* Return the class of registers for which a mode change from FROM to TO
6844 In little-endian mode, the hi-lo registers are numbered backwards,
6845 so (subreg:SI (reg:DI hi) 0) gets the high word instead of the low
6848 Similarly, when using paired floating-point registers, the first
6849 register holds the low word, regardless of endianness. So in big
6850 endian mode, (subreg:SI (reg:DF $f0) 0) does not get the high word
6853 Also, loading a 32-bit value into a 64-bit floating-point register
6854 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
6855 We can't allow 64-bit float registers to change from a 32-bit
6856 mode to a 64-bit mode. */
6859 mips_cannot_change_mode_class (enum machine_mode from,
6860 enum machine_mode to, enum reg_class class)
6862 if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
6864 if (TARGET_BIG_ENDIAN)
6865 return reg_classes_intersect_p (FP_REGS, class);
6867 return reg_classes_intersect_p (HI_AND_FP_REGS, class);
6868 return reg_classes_intersect_p (HI_REG, class);
6873 /* Return true if X should not be moved directly into register $25.
6874 We need this because many versions of GAS will treat "la $25,foo" as
6875 part of a call sequence and so allow a global "foo" to be lazily bound. */
6878 mips_dangerous_for_la25_p (rtx x)
6880 HOST_WIDE_INT offset;
6882 if (TARGET_EXPLICIT_RELOCS)
6885 mips_split_const (x, &x, &offset);
6886 return global_got_operand (x, VOIDmode);
6889 /* Implement PREFERRED_RELOAD_CLASS. */
6892 mips_preferred_reload_class (rtx x, enum reg_class class)
6894 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
6897 if (TARGET_HARD_FLOAT
6898 && FLOAT_MODE_P (GET_MODE (x))
6899 && reg_class_subset_p (FP_REGS, class))
6902 if (reg_class_subset_p (GR_REGS, class))
6905 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
6911 /* This function returns the register class required for a secondary
6912 register when copying between one of the registers in CLASS, and X,
6913 using MODE. If IN_P is nonzero, the copy is going from X to the
6914 register, otherwise the register is the source. A return value of
6915 NO_REGS means that no secondary register is required. */
6918 mips_secondary_reload_class (enum reg_class class,
6919 enum machine_mode mode, rtx x, int in_p)
6921 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
6925 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
6926 regno = true_regnum (x);
6928 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
6930 if (mips_dangerous_for_la25_p (x))
6933 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
6937 /* Copying from HI or LO to anywhere other than a general register
6938 requires a general register. */
6939 if (class == HI_REG || class == LO_REG || class == MD_REGS)
6941 if (TARGET_MIPS16 && in_p)
6943 /* We can't really copy to HI or LO at all in mips16 mode. */
6946 return gp_reg_p ? NO_REGS : gr_regs;
6948 if (MD_REG_P (regno))
6950 if (TARGET_MIPS16 && ! in_p)
6952 /* We can't really copy to HI or LO at all in mips16 mode. */
6955 return class == gr_regs ? NO_REGS : gr_regs;
6958 /* We can only copy a value to a condition code register from a
6959 floating point register, and even then we require a scratch
6960 floating point register. We can only copy a value out of a
6961 condition code register into a general register. */
6962 if (class == ST_REGS)
6966 return gp_reg_p ? NO_REGS : gr_regs;
6968 if (ST_REG_P (regno))
6972 return class == gr_regs ? NO_REGS : gr_regs;
6975 if (class == FP_REGS)
6977 if (GET_CODE (x) == MEM)
6979 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
6982 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
6984 /* We can use the l.s and l.d macros to load floating-point
6985 constants. ??? For l.s, we could probably get better
6986 code by returning GR_REGS here. */
6989 else if (gp_reg_p || x == CONST0_RTX (mode))
6991 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
6994 else if (FP_REG_P (regno))
6996 /* In this case we can use mov.s or mov.d. */
7001 /* Otherwise, we need to reload through an integer register. */
7006 /* In mips16 mode, going between memory and anything but M16_REGS
7007 requires an M16_REG. */
7010 if (class != M16_REGS && class != M16_NA_REGS)
7018 if (class == M16_REGS || class == M16_NA_REGS)
7027 /* Implement CLASS_MAX_NREGS.
7029 Usually all registers are word-sized. The only supported exception
7030 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7031 registers. A word-based calculation is correct even in that case,
7032 since -msingle-float disallows multi-FPR values.
7034 The FP status registers are an exception to this rule. They are always
7035 4 bytes wide as they only hold condition code modes, and CCmode is always
7036 considered to be 4 bytes wide. */
7039 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7040 enum machine_mode mode)
7042 if (class == ST_REGS)
7043 return (GET_MODE_SIZE (mode) + 3) / 4;
7045 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7049 mips_valid_pointer_mode (enum machine_mode mode)
7051 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7054 /* Target hook for vector_mode_supported_p. */
7056 mips_vector_mode_supported_p (enum machine_mode mode)
7058 if (mode == V2SFmode && TARGET_PAIRED_SINGLE_FLOAT)
7064 /* If we can access small data directly (using gp-relative relocation
7065 operators) return the small data pointer, otherwise return null.
7067 For each mips16 function which refers to GP relative symbols, we
7068 use a pseudo register, initialized at the start of the function, to
7069 hold the $gp value. */
7072 mips16_gp_pseudo_reg (void)
7074 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7079 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7081 /* We want to initialize this to a value which gcc will believe
7084 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7085 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7086 gen_rtx_CONST (Pmode, unspec));
7087 insn = get_insns ();
7090 push_topmost_sequence ();
7091 /* We need to emit the initialization after the FUNCTION_BEG
7092 note, so that it will be integrated. */
7093 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7094 if (GET_CODE (scan) == NOTE
7095 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7097 if (scan == NULL_RTX)
7098 scan = get_insns ();
7099 insn = emit_insn_after (insn, scan);
7100 pop_topmost_sequence ();
7103 return cfun->machine->mips16_gp_pseudo_rtx;
7106 /* Write out code to move floating point arguments in or out of
7107 general registers. Output the instructions to FILE. FP_CODE is
7108 the code describing which arguments are present (see the comment at
7109 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7110 we are copying from the floating point registers. */
7113 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7119 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7120 gcc_assert (TARGET_OLDABI);
7126 gparg = GP_ARG_FIRST;
7127 fparg = FP_ARG_FIRST;
7128 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7132 if ((fparg & 1) != 0)
7134 fprintf (file, "\t%s\t%s,%s\n", s,
7135 reg_names[gparg], reg_names[fparg]);
7137 else if ((f & 3) == 2)
7140 fprintf (file, "\td%s\t%s,%s\n", s,
7141 reg_names[gparg], reg_names[fparg]);
7144 if ((fparg & 1) != 0)
7146 if (TARGET_BIG_ENDIAN)
7147 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7148 reg_names[gparg], reg_names[fparg + 1], s,
7149 reg_names[gparg + 1], reg_names[fparg]);
7151 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7152 reg_names[gparg], reg_names[fparg], s,
7153 reg_names[gparg + 1], reg_names[fparg + 1]);
7166 /* Build a mips16 function stub. This is used for functions which
7167 take arguments in the floating point registers. It is 32 bit code
7168 that moves the floating point args into the general registers, and
7169 then jumps to the 16 bit code. */
7172 build_mips16_function_stub (FILE *file)
7175 char *secname, *stubname;
7176 tree stubid, stubdecl;
7180 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7181 secname = (char *) alloca (strlen (fnname) + 20);
7182 sprintf (secname, ".mips16.fn.%s", fnname);
7183 stubname = (char *) alloca (strlen (fnname) + 20);
7184 sprintf (stubname, "__fn_stub_%s", fnname);
7185 stubid = get_identifier (stubname);
7186 stubdecl = build_decl (FUNCTION_DECL, stubid,
7187 build_function_type (void_type_node, NULL_TREE));
7188 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7190 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7192 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7194 fprintf (file, "%s%s",
7195 need_comma ? ", " : "",
7196 (f & 3) == 1 ? "float" : "double");
7199 fprintf (file, ")\n");
7201 fprintf (file, "\t.set\tnomips16\n");
7202 function_section (stubdecl);
7203 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7205 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7206 within a .ent, and we cannot emit another .ent. */
7207 if (!FUNCTION_NAME_ALREADY_DECLARED)
7209 fputs ("\t.ent\t", file);
7210 assemble_name (file, stubname);
7214 assemble_name (file, stubname);
7215 fputs (":\n", file);
7217 /* We don't want the assembler to insert any nops here. */
7218 fprintf (file, "\t.set\tnoreorder\n");
7220 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7222 fprintf (asm_out_file, "\t.set\tnoat\n");
7223 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7224 assemble_name (file, fnname);
7225 fprintf (file, "\n");
7226 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7227 fprintf (asm_out_file, "\t.set\tat\n");
7229 /* Unfortunately, we can't fill the jump delay slot. We can't fill
7230 with one of the mfc1 instructions, because the result is not
7231 available for one instruction, so if the very first instruction
7232 in the function refers to the register, it will see the wrong
7234 fprintf (file, "\tnop\n");
7236 fprintf (file, "\t.set\treorder\n");
7238 if (!FUNCTION_NAME_ALREADY_DECLARED)
7240 fputs ("\t.end\t", file);
7241 assemble_name (file, stubname);
7245 fprintf (file, "\t.set\tmips16\n");
7247 function_section (current_function_decl);
7250 /* We keep a list of functions for which we have already built stubs
7251 in build_mips16_call_stub. */
7255 struct mips16_stub *next;
7260 static struct mips16_stub *mips16_stubs;
7262 /* Build a call stub for a mips16 call. A stub is needed if we are
7263 passing any floating point values which should go into the floating
7264 point registers. If we are, and the call turns out to be to a 32
7265 bit function, the stub will be used to move the values into the
7266 floating point registers before calling the 32 bit function. The
7267 linker will magically adjust the function call to either the 16 bit
7268 function or the 32 bit stub, depending upon where the function call
7269 is actually defined.
7271 Similarly, we need a stub if the return value might come back in a
7272 floating point register.
7274 RETVAL is the location of the return value, or null if this is
7275 a call rather than a call_value. FN is the address of the
7276 function and ARG_SIZE is the size of the arguments. FP_CODE
7277 is the code built by function_arg. This function returns a nonzero
7278 value if it builds the call instruction itself. */
7281 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
7285 char *secname, *stubname;
7286 struct mips16_stub *l;
7287 tree stubid, stubdecl;
7291 /* We don't need to do anything if we aren't in mips16 mode, or if
7292 we were invoked with the -msoft-float option. */
7293 if (! TARGET_MIPS16 || ! mips16_hard_float)
7296 /* Figure out whether the value might come back in a floating point
7298 fpret = (retval != 0
7299 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
7300 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
7302 /* We don't need to do anything if there were no floating point
7303 arguments and the value will not be returned in a floating point
7305 if (fp_code == 0 && ! fpret)
7308 /* We don't need to do anything if this is a call to a special
7309 mips16 support function. */
7310 if (GET_CODE (fn) == SYMBOL_REF
7311 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
7314 /* This code will only work for o32 and o64 abis. The other ABI's
7315 require more sophisticated support. */
7316 gcc_assert (TARGET_OLDABI);
7318 /* We can only handle SFmode and DFmode floating point return
7321 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
7323 /* If we're calling via a function pointer, then we must always call
7324 via a stub. There are magic stubs provided in libgcc.a for each
7325 of the required cases. Each of them expects the function address
7326 to arrive in register $2. */
7328 if (GET_CODE (fn) != SYMBOL_REF)
7334 /* ??? If this code is modified to support other ABI's, we need
7335 to handle PARALLEL return values here. */
7337 sprintf (buf, "__mips16_call_stub_%s%d",
7339 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
7342 id = get_identifier (buf);
7343 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
7345 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
7347 if (retval == NULL_RTX)
7348 insn = gen_call_internal (stub_fn, arg_size);
7350 insn = gen_call_value_internal (retval, stub_fn, arg_size);
7351 insn = emit_call_insn (insn);
7353 /* Put the register usage information on the CALL. */
7354 CALL_INSN_FUNCTION_USAGE (insn) =
7355 gen_rtx_EXPR_LIST (VOIDmode,
7356 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
7357 CALL_INSN_FUNCTION_USAGE (insn));
7359 /* If we are handling a floating point return value, we need to
7360 save $18 in the function prologue. Putting a note on the
7361 call will mean that regs_ever_live[$18] will be true if the
7362 call is not eliminated, and we can check that in the prologue
7365 CALL_INSN_FUNCTION_USAGE (insn) =
7366 gen_rtx_EXPR_LIST (VOIDmode,
7367 gen_rtx_USE (VOIDmode,
7368 gen_rtx_REG (word_mode, 18)),
7369 CALL_INSN_FUNCTION_USAGE (insn));
7371 /* Return 1 to tell the caller that we've generated the call
7376 /* We know the function we are going to call. If we have already
7377 built a stub, we don't need to do anything further. */
7379 fnname = XSTR (fn, 0);
7380 for (l = mips16_stubs; l != NULL; l = l->next)
7381 if (strcmp (l->name, fnname) == 0)
7386 /* Build a special purpose stub. When the linker sees a
7387 function call in mips16 code, it will check where the target
7388 is defined. If the target is a 32 bit call, the linker will
7389 search for the section defined here. It can tell which
7390 symbol this section is associated with by looking at the
7391 relocation information (the name is unreliable, since this
7392 might be a static function). If such a section is found, the
7393 linker will redirect the call to the start of the magic
7396 If the function does not return a floating point value, the
7397 special stub section is named
7400 If the function does return a floating point value, the stub
7402 .mips16.call.fp.FNNAME
7405 secname = (char *) alloca (strlen (fnname) + 40);
7406 sprintf (secname, ".mips16.call.%s%s",
7409 stubname = (char *) alloca (strlen (fnname) + 20);
7410 sprintf (stubname, "__call_stub_%s%s",
7413 stubid = get_identifier (stubname);
7414 stubdecl = build_decl (FUNCTION_DECL, stubid,
7415 build_function_type (void_type_node, NULL_TREE));
7416 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7418 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
7420 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
7424 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7426 fprintf (asm_out_file, "%s%s",
7427 need_comma ? ", " : "",
7428 (f & 3) == 1 ? "float" : "double");
7431 fprintf (asm_out_file, ")\n");
7433 fprintf (asm_out_file, "\t.set\tnomips16\n");
7434 assemble_start_function (stubdecl, stubname);
7436 if (!FUNCTION_NAME_ALREADY_DECLARED)
7438 fputs ("\t.ent\t", asm_out_file);
7439 assemble_name (asm_out_file, stubname);
7440 fputs ("\n", asm_out_file);
7442 assemble_name (asm_out_file, stubname);
7443 fputs (":\n", asm_out_file);
7446 /* We build the stub code by hand. That's the only way we can
7447 do it, since we can't generate 32 bit code during a 16 bit
7450 /* We don't want the assembler to insert any nops here. */
7451 fprintf (asm_out_file, "\t.set\tnoreorder\n");
7453 mips16_fp_args (asm_out_file, fp_code, 0);
7457 fprintf (asm_out_file, "\t.set\tnoat\n");
7458 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
7460 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7461 fprintf (asm_out_file, "\t.set\tat\n");
7462 /* Unfortunately, we can't fill the jump delay slot. We
7463 can't fill with one of the mtc1 instructions, because the
7464 result is not available for one instruction, so if the
7465 very first instruction in the function refers to the
7466 register, it will see the wrong value. */
7467 fprintf (asm_out_file, "\tnop\n");
7471 fprintf (asm_out_file, "\tmove\t%s,%s\n",
7472 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
7473 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
7474 /* As above, we can't fill the delay slot. */
7475 fprintf (asm_out_file, "\tnop\n");
7476 if (GET_MODE (retval) == SFmode)
7477 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7478 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
7481 if (TARGET_BIG_ENDIAN)
7483 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7484 reg_names[GP_REG_FIRST + 2],
7485 reg_names[FP_REG_FIRST + 1]);
7486 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7487 reg_names[GP_REG_FIRST + 3],
7488 reg_names[FP_REG_FIRST + 0]);
7492 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7493 reg_names[GP_REG_FIRST + 2],
7494 reg_names[FP_REG_FIRST + 0]);
7495 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7496 reg_names[GP_REG_FIRST + 3],
7497 reg_names[FP_REG_FIRST + 1]);
7500 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
7501 /* As above, we can't fill the delay slot. */
7502 fprintf (asm_out_file, "\tnop\n");
7505 fprintf (asm_out_file, "\t.set\treorder\n");
7507 #ifdef ASM_DECLARE_FUNCTION_SIZE
7508 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
7511 if (!FUNCTION_NAME_ALREADY_DECLARED)
7513 fputs ("\t.end\t", asm_out_file);
7514 assemble_name (asm_out_file, stubname);
7515 fputs ("\n", asm_out_file);
7518 fprintf (asm_out_file, "\t.set\tmips16\n");
7520 /* Record this stub. */
7521 l = (struct mips16_stub *) xmalloc (sizeof *l);
7522 l->name = xstrdup (fnname);
7524 l->next = mips16_stubs;
7528 /* If we expect a floating point return value, but we've built a
7529 stub which does not expect one, then we're in trouble. We can't
7530 use the existing stub, because it won't handle the floating point
7531 value. We can't build a new stub, because the linker won't know
7532 which stub to use for the various calls in this object file.
7533 Fortunately, this case is illegal, since it means that a function
7534 was declared in two different ways in a single compilation. */
7535 if (fpret && ! l->fpret)
7536 error ("cannot handle inconsistent calls to `%s'", fnname);
7538 /* If we are calling a stub which handles a floating point return
7539 value, we need to arrange to save $18 in the prologue. We do
7540 this by marking the function call as using the register. The
7541 prologue will later see that it is used, and emit code to save
7548 if (retval == NULL_RTX)
7549 insn = gen_call_internal (fn, arg_size);
7551 insn = gen_call_value_internal (retval, fn, arg_size);
7552 insn = emit_call_insn (insn);
7554 CALL_INSN_FUNCTION_USAGE (insn) =
7555 gen_rtx_EXPR_LIST (VOIDmode,
7556 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
7557 CALL_INSN_FUNCTION_USAGE (insn));
7559 /* Return 1 to tell the caller that we've generated the call
7564 /* Return 0 to let the caller generate the call insn. */
7568 /* An entry in the mips16 constant pool. VALUE is the pool constant,
7569 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
7571 struct mips16_constant {
7572 struct mips16_constant *next;
7575 enum machine_mode mode;
7578 /* Information about an incomplete mips16 constant pool. FIRST is the
7579 first constant, HIGHEST_ADDRESS is the highest address that the first
7580 byte of the pool can have, and INSN_ADDRESS is the current instruction
7583 struct mips16_constant_pool {
7584 struct mips16_constant *first;
7585 int highest_address;
7589 /* Add constant VALUE to POOL and return its label. MODE is the
7590 value's mode (used for CONST_INTs, etc.). */
7593 add_constant (struct mips16_constant_pool *pool,
7594 rtx value, enum machine_mode mode)
7596 struct mips16_constant **p, *c;
7597 bool first_of_size_p;
7599 /* See whether the constant is already in the pool. If so, return the
7600 existing label, otherwise leave P pointing to the place where the
7601 constant should be added.
7603 Keep the pool sorted in increasing order of mode size so that we can
7604 reduce the number of alignments needed. */
7605 first_of_size_p = true;
7606 for (p = &pool->first; *p != 0; p = &(*p)->next)
7608 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
7610 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
7612 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
7613 first_of_size_p = false;
7616 /* In the worst case, the constant needed by the earliest instruction
7617 will end up at the end of the pool. The entire pool must then be
7618 accessible from that instruction.
7620 When adding the first constant, set the pool's highest address to
7621 the address of the first out-of-range byte. Adjust this address
7622 downwards each time a new constant is added. */
7623 if (pool->first == 0)
7624 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
7625 is the address of the instruction with the lowest two bits clear.
7626 The base PC value for ld has the lowest three bits clear. Assume
7627 the worst case here. */
7628 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
7629 pool->highest_address -= GET_MODE_SIZE (mode);
7630 if (first_of_size_p)
7631 /* Take into account the worst possible padding due to alignment. */
7632 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
7634 /* Create a new entry. */
7635 c = (struct mips16_constant *) xmalloc (sizeof *c);
7638 c->label = gen_label_rtx ();
7645 /* Output constant VALUE after instruction INSN and return the last
7646 instruction emitted. MODE is the mode of the constant. */
7649 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
7651 switch (GET_MODE_CLASS (mode))
7655 rtx size = GEN_INT (GET_MODE_SIZE (mode));
7656 return emit_insn_after (gen_consttable_int (value, size), insn);
7660 return emit_insn_after (gen_consttable_float (value), insn);
7662 case MODE_VECTOR_FLOAT:
7663 case MODE_VECTOR_INT:
7666 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
7667 insn = dump_constants_1 (GET_MODE_INNER (mode),
7668 CONST_VECTOR_ELT (value, i), insn);
7678 /* Dump out the constants in CONSTANTS after INSN. */
7681 dump_constants (struct mips16_constant *constants, rtx insn)
7683 struct mips16_constant *c, *next;
7687 for (c = constants; c != NULL; c = next)
7689 /* If necessary, increase the alignment of PC. */
7690 if (align < GET_MODE_SIZE (c->mode))
7692 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
7693 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
7695 align = GET_MODE_SIZE (c->mode);
7697 insn = emit_label_after (c->label, insn);
7698 insn = dump_constants_1 (c->mode, c->value, insn);
7704 emit_barrier_after (insn);
7707 /* Return the length of instruction INSN.
7709 ??? MIPS16 switch tables go in .text, but we don't define
7710 JUMP_TABLES_IN_TEXT_SECTION, so get_attr_length will not
7711 compute their lengths correctly. */
7714 mips16_insn_length (rtx insn)
7716 if (GET_CODE (insn) == JUMP_INSN)
7718 rtx body = PATTERN (insn);
7719 if (GET_CODE (body) == ADDR_VEC)
7720 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
7721 if (GET_CODE (body) == ADDR_DIFF_VEC)
7722 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
7724 return get_attr_length (insn);
7727 /* Rewrite *X so that constant pool references refer to the constant's
7728 label instead. DATA points to the constant pool structure. */
7731 mips16_rewrite_pool_refs (rtx *x, void *data)
7733 struct mips16_constant_pool *pool = data;
7734 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
7735 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
7736 get_pool_constant (*x),
7737 get_pool_mode (*x)));
7741 /* Build MIPS16 constant pools. */
7744 mips16_lay_out_constants (void)
7746 struct mips16_constant_pool pool;
7750 memset (&pool, 0, sizeof (pool));
7751 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7753 /* Rewrite constant pool references in INSN. */
7755 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
7757 pool.insn_address += mips16_insn_length (insn);
7759 if (pool.first != NULL)
7761 /* If there are no natural barriers between the first user of
7762 the pool and the highest acceptable address, we'll need to
7763 create a new instruction to jump around the constant pool.
7764 In the worst case, this instruction will be 4 bytes long.
7766 If it's too late to do this transformation after INSN,
7767 do it immediately before INSN. */
7768 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
7772 label = gen_label_rtx ();
7774 jump = emit_jump_insn_before (gen_jump (label), insn);
7775 JUMP_LABEL (jump) = label;
7776 LABEL_NUSES (label) = 1;
7777 barrier = emit_barrier_after (jump);
7779 emit_label_after (label, barrier);
7780 pool.insn_address += 4;
7783 /* See whether the constant pool is now out of range of the first
7784 user. If so, output the constants after the previous barrier.
7785 Note that any instructions between BARRIER and INSN (inclusive)
7786 will use negative offsets to refer to the pool. */
7787 if (pool.insn_address > pool.highest_address)
7789 dump_constants (pool.first, barrier);
7793 else if (BARRIER_P (insn))
7797 dump_constants (pool.first, get_last_insn ());
7800 /* A temporary variable used by for_each_rtx callbacks, etc. */
7801 static rtx mips_sim_insn;
7803 /* A structure representing the state of the processor pipeline.
7804 Used by the mips_sim_* family of functions. */
7806 /* The maximum number of instructions that can be issued in a cycle.
7807 (Caches mips_issue_rate.) */
7808 unsigned int issue_rate;
7810 /* The current simulation time. */
7813 /* How many more instructions can be issued in the current cycle. */
7814 unsigned int insns_left;
7816 /* LAST_SET[X].INSN is the last instruction to set register X.
7817 LAST_SET[X].TIME is the time at which that instruction was issued.
7818 INSN is null if no instruction has yet set register X. */
7822 } last_set[FIRST_PSEUDO_REGISTER];
7824 /* The pipeline's current DFA state. */
7828 /* Reset STATE to the initial simulation state. */
7831 mips_sim_reset (struct mips_sim *state)
7834 state->insns_left = state->issue_rate;
7835 memset (&state->last_set, 0, sizeof (state->last_set));
7836 state_reset (state->dfa_state);
7839 /* Initialize STATE before its first use. DFA_STATE points to an
7840 allocated but uninitialized DFA state. */
7843 mips_sim_init (struct mips_sim *state, state_t dfa_state)
7845 state->issue_rate = mips_issue_rate ();
7846 state->dfa_state = dfa_state;
7847 mips_sim_reset (state);
7850 /* Advance STATE by one clock cycle. */
7853 mips_sim_next_cycle (struct mips_sim *state)
7856 state->insns_left = state->issue_rate;
7857 state_transition (state->dfa_state, 0);
7860 /* Advance simulation state STATE until instruction INSN can read
7864 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
7868 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
7869 if (state->last_set[REGNO (reg) + i].insn != 0)
7873 t = state->last_set[REGNO (reg) + i].time;
7874 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
7875 while (state->time < t)
7876 mips_sim_next_cycle (state);
7880 /* A for_each_rtx callback. If *X is a register, advance simulation state
7881 DATA until mips_sim_insn can read the register's value. */
7884 mips_sim_wait_regs_2 (rtx *x, void *data)
7887 mips_sim_wait_reg (data, mips_sim_insn, *x);
7891 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
7894 mips_sim_wait_regs_1 (rtx *x, void *data)
7896 for_each_rtx (x, mips_sim_wait_regs_2, data);
7899 /* Advance simulation state STATE until all of INSN's register
7900 dependencies are satisfied. */
7903 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
7905 mips_sim_insn = insn;
7906 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
7909 /* Advance simulation state STATE until the units required by
7910 instruction INSN are available. */
7913 mips_sim_wait_units (struct mips_sim *state, rtx insn)
7917 tmp_state = alloca (state_size ());
7918 while (state->insns_left == 0
7919 || (memcpy (tmp_state, state->dfa_state, state_size ()),
7920 state_transition (tmp_state, insn) >= 0))
7921 mips_sim_next_cycle (state);
7924 /* Advance simulation state STATE until INSN is ready to issue. */
7927 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
7929 mips_sim_wait_regs (state, insn);
7930 mips_sim_wait_units (state, insn);
7933 /* mips_sim_insn has just set X. Update the LAST_SET array
7934 in simulation state DATA. */
7937 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
7939 struct mips_sim *state;
7944 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
7946 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
7947 state->last_set[REGNO (x) + i].time = state->time;
7951 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
7952 can issue immediately (i.e., that mips_sim_wait_insn has already
7956 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
7958 state_transition (state->dfa_state, insn);
7959 state->insns_left--;
7961 mips_sim_insn = insn;
7962 note_stores (PATTERN (insn), mips_sim_record_set, state);
7965 /* Simulate issuing a NOP in state STATE. */
7968 mips_sim_issue_nop (struct mips_sim *state)
7970 if (state->insns_left == 0)
7971 mips_sim_next_cycle (state);
7972 state->insns_left--;
7975 /* Update simulation state STATE so that it's ready to accept the instruction
7976 after INSN. INSN should be part of the main rtl chain, not a member of a
7980 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
7982 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
7984 mips_sim_issue_nop (state);
7986 switch (GET_CODE (SEQ_BEGIN (insn)))
7990 /* We can't predict the processor state after a call or label. */
7991 mips_sim_reset (state);
7995 /* The delay slots of branch likely instructions are only executed
7996 when the branch is taken. Therefore, if the caller has simulated
7997 the delay slot instruction, STATE does not really reflect the state
7998 of the pipeline for the instruction after the delay slot. Also,
7999 branch likely instructions tend to incur a penalty when not taken,
8000 so there will probably be an extra delay between the branch and
8001 the instruction after the delay slot. */
8002 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8003 mips_sim_reset (state);
8011 /* The VR4130 pipeline issues aligned pairs of instructions together,
8012 but it stalls the second instruction if it depends on the first.
8013 In order to cut down the amount of logic required, this dependence
8014 check is not based on a full instruction decode. Instead, any non-SPECIAL
8015 instruction is assumed to modify the register specified by bits 20-16
8016 (which is usually the "rt" field).
8018 In beq, beql, bne and bnel instructions, the rt field is actually an
8019 input, so we can end up with a false dependence between the branch
8020 and its delay slot. If this situation occurs in instruction INSN,
8021 try to avoid it by swapping rs and rt. */
8024 vr4130_avoid_branch_rt_conflict (rtx insn)
8028 first = SEQ_BEGIN (insn);
8029 second = SEQ_END (insn);
8030 if (GET_CODE (first) == JUMP_INSN
8031 && GET_CODE (second) == INSN
8032 && GET_CODE (PATTERN (first)) == SET
8033 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8034 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8036 /* Check for the right kind of condition. */
8037 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8038 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8039 && REG_P (XEXP (cond, 0))
8040 && REG_P (XEXP (cond, 1))
8041 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8042 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8044 /* SECOND mentions the rt register but not the rs register. */
8045 rtx tmp = XEXP (cond, 0);
8046 XEXP (cond, 0) = XEXP (cond, 1);
8047 XEXP (cond, 1) = tmp;
8052 /* Implement -mvr4130-align. Go through each basic block and simulate the
8053 processor pipeline. If we find that a pair of instructions could execute
8054 in parallel, and the first of those instruction is not 8-byte aligned,
8055 insert a nop to make it aligned. */
8058 vr4130_align_insns (void)
8060 struct mips_sim state;
8061 rtx insn, subinsn, last, last2, next;
8066 /* LAST is the last instruction before INSN to have a nonzero length.
8067 LAST2 is the last such instruction before LAST. */
8071 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8074 mips_sim_init (&state, alloca (state_size ()));
8075 for (insn = get_insns (); insn != 0; insn = next)
8077 unsigned int length;
8079 next = NEXT_INSN (insn);
8081 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8082 This isn't really related to the alignment pass, but we do it on
8083 the fly to avoid a separate instruction walk. */
8084 vr4130_avoid_branch_rt_conflict (insn);
8086 if (USEFUL_INSN_P (insn))
8087 FOR_EACH_SUBINSN (subinsn, insn)
8089 mips_sim_wait_insn (&state, subinsn);
8091 /* If we want this instruction to issue in parallel with the
8092 previous one, make sure that the previous instruction is
8093 aligned. There are several reasons why this isn't worthwhile
8094 when the second instruction is a call:
8096 - Calls are less likely to be performance critical,
8097 - There's a good chance that the delay slot can execute
8098 in parallel with the call.
8099 - The return address would then be unaligned.
8101 In general, if we're going to insert a nop between instructions
8102 X and Y, it's better to insert it immediately after X. That
8103 way, if the nop makes Y aligned, it will also align any labels
8105 if (state.insns_left != state.issue_rate
8106 && GET_CODE (subinsn) != CALL_INSN)
8108 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8110 /* SUBINSN is the first instruction in INSN and INSN is
8111 aligned. We want to align the previous instruction
8112 instead, so insert a nop between LAST2 and LAST.
8114 Note that LAST could be either a single instruction
8115 or a branch with a delay slot. In the latter case,
8116 LAST, like INSN, is already aligned, but the delay
8117 slot must have some extra delay that stops it from
8118 issuing at the same time as the branch. We therefore
8119 insert a nop before the branch in order to align its
8121 emit_insn_after (gen_nop (), last2);
8124 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8126 /* SUBINSN is the delay slot of INSN, but INSN is
8127 currently unaligned. Insert a nop between
8128 LAST and INSN to align it. */
8129 emit_insn_after (gen_nop (), last);
8133 mips_sim_issue_insn (&state, subinsn);
8135 mips_sim_finish_insn (&state, insn);
8137 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8138 length = get_attr_length (insn);
8141 /* If the instruction is an asm statement or multi-instruction
8142 mips.md patern, the length is only an estimate. Insert an
8143 8 byte alignment after it so that the following instructions
8144 can be handled correctly. */
8145 if (GET_CODE (SEQ_BEGIN (insn)) == INSN
8146 && (recog_memoized (insn) < 0 || length >= 8))
8148 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8149 next = NEXT_INSN (next);
8150 mips_sim_next_cycle (&state);
8153 else if (length & 4)
8154 aligned_p = !aligned_p;
8159 /* See whether INSN is an aligned label. */
8160 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8166 /* Subroutine of mips_reorg. If there is a hazard between INSN
8167 and a previous instruction, avoid it by inserting nops after
8170 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8171 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8172 before using the value of that register. *HILO_DELAY counts the
8173 number of instructions since the last hilo hazard (that is,
8174 the number of instructions since the last mflo or mfhi).
8176 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8177 for the next instruction.
8179 LO_REG is an rtx for the LO register, used in dependence checking. */
8182 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8183 rtx *delayed_reg, rtx lo_reg)
8191 pattern = PATTERN (insn);
8193 /* Do not put the whole function in .set noreorder if it contains
8194 an asm statement. We don't know whether there will be hazards
8195 between the asm statement and the gcc-generated code. */
8196 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8197 cfun->machine->all_noreorder_p = false;
8199 /* Ignore zero-length instructions (barriers and the like). */
8200 ninsns = get_attr_length (insn) / 4;
8204 /* Work out how many nops are needed. Note that we only care about
8205 registers that are explicitly mentioned in the instruction's pattern.
8206 It doesn't matter that calls use the argument registers or that they
8207 clobber hi and lo. */
8208 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8209 nops = 2 - *hilo_delay;
8210 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8215 /* Insert the nops between this instruction and the previous one.
8216 Each new nop takes us further from the last hilo hazard. */
8217 *hilo_delay += nops;
8219 emit_insn_after (gen_hazard_nop (), after);
8221 /* Set up the state for the next instruction. */
8222 *hilo_delay += ninsns;
8224 if (INSN_CODE (insn) >= 0)
8225 switch (get_attr_hazard (insn))
8235 set = single_set (insn);
8236 gcc_assert (set != 0);
8237 *delayed_reg = SET_DEST (set);
8243 /* Go through the instruction stream and insert nops where necessary.
8244 See if the whole function can then be put into .set noreorder &
8248 mips_avoid_hazards (void)
8250 rtx insn, last_insn, lo_reg, delayed_reg;
8253 /* Force all instructions to be split into their final form. */
8254 split_all_insns_noflow ();
8256 /* Recalculate instruction lengths without taking nops into account. */
8257 cfun->machine->ignore_hazard_length_p = true;
8258 shorten_branches (get_insns ());
8260 /* The profiler code uses assembler macros. -mfix-vr4120 relies on
8261 assembler nop insertion. */
8262 cfun->machine->all_noreorder_p = (!current_function_profile
8263 && !TARGET_FIX_VR4120);
8268 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
8270 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
8273 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8274 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8275 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
8276 &hilo_delay, &delayed_reg, lo_reg);
8278 mips_avoid_hazard (last_insn, insn, &hilo_delay,
8279 &delayed_reg, lo_reg);
8286 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
8292 mips16_lay_out_constants ();
8293 else if (TARGET_EXPLICIT_RELOCS)
8295 if (mips_flag_delayed_branch)
8296 dbr_schedule (get_insns (), dump_file);
8297 mips_avoid_hazards ();
8298 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
8299 vr4130_align_insns ();
8303 /* This function does three things:
8305 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
8306 - Register the mips16 hardware floating point stubs.
8307 - Register the gofast functions if selected using --enable-gofast. */
8309 #include "config/gofast.h"
8312 mips_init_libfuncs (void)
8314 if (TARGET_FIX_VR4120)
8316 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
8317 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
8320 if (TARGET_MIPS16 && mips16_hard_float)
8322 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
8323 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
8324 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
8325 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
8327 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
8328 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
8329 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
8330 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
8331 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
8332 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
8334 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
8335 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
8337 if (TARGET_DOUBLE_FLOAT)
8339 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
8340 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
8341 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
8342 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
8344 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
8345 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
8346 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
8347 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
8348 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
8349 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
8351 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
8352 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
8354 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
8355 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
8359 gofast_maybe_init_libfuncs ();
8362 /* Return a number assessing the cost of moving a register in class
8363 FROM to class TO. The classes are expressed using the enumeration
8364 values such as `GENERAL_REGS'. A value of 2 is the default; other
8365 values are interpreted relative to that.
8367 It is not required that the cost always equal 2 when FROM is the
8368 same as TO; on some machines it is expensive to move between
8369 registers if they are not general registers.
8371 If reload sees an insn consisting of a single `set' between two
8372 hard registers, and if `REGISTER_MOVE_COST' applied to their
8373 classes returns a value of 2, reload does not check to ensure that
8374 the constraints of the insn are met. Setting a cost of other than
8375 2 will allow reload to verify that the constraints are met. You
8376 should do this if the `movM' pattern's constraints do not allow
8379 ??? We make the cost of moving from HI/LO into general
8380 registers the same as for one of moving general registers to
8381 HI/LO for TARGET_MIPS16 in order to prevent allocating a
8382 pseudo to HI/LO. This might hurt optimizations though, it
8383 isn't clear if it is wise. And it might not work in all cases. We
8384 could solve the DImode LO reg problem by using a multiply, just
8385 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8386 problem by using divide instructions. divu puts the remainder in
8387 the HI reg, so doing a divide by -1 will move the value in the HI
8388 reg for all values except -1. We could handle that case by using a
8389 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8390 a compare/branch to test the input value to see which instruction
8391 we need to use. This gets pretty messy, but it is feasible. */
8394 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
8395 enum reg_class to, enum reg_class from)
8397 if (from == M16_REGS && GR_REG_CLASS_P (to))
8399 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
8401 else if (GR_REG_CLASS_P (from))
8405 else if (to == M16_NA_REGS)
8407 else if (GR_REG_CLASS_P (to))
8414 else if (to == FP_REGS)
8416 else if (to == HI_REG || to == LO_REG || to == MD_REGS)
8423 else if (COP_REG_CLASS_P (to))
8427 } /* GR_REG_CLASS_P (from) */
8428 else if (from == FP_REGS)
8430 if (GR_REG_CLASS_P (to))
8432 else if (to == FP_REGS)
8434 else if (to == ST_REGS)
8436 } /* from == FP_REGS */
8437 else if (from == HI_REG || from == LO_REG || from == MD_REGS)
8439 if (GR_REG_CLASS_P (to))
8446 } /* from == HI_REG, etc. */
8447 else if (from == ST_REGS && GR_REG_CLASS_P (to))
8449 else if (COP_REG_CLASS_P (from))
8452 } /* COP_REG_CLASS_P (from) */
8459 /* Return the length of INSN. LENGTH is the initial length computed by
8460 attributes in the machine-description file. */
8463 mips_adjust_insn_length (rtx insn, int length)
8465 /* A unconditional jump has an unfilled delay slot if it is not part
8466 of a sequence. A conditional jump normally has a delay slot, but
8467 does not on MIPS16. */
8468 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
8471 /* See how many nops might be needed to avoid hardware hazards. */
8472 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
8473 switch (get_attr_hazard (insn))
8487 /* All MIPS16 instructions are a measly two bytes. */
8495 /* Return an asm sequence to start a noat block and load the address
8496 of a label into $1. */
8499 mips_output_load_label (void)
8501 if (TARGET_EXPLICIT_RELOCS)
8505 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
8508 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
8511 if (ISA_HAS_LOAD_DELAY)
8512 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
8513 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
8517 if (Pmode == DImode)
8518 return "%[dla\t%@,%0";
8520 return "%[la\t%@,%0";
8525 /* Output assembly instructions to peform a conditional branch.
8527 INSN is the branch instruction. OPERANDS[0] is the condition.
8528 OPERANDS[1] is the target of the branch. OPERANDS[2] is the target
8529 of the first operand to the condition. If TWO_OPERANDS_P is
8530 nonzero the comparison takes two operands; OPERANDS[3] will be the
8533 If INVERTED_P is nonzero we are to branch if the condition does
8534 not hold. If FLOAT_P is nonzero this is a floating-point comparison.
8536 LENGTH is the length (in bytes) of the sequence we are to generate.
8537 That tells us whether to generate a simple conditional branch, or a
8538 reversed conditional branch around a `jr' instruction. */
8540 mips_output_conditional_branch (rtx insn, rtx *operands, int two_operands_p,
8541 int float_p, int inverted_p, int length)
8543 static char buffer[200];
8544 /* The kind of comparison we are doing. */
8545 enum rtx_code code = GET_CODE (operands[0]);
8546 /* Nonzero if the opcode for the comparison needs a `z' indicating
8547 that it is a comparison against zero. */
8549 /* A string to use in the assembly output to represent the first
8551 const char *op1 = "%z2";
8552 /* A string to use in the assembly output to represent the second
8553 operand. Use the hard-wired zero register if there's no second
8555 const char *op2 = (two_operands_p ? ",%z3" : ",%.");
8556 /* The operand-printing string for the comparison. */
8557 const char *const comp = (float_p ? "%F0" : "%C0");
8558 /* The operand-printing string for the inverted comparison. */
8559 const char *const inverted_comp = (float_p ? "%W0" : "%N0");
8561 /* The MIPS processors (for levels of the ISA at least two), have
8562 "likely" variants of each branch instruction. These instructions
8563 annul the instruction in the delay slot if the branch is not
8565 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
8567 if (!two_operands_p)
8569 /* To compute whether than A > B, for example, we normally
8570 subtract B from A and then look at the sign bit. But, if we
8571 are doing an unsigned comparison, and B is zero, we don't
8572 have to do the subtraction. Instead, we can just check to
8573 see if A is nonzero. Thus, we change the CODE here to
8574 reflect the simpler comparison operation. */
8586 /* A condition which will always be true. */
8592 /* A condition which will always be false. */
8598 /* Not a special case. */
8603 /* Relative comparisons are always done against zero. But
8604 equality comparisons are done between two operands, and therefore
8605 do not require a `z' in the assembly language output. */
8606 need_z_p = (!float_p && code != EQ && code != NE);
8607 /* For comparisons against zero, the zero is not provided
8612 /* Begin by terminating the buffer. That way we can always use
8613 strcat to add to it. */
8620 /* Just a simple conditional branch. */
8622 sprintf (buffer, "%%*b%s%%?\t%%Z2%%1%%/",
8623 inverted_p ? inverted_comp : comp);
8625 sprintf (buffer, "%%*b%s%s%%?\t%s%s,%%1%%/",
8626 inverted_p ? inverted_comp : comp,
8627 need_z_p ? "z" : "",
8637 /* Generate a reversed conditional branch around ` j'
8650 If the original branch was a likely branch, the delay slot
8651 must be executed only if the branch is taken, so generate:
8663 When generating PIC, instead of:
8676 rtx target = gen_label_rtx ();
8678 orig_target = operands[1];
8679 operands[1] = target;
8680 /* Generate the reversed comparison. This takes four
8683 sprintf (buffer, "%%*b%s\t%%Z2%%1",
8684 inverted_p ? comp : inverted_comp);
8686 sprintf (buffer, "%%*b%s%s\t%s%s,%%1",
8687 inverted_p ? comp : inverted_comp,
8688 need_z_p ? "z" : "",
8691 output_asm_insn (buffer, operands);
8693 if (length != 16 && length != 28 && ! mips_branch_likely)
8695 /* Output delay slot instruction. */
8696 rtx insn = final_sequence;
8697 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8698 optimize, 0, 1, NULL);
8699 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8702 output_asm_insn ("%#", 0);
8705 output_asm_insn ("j\t%0", &orig_target);
8708 output_asm_insn (mips_output_load_label (), &orig_target);
8709 output_asm_insn ("jr\t%@%]", 0);
8712 if (length != 16 && length != 28 && mips_branch_likely)
8714 /* Output delay slot instruction. */
8715 rtx insn = final_sequence;
8716 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8717 optimize, 0, 1, NULL);
8718 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8721 output_asm_insn ("%#", 0);
8723 (*targetm.asm_out.internal_label) (asm_out_file, "L",
8724 CODE_LABEL_NUMBER (target));
8737 /* Used to output div or ddiv instruction DIVISION, which has the operands
8738 given by OPERANDS. Add in a divide-by-zero check if needed.
8740 When working around R4000 and R4400 errata, we need to make sure that
8741 the division is not immediately followed by a shift[1][2]. We also
8742 need to stop the division from being put into a branch delay slot[3].
8743 The easiest way to avoid both problems is to add a nop after the
8744 division. When a divide-by-zero check is needed, this nop can be
8745 used to fill the branch delay slot.
8747 [1] If a double-word or a variable shift executes immediately
8748 after starting an integer division, the shift may give an
8749 incorrect result. See quotations of errata #16 and #28 from
8750 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8751 in mips.md for details.
8753 [2] A similar bug to [1] exists for all revisions of the
8754 R4000 and the R4400 when run in an MC configuration.
8755 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
8757 "19. In this following sequence:
8759 ddiv (or ddivu or div or divu)
8760 dsll32 (or dsrl32, dsra32)
8762 if an MPT stall occurs, while the divide is slipping the cpu
8763 pipeline, then the following double shift would end up with an
8766 Workaround: The compiler needs to avoid generating any
8767 sequence with divide followed by extended double shift."
8769 This erratum is also present in "MIPS R4400MC Errata, Processor
8770 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
8771 & 3.0" as errata #10 and #4, respectively.
8773 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8774 (also valid for MIPS R4000MC processors):
8776 "52. R4000SC: This bug does not apply for the R4000PC.
8778 There are two flavors of this bug:
8780 1) If the instruction just after divide takes an RF exception
8781 (tlb-refill, tlb-invalid) and gets an instruction cache
8782 miss (both primary and secondary) and the line which is
8783 currently in secondary cache at this index had the first
8784 data word, where the bits 5..2 are set, then R4000 would
8785 get a wrong result for the div.
8790 ------------------- # end-of page. -tlb-refill
8795 ------------------- # end-of page. -tlb-invalid
8798 2) If the divide is in the taken branch delay slot, where the
8799 target takes RF exception and gets an I-cache miss for the
8800 exception vector or where I-cache miss occurs for the
8801 target address, under the above mentioned scenarios, the
8802 div would get wrong results.
8805 j r2 # to next page mapped or unmapped
8806 div r8,r9 # this bug would be there as long
8807 # as there is an ICache miss and
8808 nop # the "data pattern" is present
8811 beq r0, r0, NextPage # to Next page
8815 This bug is present for div, divu, ddiv, and ddivu
8818 Workaround: For item 1), OS could make sure that the next page
8819 after the divide instruction is also mapped. For item 2), the
8820 compiler could make sure that the divide instruction is not in
8821 the branch delay slot."
8823 These processors have PRId values of 0x00004220 and 0x00004300 for
8824 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
8827 mips_output_division (const char *division, rtx *operands)
8832 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
8834 output_asm_insn (s, operands);
8837 if (TARGET_CHECK_ZERO_DIV)
8841 output_asm_insn (s, operands);
8842 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
8844 else if (GENERATE_DIVIDE_TRAPS)
8846 output_asm_insn (s, operands);
8851 output_asm_insn ("%(bne\t%2,%.,1f", operands);
8852 output_asm_insn (s, operands);
8853 s = "break\t7%)\n1:";
8859 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
8860 with a final "000" replaced by "k". Ignore case.
8862 Note: this function is shared between GCC and GAS. */
8865 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
8867 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
8868 given++, canonical++;
8870 return ((*given == 0 && *canonical == 0)
8871 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
8875 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
8876 CPU name. We've traditionally allowed a lot of variation here.
8878 Note: this function is shared between GCC and GAS. */
8881 mips_matching_cpu_name_p (const char *canonical, const char *given)
8883 /* First see if the name matches exactly, or with a final "000"
8885 if (mips_strict_matching_cpu_name_p (canonical, given))
8888 /* If not, try comparing based on numerical designation alone.
8889 See if GIVEN is an unadorned number, or 'r' followed by a number. */
8890 if (TOLOWER (*given) == 'r')
8892 if (!ISDIGIT (*given))
8895 /* Skip over some well-known prefixes in the canonical name,
8896 hoping to find a number there too. */
8897 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
8899 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
8901 else if (TOLOWER (canonical[0]) == 'r')
8904 return mips_strict_matching_cpu_name_p (canonical, given);
8908 /* Parse an option that takes the name of a processor as its argument.
8909 OPTION is the name of the option and CPU_STRING is the argument.
8910 Return the corresponding processor enumeration if the CPU_STRING is
8911 recognized, otherwise report an error and return null.
8913 A similar function exists in GAS. */
8915 static const struct mips_cpu_info *
8916 mips_parse_cpu (const char *option, const char *cpu_string)
8918 const struct mips_cpu_info *p;
8921 /* In the past, we allowed upper-case CPU names, but it doesn't
8922 work well with the multilib machinery. */
8923 for (s = cpu_string; *s != 0; s++)
8926 warning ("the cpu name must be lower case");
8930 /* 'from-abi' selects the most compatible architecture for the given
8931 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
8932 EABIs, we have to decide whether we're using the 32-bit or 64-bit
8933 version. Look first at the -mgp options, if given, otherwise base
8934 the choice on MASK_64BIT in TARGET_DEFAULT. */
8935 if (strcasecmp (cpu_string, "from-abi") == 0)
8936 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
8937 : ABI_NEEDS_64BIT_REGS ? 3
8938 : (TARGET_64BIT ? 3 : 1));
8940 /* 'default' has traditionally been a no-op. Probably not very useful. */
8941 if (strcasecmp (cpu_string, "default") == 0)
8944 for (p = mips_cpu_info_table; p->name != 0; p++)
8945 if (mips_matching_cpu_name_p (p->name, cpu_string))
8948 error ("bad value (%s) for %s", cpu_string, option);
8953 /* Return the processor associated with the given ISA level, or null
8954 if the ISA isn't valid. */
8956 static const struct mips_cpu_info *
8957 mips_cpu_info_from_isa (int isa)
8959 const struct mips_cpu_info *p;
8961 for (p = mips_cpu_info_table; p->name != 0; p++)
8968 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
8969 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
8970 they only hold condition code modes, and CCmode is always considered to
8971 be 4 bytes wide. All other registers are word sized. */
8974 mips_hard_regno_nregs (int regno, enum machine_mode mode)
8976 if (ST_REG_P (regno))
8977 return ((GET_MODE_SIZE (mode) + 3) / 4);
8978 else if (! FP_REG_P (regno))
8979 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
8981 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
8984 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
8985 all BLKmode objects are returned in memory. Under the new (N32 and
8986 64-bit MIPS ABIs) small structures are returned in a register.
8987 Objects with varying size must still be returned in memory, of
8991 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
8994 return (TYPE_MODE (type) == BLKmode);
8996 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
8997 || (int_size_in_bytes (type) == -1));
9001 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9003 return !TARGET_OLDABI;
9006 /* Return true if INSN is a multiply-add or multiply-subtract
9007 instruction and PREV assigns to the accumulator operand. */
9010 mips_linked_madd_p (rtx prev, rtx insn)
9014 x = single_set (insn);
9020 if (GET_CODE (x) == PLUS
9021 && GET_CODE (XEXP (x, 0)) == MULT
9022 && reg_set_p (XEXP (x, 1), prev))
9025 if (GET_CODE (x) == MINUS
9026 && GET_CODE (XEXP (x, 1)) == MULT
9027 && reg_set_p (XEXP (x, 0), prev))
9033 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9034 that may clobber hi or lo. */
9036 static rtx mips_macc_chains_last_hilo;
9038 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9039 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9042 mips_macc_chains_record (rtx insn)
9044 if (get_attr_may_clobber_hilo (insn))
9045 mips_macc_chains_last_hilo = insn;
9048 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9049 has NREADY elements, looking for a multiply-add or multiply-subtract
9050 instruction that is cumulative with mips_macc_chains_last_hilo.
9051 If there is one, promote it ahead of anything else that might
9052 clobber hi or lo. */
9055 mips_macc_chains_reorder (rtx *ready, int nready)
9059 if (mips_macc_chains_last_hilo != 0)
9060 for (i = nready - 1; i >= 0; i--)
9061 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9063 for (j = nready - 1; j > i; j--)
9064 if (recog_memoized (ready[j]) >= 0
9065 && get_attr_may_clobber_hilo (ready[j]))
9067 mips_promote_ready (ready, i, j);
9074 /* The last instruction to be scheduled. */
9076 static rtx vr4130_last_insn;
9078 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9079 points to an rtx that is initially an instruction. Nullify the rtx
9080 if the instruction uses the value of register X. */
9083 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9085 rtx *insn_ptr = data;
9088 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9092 /* Return true if there is true register dependence between vr4130_last_insn
9096 vr4130_true_reg_dependence_p (rtx insn)
9098 note_stores (PATTERN (vr4130_last_insn),
9099 vr4130_true_reg_dependence_p_1, &insn);
9103 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9104 the ready queue and that INSN2 is the instruction after it, return
9105 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9106 in which INSN1 and INSN2 can probably issue in parallel, but for
9107 which (INSN2, INSN1) should be less sensitive to instruction
9108 alignment than (INSN1, INSN2). See 4130.md for more details. */
9111 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9115 /* Check for the following case:
9117 1) there is some other instruction X with an anti dependence on INSN1;
9118 2) X has a higher priority than INSN2; and
9119 3) X is an arithmetic instruction (and thus has no unit restrictions).
9121 If INSN1 is the last instruction blocking X, it would better to
9122 choose (INSN1, X) over (INSN2, INSN1). */
9123 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9124 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9125 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9126 && recog_memoized (XEXP (dep, 0)) >= 0
9127 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
9130 if (vr4130_last_insn != 0
9131 && recog_memoized (insn1) >= 0
9132 && recog_memoized (insn2) >= 0)
9134 /* See whether INSN1 and INSN2 use different execution units,
9135 or if they are both ALU-type instructions. If so, they can
9136 probably execute in parallel. */
9137 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9138 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9139 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9141 /* If only one of the instructions has a dependence on
9142 vr4130_last_insn, prefer to schedule the other one first. */
9143 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9144 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9148 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9149 is not an ALU-type instruction and if INSN1 uses the same
9150 execution unit. (Note that if this condition holds, we already
9151 know that INSN2 uses a different execution unit.) */
9152 if (class1 != VR4130_CLASS_ALU
9153 && recog_memoized (vr4130_last_insn) >= 0
9154 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9161 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9162 queue with at least two instructions. Swap the first two if
9163 vr4130_swap_insns_p says that it could be worthwhile. */
9166 vr4130_reorder (rtx *ready, int nready)
9168 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9169 mips_promote_ready (ready, nready - 2, nready - 1);
9172 /* Remove the instruction at index LOWER from ready queue READY and
9173 reinsert it in front of the instruction at index HIGHER. LOWER must
9177 mips_promote_ready (rtx *ready, int lower, int higher)
9182 new_head = ready[lower];
9183 for (i = lower; i < higher; i++)
9184 ready[i] = ready[i + 1];
9185 ready[i] = new_head;
9188 /* Implement TARGET_SCHED_REORDER. */
9191 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9192 rtx *ready, int *nreadyp, int cycle)
9194 if (!reload_completed && TUNE_MACC_CHAINS)
9197 mips_macc_chains_last_hilo = 0;
9199 mips_macc_chains_reorder (ready, *nreadyp);
9201 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9204 vr4130_last_insn = 0;
9206 vr4130_reorder (ready, *nreadyp);
9208 return mips_issue_rate ();
9211 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9214 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9217 switch (GET_CODE (PATTERN (insn)))
9221 /* Don't count USEs and CLOBBERs against the issue rate. */
9226 if (!reload_completed && TUNE_MACC_CHAINS)
9227 mips_macc_chains_record (insn);
9228 vr4130_last_insn = insn;
9234 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9235 dependencies have no cost. */
9238 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9239 rtx dep ATTRIBUTE_UNUSED, int cost)
9241 if (REG_NOTE_KIND (link) != 0)
9246 /* Return the number of instructions that can be issued per cycle. */
9249 mips_issue_rate (void)
9253 case PROCESSOR_R4130:
9254 case PROCESSOR_R5400:
9255 case PROCESSOR_R5500:
9256 case PROCESSOR_R7000:
9257 case PROCESSOR_R9000:
9261 /* This is actually 4, but we get better performance if we claim 3.
9262 This is partly because of unwanted speculative code motion with the
9263 larger number, and partly because in most common cases we can't
9264 reach the theoretical max of 4. */
9272 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9273 be as wide as the scheduling freedom in the DFA. */
9276 mips_multipass_dfa_lookahead (void)
9278 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9279 if (mips_tune == PROCESSOR_SB1)
9285 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9286 return the first operand of the associated "pref" or "prefx" insn. */
9289 mips_prefetch_cookie (rtx write, rtx locality)
9291 /* store_streamed / load_streamed. */
9292 if (INTVAL (locality) <= 0)
9293 return GEN_INT (INTVAL (write) + 4);
9296 if (INTVAL (locality) <= 2)
9299 /* store_retained / load_retained. */
9300 return GEN_INT (INTVAL (write) + 6);
9303 /* MIPS builtin function support. */
9305 struct builtin_description
9307 /* The code of the main .md file instruction. See mips_builtin_type
9308 for more information. */
9309 enum insn_code icode;
9311 /* The floating-point comparison code to use with ICODE, if any. */
9312 enum mips_fp_condition cond;
9314 /* The name of the builtin function. */
9317 /* Specifies how the function should be expanded. */
9318 enum mips_builtin_type builtin_type;
9320 /* The function's prototype. */
9321 enum mips_function_type function_type;
9323 /* The target flags required for this function. */
9327 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9328 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9329 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9330 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9331 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
9333 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9335 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9336 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9337 "__builtin_mips_" #INSN "_" #COND "_s", \
9338 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9339 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9340 "__builtin_mips_" #INSN "_" #COND "_d", \
9341 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9343 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9344 The lower and upper forms require TARGET_FLAGS while the any and all
9345 forms require MASK_MIPS3D. */
9346 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9347 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9348 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9349 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9350 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9351 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9352 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9353 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9354 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9355 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9356 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9357 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9358 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9360 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9361 require MASK_MIPS3D. */
9362 #define CMP_4S_BUILTINS(INSN, COND) \
9363 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9364 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9365 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9367 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9368 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9369 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9372 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9373 instruction requires TARGET_FLAGS. */
9374 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9375 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9376 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9377 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9379 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9380 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9381 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9384 /* Define all the builtins related to c.cond.fmt condition COND. */
9385 #define CMP_BUILTINS(COND) \
9386 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE), \
9387 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9388 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9389 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE), \
9390 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
9391 CMP_4S_BUILTINS (c, COND), \
9392 CMP_4S_BUILTINS (cabs, COND)
9394 /* __builtin_mips_abs_ps() maps to the standard absM2 pattern. */
9395 #define CODE_FOR_mips_abs_ps CODE_FOR_absv2sf2
9397 static const struct builtin_description mips_bdesc[] =
9399 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9400 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9401 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9402 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9403 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE),
9404 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9405 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9406 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9408 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, MASK_PAIRED_SINGLE),
9409 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9410 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9411 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9412 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9414 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9415 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9416 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9417 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9418 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9419 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9421 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9422 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9423 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9424 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9425 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9426 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9428 MIPS_FP_CONDITIONS (CMP_BUILTINS)
9431 /* Builtin functions for the SB-1 processor. */
9433 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
9435 static const struct builtin_description sb1_bdesc[] =
9437 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE)
9440 /* This helps provide a mapping from builtin function codes to bdesc
9445 /* The builtin function table that this entry describes. */
9446 const struct builtin_description *bdesc;
9448 /* The number of entries in the builtin function table. */
9451 /* The target processor that supports these builtin functions.
9452 PROCESSOR_DEFAULT means we enable them for all processors. */
9453 enum processor_type proc;
9456 static const struct bdesc_map bdesc_arrays[] =
9458 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_DEFAULT },
9459 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 }
9462 /* Take the head of argument list *ARGLIST and convert it into a form
9463 suitable for input operand OP of instruction ICODE. Return the value
9464 and point *ARGLIST at the next element of the list. */
9467 mips_prepare_builtin_arg (enum insn_code icode,
9468 unsigned int op, tree *arglist)
9471 enum machine_mode mode;
9473 value = expand_expr (TREE_VALUE (*arglist), NULL_RTX, VOIDmode, 0);
9474 mode = insn_data[icode].operand[op].mode;
9475 if (!insn_data[icode].operand[op].predicate (value, mode))
9476 value = copy_to_mode_reg (mode, value);
9478 *arglist = TREE_CHAIN (*arglist);
9482 /* Return an rtx suitable for output operand OP of instruction ICODE.
9483 If TARGET is non-null, try to use it where possible. */
9486 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
9488 enum machine_mode mode;
9490 mode = insn_data[icode].operand[op].mode;
9491 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
9492 target = gen_reg_rtx (mode);
9497 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
9500 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9501 enum machine_mode mode ATTRIBUTE_UNUSED,
9502 int ignore ATTRIBUTE_UNUSED)
9504 enum insn_code icode;
9505 enum mips_builtin_type type;
9506 tree fndecl, arglist;
9508 const struct builtin_description *bdesc;
9509 const struct bdesc_map *m;
9511 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9512 arglist = TREE_OPERAND (exp, 1);
9513 fcode = DECL_FUNCTION_CODE (fndecl);
9516 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9518 if (fcode < m->size)
9521 icode = bdesc[fcode].icode;
9522 type = bdesc[fcode].builtin_type;
9532 case MIPS_BUILTIN_DIRECT:
9533 return mips_expand_builtin_direct (icode, target, arglist);
9535 case MIPS_BUILTIN_MOVT:
9536 case MIPS_BUILTIN_MOVF:
9537 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
9540 case MIPS_BUILTIN_CMP_ANY:
9541 case MIPS_BUILTIN_CMP_ALL:
9542 case MIPS_BUILTIN_CMP_UPPER:
9543 case MIPS_BUILTIN_CMP_LOWER:
9544 case MIPS_BUILTIN_CMP_SINGLE:
9545 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
9553 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
9556 mips_init_builtins (void)
9558 const struct builtin_description *d;
9559 const struct bdesc_map *m;
9560 tree types[(int) MIPS_MAX_FTYPE_MAX];
9561 tree V2SF_type_node;
9562 unsigned int offset;
9564 /* We have only builtins for -mpaired-single and -mips3d. */
9565 if (!TARGET_PAIRED_SINGLE_FLOAT)
9568 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
9570 types[MIPS_V2SF_FTYPE_V2SF]
9571 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
9573 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
9574 = build_function_type_list (V2SF_type_node,
9575 V2SF_type_node, V2SF_type_node, NULL_TREE);
9577 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
9578 = build_function_type_list (V2SF_type_node,
9579 V2SF_type_node, V2SF_type_node,
9580 integer_type_node, NULL_TREE);
9582 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
9583 = build_function_type_list (V2SF_type_node,
9584 V2SF_type_node, V2SF_type_node,
9585 V2SF_type_node, V2SF_type_node, NULL_TREE);
9587 types[MIPS_V2SF_FTYPE_SF_SF]
9588 = build_function_type_list (V2SF_type_node,
9589 float_type_node, float_type_node, NULL_TREE);
9591 types[MIPS_INT_FTYPE_V2SF_V2SF]
9592 = build_function_type_list (integer_type_node,
9593 V2SF_type_node, V2SF_type_node, NULL_TREE);
9595 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
9596 = build_function_type_list (integer_type_node,
9597 V2SF_type_node, V2SF_type_node,
9598 V2SF_type_node, V2SF_type_node, NULL_TREE);
9600 types[MIPS_INT_FTYPE_SF_SF]
9601 = build_function_type_list (integer_type_node,
9602 float_type_node, float_type_node, NULL_TREE);
9604 types[MIPS_INT_FTYPE_DF_DF]
9605 = build_function_type_list (integer_type_node,
9606 double_type_node, double_type_node, NULL_TREE);
9608 types[MIPS_SF_FTYPE_V2SF]
9609 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
9611 types[MIPS_SF_FTYPE_SF]
9612 = build_function_type_list (float_type_node,
9613 float_type_node, NULL_TREE);
9615 types[MIPS_SF_FTYPE_SF_SF]
9616 = build_function_type_list (float_type_node,
9617 float_type_node, float_type_node, NULL_TREE);
9619 types[MIPS_DF_FTYPE_DF]
9620 = build_function_type_list (double_type_node,
9621 double_type_node, NULL_TREE);
9623 types[MIPS_DF_FTYPE_DF_DF]
9624 = build_function_type_list (double_type_node,
9625 double_type_node, double_type_node, NULL_TREE);
9627 /* Iterate through all of the bdesc arrays, initializing all of the
9628 builtin functions. */
9631 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9633 if (m->proc == PROCESSOR_DEFAULT || (m->proc == mips_arch))
9634 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
9635 if ((d->target_flags & target_flags) == d->target_flags)
9636 lang_hooks.builtin_function (d->name, types[d->function_type],
9637 d - m->bdesc + offset,
9638 BUILT_IN_MD, NULL, NULL);
9643 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
9644 .md pattern and ARGLIST is the list of function arguments. TARGET,
9645 if nonnull, suggests a good place to put the result. */
9648 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist)
9650 rtx ops[MAX_RECOG_OPERANDS];
9653 target = mips_prepare_builtin_target (icode, 0, target);
9654 for (i = 1; i < insn_data[icode].n_operands; i++)
9655 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
9657 switch (insn_data[icode].n_operands)
9660 emit_insn (GEN_FCN (icode) (target, ops[1]));
9664 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2]));
9668 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2], ops[3]));
9677 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
9678 function (TYPE says which). ARGLIST is the list of arguments to the
9679 function, ICODE is the instruction that should be used to compare
9680 the first two arguments, and COND is the condition it should test.
9681 TARGET, if nonnull, suggests a good place to put the result. */
9684 mips_expand_builtin_movtf (enum mips_builtin_type type,
9685 enum insn_code icode, enum mips_fp_condition cond,
9686 rtx target, tree arglist)
9688 rtx cmp_result, op0, op1;
9690 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9691 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9692 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9693 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
9695 icode = CODE_FOR_mips_cond_move_tf_ps;
9696 target = mips_prepare_builtin_target (icode, 0, target);
9697 if (type == MIPS_BUILTIN_MOVT)
9699 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9700 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9704 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9705 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9707 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
9711 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
9712 of the comparison instruction and COND is the condition it should test.
9713 ARGLIST is the list of function arguments and TARGET, if nonnull,
9714 suggests a good place to put the boolean result. */
9717 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
9718 enum insn_code icode, enum mips_fp_condition cond,
9719 rtx target, tree arglist)
9721 rtx label1, label2, if_then_else;
9722 rtx pat, cmp_result, ops[MAX_RECOG_OPERANDS];
9723 rtx target_if_equal, target_if_unequal;
9726 if (target == 0 || GET_MODE (target) != SImode)
9727 target = gen_reg_rtx (SImode);
9729 /* Prepare the operands to the comparison. */
9730 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9731 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
9732 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
9734 switch (insn_data[icode].n_operands)
9737 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond));
9741 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2],
9742 ops[3], ops[4], GEN_INT (cond));
9749 /* If the comparison sets more than one register, we define the result
9750 to be 0 if all registers are false and -1 if all registers are true.
9751 The value of the complete result is indeterminate otherwise. It is
9752 possible to test individual registers using SUBREGs.
9754 Set up CMP_RESULT, CMP_VALUE, TARGET_IF_EQUAL and TARGET_IF_UNEQUAL so
9755 that the result should be TARGET_IF_EQUAL if (EQ CMP_RESULT CMP_VALUE)
9756 and TARGET_IF_UNEQUAL otherwise. */
9757 if (builtin_type == MIPS_BUILTIN_CMP_ALL)
9760 target_if_equal = const1_rtx;
9761 target_if_unequal = const0_rtx;
9766 target_if_equal = const0_rtx;
9767 target_if_unequal = const1_rtx;
9768 if (builtin_type == MIPS_BUILTIN_CMP_UPPER)
9769 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 4);
9770 else if (builtin_type == MIPS_BUILTIN_CMP_LOWER)
9771 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 0);
9774 /* First assume that CMP_RESULT == CMP_VALUE. */
9775 emit_move_insn (target, target_if_equal);
9777 /* Branch to LABEL1 if CMP_RESULT != CMP_VALUE. */
9779 label1 = gen_label_rtx ();
9780 label2 = gen_label_rtx ();
9782 = gen_rtx_IF_THEN_ELSE (VOIDmode,
9783 gen_rtx_fmt_ee (NE, GET_MODE (cmp_result),
9784 cmp_result, GEN_INT (cmp_value)),
9785 gen_rtx_LABEL_REF (VOIDmode, label1), pc_rtx);
9786 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, if_then_else));
9787 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
9788 gen_rtx_LABEL_REF (VOIDmode, label2)));
9790 emit_label (label1);
9792 /* Fix TARGET for CMP_RESULT != CMP_VALUE. */
9793 emit_move_insn (target, target_if_unequal);
9794 emit_label (label2);
9799 #include "gt-mips.h"