1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING. If not, write to
24 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
25 Boston, MA 02110-1301, USA. */
29 #include "coretypes.h"
34 #include "hard-reg-set.h"
36 #include "insn-config.h"
37 #include "conditions.h"
38 #include "insn-attr.h"
54 #include "target-def.h"
55 #include "integrate.h"
56 #include "langhooks.h"
57 #include "cfglayout.h"
58 #include "sched-int.h"
59 #include "tree-gimple.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.
   The wrapper's UNSPEC number (XINT (X, 1)) encodes a mips_symbol_type
   as an offset from UNSPEC_ADDRESS_FIRST; see UNSPEC_ADDRESS_TYPE.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  X must satisfy
   UNSPEC_ADDRESS_P; the type is stored as XINT (X, 1) biased by
   UNSPEC_ADDRESS_FIRST.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP				\
  (!TARGET_MIPS16 ? 0x7ff0					\
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8			\
   : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.

   Note that INSN is evaluated on every iteration (via SEQ_END), so it
   must not be an expression with side effects.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.

   Use an unsigned long constant for the probe: with a plain "1",
   testing bit 31 of a 32-bit register mask would left-shift a signed
   int into its sign bit, which is undefined behavior in C.  The result
   is still a plain 0/1 truth value.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1UL << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
168 /* For MIPS DSP ASE */
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
174 MIPS_SI_FTYPE_PTR_SI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
/* Enumerates the codes above as MIPS_FP_COND_<X>.  Passed to
   MIPS_FP_CONDITIONS to build enum mips_fp_condition below.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
/* Index X provides the string representation of MIPS_FP_COND_<X>.
   STRINGIFY turns each condition name into that string when expanded
   through MIPS_FP_CONDITIONS.  */
#define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);

/* Forward declarations for structures referenced by the prototypes
   below; their definitions appear later in this file.  */
struct mips16_constant;
struct mips_arg_info;
struct mips_address_info;
struct mips_integer_op;
/* Prototypes for the static functions defined later in this file.

   NOTE(review): several declarations in this section appear truncated
   in this copy of the file (mips_setup_incoming_varargs,
   mips_pass_by_reference, mips_callee_copies, mips_arg_partial_bytes,
   mips_expand_builtin_movtf and mips_expand_builtin_compare are missing
   continuation lines) — restore them from the original source.  */
static enum mips_symbol_type mips_classify_symbol (rtx);
static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
static bool mips_classify_address (struct mips_address_info *, rtx,
				   enum machine_mode, int);
static bool mips_cannot_force_const_mem (rtx);
static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
static int mips_symbol_insns (enum mips_symbol_type);
static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
static rtx mips_force_temporary (rtx, rtx);
static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
static unsigned int mips_build_lower (struct mips_integer_op *,
				      unsigned HOST_WIDE_INT);
static unsigned int mips_build_integer (struct mips_integer_op *,
					unsigned HOST_WIDE_INT);
static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
static int m16_check_op (rtx, int, int, int);
static bool mips_rtx_costs (rtx, int, int, int *);
static int mips_address_cost (rtx);
static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
static void mips_load_call_address (rtx, rtx, int);
static bool mips_function_ok_for_sibcall (tree, tree);
static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
			   tree, int, struct mips_arg_info *);
static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
static void mips_set_architecture (const struct mips_cpu_info *);
static void mips_set_tune (const struct mips_cpu_info *);
static bool mips_handle_option (size_t, const char *, int);
static struct machine_function *mips_init_machine_status (void);
static void print_operand_reloc (FILE *, rtx, const char **);
static void mips_file_start (void);
static bool mips_rewrite_small_data_p (rtx);
static int mips_small_data_pattern_1 (rtx *, void *);
static int mips_rewrite_small_data_1 (rtx *, void *);
static bool mips_function_has_gp_insn (void);
static unsigned int mips_global_pointer (void);
static bool mips_save_reg_p (unsigned int);
static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
				   mips_save_restore_fn);
static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
static void mips_output_cplocal (void);
static void mips_emit_loadgp (void);
static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
static void mips_set_frame_expr (rtx);
static rtx mips_frame_set (rtx, rtx);
static void mips_save_reg (rtx, rtx);
static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void mips_restore_reg (rtx, rtx);
static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static int symbolic_expression_p (rtx);
static section *mips_select_rtx_section (enum machine_mode, rtx,
					 unsigned HOST_WIDE_INT);
static section *mips_function_rodata_section (tree);
static bool mips_in_small_data_p (tree);
static bool mips_use_anchors_for_symbol_p (rtx);
static int mips_fpr_return_fields (tree, tree *);
static bool mips_return_in_msb (tree);
static rtx mips_return_fpr_pair (enum machine_mode mode,
				 enum machine_mode mode1, HOST_WIDE_INT,
				 enum machine_mode mode2, HOST_WIDE_INT);
static rtx mips16_gp_pseudo_reg (void);
static void mips16_fp_args (FILE *, int, int);
static void build_mips16_function_stub (FILE *);
static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
static void dump_constants (struct mips16_constant *, rtx);
static int mips16_insn_length (rtx);
static int mips16_rewrite_pool_refs (rtx *, void *);
static void mips16_lay_out_constants (void);
static void mips_sim_reset (struct mips_sim *);
static void mips_sim_init (struct mips_sim *, state_t);
static void mips_sim_next_cycle (struct mips_sim *);
static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
static int mips_sim_wait_regs_2 (rtx *, void *);
static void mips_sim_wait_regs_1 (rtx *, void *);
static void mips_sim_wait_regs (struct mips_sim *, rtx);
static void mips_sim_wait_units (struct mips_sim *, rtx);
static void mips_sim_wait_insn (struct mips_sim *, rtx);
static void mips_sim_record_set (rtx, rtx, void *);
static void mips_sim_issue_insn (struct mips_sim *, rtx);
static void mips_sim_issue_nop (struct mips_sim *);
static void mips_sim_finish_insn (struct mips_sim *, rtx);
static void vr4130_avoid_branch_rt_conflict (rtx);
static void vr4130_align_insns (void);
static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
static void mips_avoid_hazards (void);
static void mips_reorg (void);
static bool mips_strict_matching_cpu_name_p (const char *, const char *);
static bool mips_matching_cpu_name_p (const char *, const char *);
static const struct mips_cpu_info *mips_parse_cpu (const char *);
static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
static bool mips_return_in_memory (tree, tree);
static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
static void mips_macc_chains_record (rtx);
static void mips_macc_chains_reorder (rtx *, int);
static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
static bool vr4130_true_reg_dependence_p (rtx);
static bool vr4130_swap_insns_p (rtx, rtx);
static void vr4130_reorder (rtx *, int);
static void mips_promote_ready (rtx *, int, int);
static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
static int mips_variable_issue (FILE *, int, rtx, int);
static int mips_adjust_cost (rtx, rtx, rtx, int);
static int mips_issue_rate (void);
static int mips_multipass_dfa_lookahead (void);
static void mips_init_libfuncs (void);
static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
static tree mips_build_builtin_va_list (void);
static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
static bool mips_valid_pointer_mode (enum machine_mode);
static bool mips_vector_mode_supported_p (enum machine_mode);
static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void mips_init_builtins (void);
static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
				      enum insn_code, enum mips_fp_condition,
static rtx mips_expand_builtin_compare (enum mips_builtin_type,
					enum insn_code, enum mips_fp_condition,
static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
static void mips_encode_section_info (tree, rtx, int);
static void mips_extra_live_on_entry (bitmap);
static int mips_comp_type_attributes (tree, tree);
static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
432 /* Structure to be filled in by compute_frame_size with register
433 save masks, and offsets for the current function. */
435 struct mips_frame_info GTY(())
437 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
438 HOST_WIDE_INT var_size; /* # bytes that variables take up */
439 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
440 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
441 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
442 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
443 unsigned int mask; /* mask of saved gp registers */
444 unsigned int fmask; /* mask of saved fp registers */
445 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
446 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
447 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
448 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
449 bool initialized; /* true if frame size already calculated */
450 int num_gp; /* number of gp registers saved */
451 int num_fp; /* number of fp registers saved */
454 struct machine_function GTY(()) {
455 /* Pseudo-reg holding the value of $28 in a mips16 function which
456 refers to GP relative global variables. */
457 rtx mips16_gp_pseudo_rtx;
459 /* The number of extra stack bytes taken up by register varargs.
460 This area is allocated by the callee at the very top of the frame. */
463 /* Current frame information, calculated by compute_frame_size. */
464 struct mips_frame_info frame;
466 /* The register to use as the global pointer within this function. */
467 unsigned int global_pointer;
469 /* True if mips_adjust_insn_length should ignore an instruction's
471 bool ignore_hazard_length_p;
473 /* True if the whole function is suitable for .set noreorder and
475 bool all_noreorder_p;
477 /* True if the function is known to have an instruction that needs $gp. */
481 /* Information about a single argument. */
484 /* True if the argument is passed in a floating-point register, or
485 would have been if we hadn't run out of registers. */
488 /* The number of words passed in registers, rounded up. */
489 unsigned int reg_words;
491 /* For EABI, the offset of the first register from GP_ARG_FIRST or
492 FP_ARG_FIRST. For other ABIs, the offset of the first register from
493 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
494 comment for details).
496 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
498 unsigned int reg_offset;
500 /* The number of words that must be passed on the stack, rounded up. */
501 unsigned int stack_words;
503 /* The offset from the start of the stack overflow area of the argument's
504 first stack word. Only meaningful when STACK_WORDS is nonzero. */
505 unsigned int stack_offset;
509 /* Information about an address described by mips_address_type.
515 REG is the base register and OFFSET is the constant offset.
518 REG is the register that contains the high part of the address,
519 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
520 is the type of OFFSET's symbol.
523 SYMBOL_TYPE is the type of symbol being referenced. */
525 struct mips_address_info
527 enum mips_address_type type;
530 enum mips_symbol_type symbol_type;
534 /* One stage in a constant building sequence. These sequences have
538 A = A CODE[1] VALUE[1]
539 A = A CODE[2] VALUE[2]
542 where A is an accumulator, each CODE[i] is a binary rtl operation
543 and each VALUE[i] is a constant integer. */
544 struct mips_integer_op {
546 unsigned HOST_WIDE_INT value;
/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end.  */
#define MIPS_MAX_INTEGER_OPS 7
556 /* Information about a MIPS16e SAVE or RESTORE instruction. */
557 struct mips16e_save_restore_info {
558 /* The number of argument registers saved by a SAVE instruction.
559 0 for RESTORE instructions. */
562 /* Bit X is set if the instruction saves or restores GPR X. */
565 /* The total number of bytes to allocate. */
/* Global variables for machine-dependent things.

   NOTE(review): a few comments in this section have lost their variable
   definitions in this copy (the IRIS label counter, the .set nesting
   counters, the cmpMM operands and the -mipsN ISA variable) — verify
   against the original file.  */

/* Threshold for data being put into the small data/bss area, instead
   of the normal data area.  */
int mips_section_threshold = -1;

/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Count the number of sdb related labels that are generated (to find
   block start and end boundaries).  */
int sdb_label_count = 0;

/* Next label # for each statement for Silicon Graphics IRIS systems.  */

/* Name of the file containing the current function.  */
const char *current_function_file = "";

/* Number of nested .set noreorder, noat, nomacro, and volatile requests.  */

/* The next branch instruction is a branch likely, not branch normal.  */
int mips_branch_likely;

/* The operands passed to the last cmpMM expander.  */

/* The target cpu for code generation.  */
enum processor_type mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The target cpu for optimization and scheduling.  */
enum processor_type mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* Which instruction set architecture to use.  */

/* Which ABI to use.  */
int mips_abi = MIPS_ABI_DEFAULT;

/* Cost information to use.  */
const struct mips_rtx_cost_data *mips_cost;

/* Whether we are generating mips16 hard float code.  In mips16 mode
   we always set TARGET_SOFT_FLOAT; this variable is nonzero if
   -msoft-float was not specified by the user, which means that we
   should arrange to call mips32 hard floating point code.  */
int mips16_hard_float;

/* The architecture selected by -mipsN.  */
static const struct mips_cpu_info *mips_isa_info;

/* If TRUE, we split addresses into their high and low parts in the RTL.  */
int mips_split_addresses;

/* Mode used for saving/restoring general purpose registers.  */
static enum machine_mode gpr_mode;

/* Array giving truth value on whether or not a given hard register
   can support a given mode.  */
char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* List of all MIPS punctuation characters used by print_operand.  */
char mips_print_operand_punct[256];

/* Map GCC register number to debugger register number.  */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];

/* A copy of the original flag_delayed_branch: see override_options.  */
static int mips_flag_delayed_branch;

/* Nonzero until the first output filename has been emitted —
   NOTE(review): confirm; the consumer is not visible in this excerpt.  */
static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol().  */
bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern.  */
static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs.  */
static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
659 /* Map hard register number to register class */
660 const enum reg_class mips_regno_to_class[] =
662 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
663 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
664 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
665 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
666 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
667 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
668 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
669 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
670 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
671 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
672 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
673 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
674 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
675 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
676 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
677 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
678 HI_REG, LO_REG, NO_REGS, ST_REGS,
679 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
680 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
681 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
682 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
683 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
684 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
685 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
686 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
687 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
688 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
689 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
690 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
691 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
692 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
693 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
694 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
695 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
696 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
697 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
698 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
699 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
700 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
701 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
702 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
703 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
704 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
705 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
706 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
707 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
708 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
711 /* Table of machine dependent attributes. */
712 const struct attribute_spec mips_attribute_table[] =
714 { "long_call", 0, 0, false, true, true, NULL },
715 { "far", 0, 0, false, true, true, NULL },
716 { "near", 0, 0, false, true, true, NULL },
717 { NULL, 0, 0, false, false, false, NULL }
720 /* A table describing all the processors gcc knows about. Names are
721 matched in the order listed. The first mention of an ISA level is
722 taken as the canonical name for that ISA.
724 To ease comparison, please keep this table in the same order as
725 gas's mips_cpu_info_table[]. */
726 const struct mips_cpu_info mips_cpu_info_table[] = {
727 /* Entries for generic ISAs */
728 { "mips1", PROCESSOR_R3000, 1 },
729 { "mips2", PROCESSOR_R6000, 2 },
730 { "mips3", PROCESSOR_R4000, 3 },
731 { "mips4", PROCESSOR_R8000, 4 },
732 { "mips32", PROCESSOR_4KC, 32 },
733 { "mips32r2", PROCESSOR_M4K, 33 },
734 { "mips64", PROCESSOR_5KC, 64 },
737 { "r3000", PROCESSOR_R3000, 1 },
738 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
739 { "r3900", PROCESSOR_R3900, 1 },
742 { "r6000", PROCESSOR_R6000, 2 },
745 { "r4000", PROCESSOR_R4000, 3 },
746 { "vr4100", PROCESSOR_R4100, 3 },
747 { "vr4111", PROCESSOR_R4111, 3 },
748 { "vr4120", PROCESSOR_R4120, 3 },
749 { "vr4130", PROCESSOR_R4130, 3 },
750 { "vr4300", PROCESSOR_R4300, 3 },
751 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
752 { "r4600", PROCESSOR_R4600, 3 },
753 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
754 { "r4650", PROCESSOR_R4650, 3 },
757 { "r8000", PROCESSOR_R8000, 4 },
758 { "vr5000", PROCESSOR_R5000, 4 },
759 { "vr5400", PROCESSOR_R5400, 4 },
760 { "vr5500", PROCESSOR_R5500, 4 },
761 { "rm7000", PROCESSOR_R7000, 4 },
762 { "rm9000", PROCESSOR_R9000, 4 },
765 { "4kc", PROCESSOR_4KC, 32 },
766 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
767 { "4kp", PROCESSOR_4KP, 32 },
769 /* MIPS32 Release 2 */
770 { "m4k", PROCESSOR_M4K, 33 },
771 { "4kec", PROCESSOR_4KC, 33 },
772 { "4kem", PROCESSOR_4KC, 33 },
773 { "4kep", PROCESSOR_4KP, 33 },
775 { "24kc", PROCESSOR_24KC, 33 },
776 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
777 { "24kf", PROCESSOR_24KF2_1, 33 },
778 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
779 { "24kfx", PROCESSOR_24KF1_1, 33 },
780 { "24kx", PROCESSOR_24KF1_1, 33 },
782 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
783 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
784 { "24kef", PROCESSOR_24KF2_1, 33 },
785 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
786 { "24kefx", PROCESSOR_24KF1_1, 33 },
787 { "24kex", PROCESSOR_24KF1_1, 33 },
789 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
790 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
791 { "34kf", PROCESSOR_24KF2_1, 33 },
792 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
793 { "34kfx", PROCESSOR_24KF1_1, 33 },
794 { "34kx", PROCESSOR_24KF1_1, 33 },
796 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
797 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
798 { "74kf", PROCESSOR_74KF2_1, 33 },
799 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
800 { "74kfx", PROCESSOR_74KF1_1, 33 },
801 { "74kx", PROCESSOR_74KF1_1, 33 },
802 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
805 { "5kc", PROCESSOR_5KC, 64 },
806 { "5kf", PROCESSOR_5KF, 64 },
807 { "20kc", PROCESSOR_20KC, 64 },
808 { "sb1", PROCESSOR_SB1, 64 },
809 { "sb1a", PROCESSOR_SB1A, 64 },
810 { "sr71000", PROCESSOR_SR71000, 64 },
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  Expands to the trailing initializers of a
   struct mips_rtx_cost_data.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */		\
		      COSTS_N_INSNS (7),  /* fp_mult_sf */	\
		      COSTS_N_INSNS (8),  /* fp_mult_df */	\
		      COSTS_N_INSNS (23), /* fp_div_sf */	\
		      COSTS_N_INSNS (36), /* fp_div_df */	\
		      COSTS_N_INSNS (10), /* int_mult_si */	\
		      COSTS_N_INSNS (10), /* int_mult_di */	\
		      COSTS_N_INSNS (69), /* int_div_si */	\
		      COSTS_N_INSNS (69), /* int_div_di */	\
		      2,		  /* branch_cost */	\
		      4			  /* memory_latency */
/* Need to replace these with the costs of calling the appropriate
   library function.  Expands to the floating-point initializers of a
   struct mips_rtx_cost_data, deliberately made very expensive.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */		\
		      COSTS_N_INSNS (256), /* fp_mult_sf */	\
		      COSTS_N_INSNS (256), /* fp_mult_df */	\
		      COSTS_N_INSNS (256), /* fp_div_sf */	\
		      COSTS_N_INSNS (256)  /* fp_div_df */
838 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
840 COSTS_N_INSNS (1), /* fp_add */
841 COSTS_N_INSNS (1), /* fp_mult_sf */
842 COSTS_N_INSNS (1), /* fp_mult_df */
843 COSTS_N_INSNS (1), /* fp_div_sf */
844 COSTS_N_INSNS (1), /* fp_div_df */
845 COSTS_N_INSNS (1), /* int_mult_si */
846 COSTS_N_INSNS (1), /* int_mult_di */
847 COSTS_N_INSNS (1), /* int_div_si */
848 COSTS_N_INSNS (1), /* int_div_di */
850 4 /* memory_latency */
853 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
856 COSTS_N_INSNS (2), /* fp_add */
857 COSTS_N_INSNS (4), /* fp_mult_sf */
858 COSTS_N_INSNS (5), /* fp_mult_df */
859 COSTS_N_INSNS (12), /* fp_div_sf */
860 COSTS_N_INSNS (19), /* fp_div_df */
861 COSTS_N_INSNS (12), /* int_mult_si */
862 COSTS_N_INSNS (12), /* int_mult_di */
863 COSTS_N_INSNS (35), /* int_div_si */
864 COSTS_N_INSNS (35), /* int_div_di */
866 4 /* memory_latency */
871 COSTS_N_INSNS (6), /* int_mult_si */
872 COSTS_N_INSNS (6), /* int_mult_di */
873 COSTS_N_INSNS (36), /* int_div_si */
874 COSTS_N_INSNS (36), /* int_div_di */
876 4 /* memory_latency */
880 COSTS_N_INSNS (36), /* int_mult_si */
881 COSTS_N_INSNS (36), /* int_mult_di */
882 COSTS_N_INSNS (37), /* int_div_si */
883 COSTS_N_INSNS (37), /* int_div_di */
885 4 /* memory_latency */
889 COSTS_N_INSNS (4), /* int_mult_si */
890 COSTS_N_INSNS (11), /* int_mult_di */
891 COSTS_N_INSNS (36), /* int_div_si */
892 COSTS_N_INSNS (68), /* int_div_di */
894 4 /* memory_latency */
897 COSTS_N_INSNS (4), /* fp_add */
898 COSTS_N_INSNS (4), /* fp_mult_sf */
899 COSTS_N_INSNS (5), /* fp_mult_df */
900 COSTS_N_INSNS (17), /* fp_div_sf */
901 COSTS_N_INSNS (32), /* fp_div_df */
902 COSTS_N_INSNS (4), /* int_mult_si */
903 COSTS_N_INSNS (11), /* int_mult_di */
904 COSTS_N_INSNS (36), /* int_div_si */
905 COSTS_N_INSNS (68), /* int_div_di */
907 4 /* memory_latency */
914 COSTS_N_INSNS (5), /* int_mult_si */
915 COSTS_N_INSNS (5), /* int_mult_di */
916 COSTS_N_INSNS (41), /* int_div_si */
917 COSTS_N_INSNS (41), /* int_div_di */
919 4 /* memory_latency */
922 COSTS_N_INSNS (8), /* fp_add */
923 COSTS_N_INSNS (8), /* fp_mult_sf */
924 COSTS_N_INSNS (10), /* fp_mult_df */
925 COSTS_N_INSNS (34), /* fp_div_sf */
926 COSTS_N_INSNS (64), /* fp_div_df */
927 COSTS_N_INSNS (5), /* int_mult_si */
928 COSTS_N_INSNS (5), /* int_mult_di */
929 COSTS_N_INSNS (41), /* int_div_si */
930 COSTS_N_INSNS (41), /* int_div_di */
932 4 /* memory_latency */
935 COSTS_N_INSNS (4), /* fp_add */
936 COSTS_N_INSNS (4), /* fp_mult_sf */
937 COSTS_N_INSNS (5), /* fp_mult_df */
938 COSTS_N_INSNS (17), /* fp_div_sf */
939 COSTS_N_INSNS (32), /* fp_div_df */
940 COSTS_N_INSNS (5), /* int_mult_si */
941 COSTS_N_INSNS (5), /* int_mult_di */
942 COSTS_N_INSNS (41), /* int_div_si */
943 COSTS_N_INSNS (41), /* int_div_di */
945 4 /* memory_latency */
949 COSTS_N_INSNS (5), /* int_mult_si */
950 COSTS_N_INSNS (5), /* int_mult_di */
951 COSTS_N_INSNS (41), /* int_div_si */
952 COSTS_N_INSNS (41), /* int_div_di */
954 4 /* memory_latency */
957 COSTS_N_INSNS (8), /* fp_add */
958 COSTS_N_INSNS (8), /* fp_mult_sf */
959 COSTS_N_INSNS (10), /* fp_mult_df */
960 COSTS_N_INSNS (34), /* fp_div_sf */
961 COSTS_N_INSNS (64), /* fp_div_df */
962 COSTS_N_INSNS (5), /* int_mult_si */
963 COSTS_N_INSNS (5), /* int_mult_di */
964 COSTS_N_INSNS (41), /* int_div_si */
965 COSTS_N_INSNS (41), /* int_div_di */
967 4 /* memory_latency */
970 COSTS_N_INSNS (4), /* fp_add */
971 COSTS_N_INSNS (4), /* fp_mult_sf */
972 COSTS_N_INSNS (5), /* fp_mult_df */
973 COSTS_N_INSNS (17), /* fp_div_sf */
974 COSTS_N_INSNS (32), /* fp_div_df */
975 COSTS_N_INSNS (5), /* int_mult_si */
976 COSTS_N_INSNS (5), /* int_mult_di */
977 COSTS_N_INSNS (41), /* int_div_si */
978 COSTS_N_INSNS (41), /* int_div_di */
980 4 /* memory_latency */
983 COSTS_N_INSNS (6), /* fp_add */
984 COSTS_N_INSNS (6), /* fp_mult_sf */
985 COSTS_N_INSNS (7), /* fp_mult_df */
986 COSTS_N_INSNS (25), /* fp_div_sf */
987 COSTS_N_INSNS (48), /* fp_div_df */
988 COSTS_N_INSNS (5), /* int_mult_si */
989 COSTS_N_INSNS (5), /* int_mult_di */
990 COSTS_N_INSNS (41), /* int_div_si */
991 COSTS_N_INSNS (41), /* int_div_di */
993 4 /* memory_latency */
999 COSTS_N_INSNS (2), /* fp_add */
1000 COSTS_N_INSNS (4), /* fp_mult_sf */
1001 COSTS_N_INSNS (5), /* fp_mult_df */
1002 COSTS_N_INSNS (12), /* fp_div_sf */
1003 COSTS_N_INSNS (19), /* fp_div_df */
1004 COSTS_N_INSNS (2), /* int_mult_si */
1005 COSTS_N_INSNS (2), /* int_mult_di */
1006 COSTS_N_INSNS (35), /* int_div_si */
1007 COSTS_N_INSNS (35), /* int_div_di */
1008 1, /* branch_cost */
1009 4 /* memory_latency */
1012 COSTS_N_INSNS (3), /* fp_add */
1013 COSTS_N_INSNS (5), /* fp_mult_sf */
1014 COSTS_N_INSNS (6), /* fp_mult_df */
1015 COSTS_N_INSNS (15), /* fp_div_sf */
1016 COSTS_N_INSNS (16), /* fp_div_df */
1017 COSTS_N_INSNS (17), /* int_mult_si */
1018 COSTS_N_INSNS (17), /* int_mult_di */
1019 COSTS_N_INSNS (38), /* int_div_si */
1020 COSTS_N_INSNS (38), /* int_div_di */
1021 2, /* branch_cost */
1022 6 /* memory_latency */
1025 COSTS_N_INSNS (6), /* fp_add */
1026 COSTS_N_INSNS (7), /* fp_mult_sf */
1027 COSTS_N_INSNS (8), /* fp_mult_df */
1028 COSTS_N_INSNS (23), /* fp_div_sf */
1029 COSTS_N_INSNS (36), /* fp_div_df */
1030 COSTS_N_INSNS (10), /* int_mult_si */
1031 COSTS_N_INSNS (10), /* int_mult_di */
1032 COSTS_N_INSNS (69), /* int_div_si */
1033 COSTS_N_INSNS (69), /* int_div_di */
1034 2, /* branch_cost */
1035 6 /* memory_latency */
1047 /* The only costs that appear to be updated here are
1048 integer multiplication. */
1050 COSTS_N_INSNS (4), /* int_mult_si */
1051 COSTS_N_INSNS (6), /* int_mult_di */
1052 COSTS_N_INSNS (69), /* int_div_si */
1053 COSTS_N_INSNS (69), /* int_div_di */
1054 1, /* branch_cost */
1055 4 /* memory_latency */
1067 COSTS_N_INSNS (6), /* fp_add */
1068 COSTS_N_INSNS (4), /* fp_mult_sf */
1069 COSTS_N_INSNS (5), /* fp_mult_df */
1070 COSTS_N_INSNS (23), /* fp_div_sf */
1071 COSTS_N_INSNS (36), /* fp_div_df */
1072 COSTS_N_INSNS (5), /* int_mult_si */
1073 COSTS_N_INSNS (5), /* int_mult_di */
1074 COSTS_N_INSNS (36), /* int_div_si */
1075 COSTS_N_INSNS (36), /* int_div_di */
1076 1, /* branch_cost */
1077 4 /* memory_latency */
1080 COSTS_N_INSNS (6), /* fp_add */
1081 COSTS_N_INSNS (5), /* fp_mult_sf */
1082 COSTS_N_INSNS (6), /* fp_mult_df */
1083 COSTS_N_INSNS (30), /* fp_div_sf */
1084 COSTS_N_INSNS (59), /* fp_div_df */
1085 COSTS_N_INSNS (3), /* int_mult_si */
1086 COSTS_N_INSNS (4), /* int_mult_di */
1087 COSTS_N_INSNS (42), /* int_div_si */
1088 COSTS_N_INSNS (74), /* int_div_di */
1089 1, /* branch_cost */
1090 4 /* memory_latency */
1093 COSTS_N_INSNS (6), /* fp_add */
1094 COSTS_N_INSNS (5), /* fp_mult_sf */
1095 COSTS_N_INSNS (6), /* fp_mult_df */
1096 COSTS_N_INSNS (30), /* fp_div_sf */
1097 COSTS_N_INSNS (59), /* fp_div_df */
1098 COSTS_N_INSNS (5), /* int_mult_si */
1099 COSTS_N_INSNS (9), /* int_mult_di */
1100 COSTS_N_INSNS (42), /* int_div_si */
1101 COSTS_N_INSNS (74), /* int_div_di */
1102 1, /* branch_cost */
1103 4 /* memory_latency */
1106 /* The only costs that are changed here are
1107 integer multiplication. */
1108 COSTS_N_INSNS (6), /* fp_add */
1109 COSTS_N_INSNS (7), /* fp_mult_sf */
1110 COSTS_N_INSNS (8), /* fp_mult_df */
1111 COSTS_N_INSNS (23), /* fp_div_sf */
1112 COSTS_N_INSNS (36), /* fp_div_df */
1113 COSTS_N_INSNS (5), /* int_mult_si */
1114 COSTS_N_INSNS (9), /* int_mult_di */
1115 COSTS_N_INSNS (69), /* int_div_si */
1116 COSTS_N_INSNS (69), /* int_div_di */
1117 1, /* branch_cost */
1118 4 /* memory_latency */
1124 /* The only costs that are changed here are
1125 integer multiplication. */
1126 COSTS_N_INSNS (6), /* fp_add */
1127 COSTS_N_INSNS (7), /* fp_mult_sf */
1128 COSTS_N_INSNS (8), /* fp_mult_df */
1129 COSTS_N_INSNS (23), /* fp_div_sf */
1130 COSTS_N_INSNS (36), /* fp_div_df */
1131 COSTS_N_INSNS (3), /* int_mult_si */
1132 COSTS_N_INSNS (8), /* int_mult_di */
1133 COSTS_N_INSNS (69), /* int_div_si */
1134 COSTS_N_INSNS (69), /* int_div_di */
1135 1, /* branch_cost */
1136 4 /* memory_latency */
1139 /* These costs are the same as the SB-1A below. */
1140 COSTS_N_INSNS (4), /* fp_add */
1141 COSTS_N_INSNS (4), /* fp_mult_sf */
1142 COSTS_N_INSNS (4), /* fp_mult_df */
1143 COSTS_N_INSNS (24), /* fp_div_sf */
1144 COSTS_N_INSNS (32), /* fp_div_df */
1145 COSTS_N_INSNS (3), /* int_mult_si */
1146 COSTS_N_INSNS (4), /* int_mult_di */
1147 COSTS_N_INSNS (36), /* int_div_si */
1148 COSTS_N_INSNS (68), /* int_div_di */
1149 1, /* branch_cost */
1150 4 /* memory_latency */
1153 /* These costs are the same as the SB-1 above. */
1154 COSTS_N_INSNS (4), /* fp_add */
1155 COSTS_N_INSNS (4), /* fp_mult_sf */
1156 COSTS_N_INSNS (4), /* fp_mult_df */
1157 COSTS_N_INSNS (24), /* fp_div_sf */
1158 COSTS_N_INSNS (32), /* fp_div_df */
1159 COSTS_N_INSNS (3), /* int_mult_si */
1160 COSTS_N_INSNS (4), /* int_mult_di */
1161 COSTS_N_INSNS (36), /* int_div_si */
1162 COSTS_N_INSNS (68), /* int_div_di */
1163 1, /* branch_cost */
1164 4 /* memory_latency */
1171 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1172 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1173 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
/* Hard register numbers, highest-numbered save register first, per the
   ordering requirement described above. */
1174 static const unsigned char mips16e_s2_s8_regs[] = {
1175 30, 23, 22, 21, 20, 19, 18
1177 static const unsigned char mips16e_a0_a3_regs[] = {
1181 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1182 ordered from the uppermost in memory to the lowest in memory. */
1183 static const unsigned char mips16e_save_restore_regs[] = {
1184 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1187 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
1188 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1189 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1192 /* Initialize the GCC target structure. */
/* Assembler output directives. */
1193 #undef TARGET_ASM_ALIGNED_HI_OP
1194 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1195 #undef TARGET_ASM_ALIGNED_SI_OP
1196 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1197 #undef TARGET_ASM_ALIGNED_DI_OP
1198 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1200 #undef TARGET_ASM_FUNCTION_PROLOGUE
1201 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1202 #undef TARGET_ASM_FUNCTION_EPILOGUE
1203 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1204 #undef TARGET_ASM_SELECT_RTX_SECTION
1205 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1206 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1207 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
/* Instruction scheduler hooks. */
1209 #undef TARGET_SCHED_REORDER
1210 #define TARGET_SCHED_REORDER mips_sched_reorder
1211 #undef TARGET_SCHED_VARIABLE_ISSUE
1212 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1213 #undef TARGET_SCHED_ADJUST_COST
1214 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1215 #undef TARGET_SCHED_ISSUE_RATE
1216 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1217 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1218 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1219 mips_multipass_dfa_lookahead
1221 #undef TARGET_DEFAULT_TARGET_FLAGS
1222 #define TARGET_DEFAULT_TARGET_FLAGS \
1224 | TARGET_CPU_DEFAULT \
1225 | TARGET_ENDIAN_DEFAULT \
1226 | TARGET_FP_EXCEPTIONS_DEFAULT \
1227 | MASK_CHECK_ZERO_DIV \
1229 #undef TARGET_HANDLE_OPTION
1230 #define TARGET_HANDLE_OPTION mips_handle_option
/* Calling-convention and cost hooks. */
1232 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1233 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1235 #undef TARGET_VALID_POINTER_MODE
1236 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1237 #undef TARGET_RTX_COSTS
1238 #define TARGET_RTX_COSTS mips_rtx_costs
1239 #undef TARGET_ADDRESS_COST
1240 #define TARGET_ADDRESS_COST mips_address_cost
1242 #undef TARGET_IN_SMALL_DATA_P
1243 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1245 #undef TARGET_MACHINE_DEPENDENT_REORG
1246 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1248 #undef TARGET_ASM_FILE_START
1249 #define TARGET_ASM_FILE_START mips_file_start
1250 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1251 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1253 #undef TARGET_INIT_LIBFUNCS
1254 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
/* Varargs and argument-passing hooks. */
1256 #undef TARGET_BUILD_BUILTIN_VA_LIST
1257 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1258 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1259 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1261 #undef TARGET_PROMOTE_FUNCTION_ARGS
1262 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1263 #undef TARGET_PROMOTE_FUNCTION_RETURN
1264 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1265 #undef TARGET_PROMOTE_PROTOTYPES
1266 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1268 #undef TARGET_RETURN_IN_MEMORY
1269 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1270 #undef TARGET_RETURN_IN_MSB
1271 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1273 #undef TARGET_ASM_OUTPUT_MI_THUNK
1274 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1275 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1276 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1278 #undef TARGET_SETUP_INCOMING_VARARGS
1279 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1280 #undef TARGET_STRICT_ARGUMENT_NAMING
1281 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1282 #undef TARGET_MUST_PASS_IN_STACK
1283 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1284 #undef TARGET_PASS_BY_REFERENCE
1285 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1286 #undef TARGET_CALLEE_COPIES
1287 #define TARGET_CALLEE_COPIES mips_callee_copies
1288 #undef TARGET_ARG_PARTIAL_BYTES
1289 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1291 #undef TARGET_MODE_REP_EXTENDED
1292 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1294 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1295 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
/* Target builtins and TLS. */
1297 #undef TARGET_INIT_BUILTINS
1298 #define TARGET_INIT_BUILTINS mips_init_builtins
1299 #undef TARGET_EXPAND_BUILTIN
1300 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1302 #undef TARGET_HAVE_TLS
1303 #define TARGET_HAVE_TLS HAVE_AS_TLS
1305 #undef TARGET_CANNOT_FORCE_CONST_MEM
1306 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1308 #undef TARGET_ENCODE_SECTION_INFO
1309 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1311 #undef TARGET_ATTRIBUTE_TABLE
1312 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1314 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1315 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
/* Section-anchor support: anchors may be offset by a signed 16-bit
   displacement, matching the load/store immediate range. */
1317 #undef TARGET_MIN_ANCHOR_OFFSET
1318 #define TARGET_MIN_ANCHOR_OFFSET -32768
1319 #undef TARGET_MAX_ANCHOR_OFFSET
1320 #define TARGET_MAX_ANCHOR_OFFSET 32767
1321 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1322 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1323 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1324 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1326 #undef TARGET_COMP_TYPE_ATTRIBUTES
1327 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1329 struct gcc_target targetm = TARGET_INITIALIZER;
1332 /* Predicates to test for presence of "near" and "far"/"long_call"
1333 attributes on the given TYPE. */
/* Return true if TYPE carries the "near" attribute. */
1336 mips_near_type_p (tree type)
1338 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
/* Return true if TYPE carries either the "long_call" or the "far"
   attribute; the two are treated as synonyms. */
1342 mips_far_type_p (tree type)
1344 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1345 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1349 /* Return 0 if the attributes for two types are incompatible, 1 if they
1350 are compatible, and 2 if they are nearly compatible (which causes a
1351 warning to be generated). */
1354 mips_comp_type_attributes (tree type1, tree type2)
1356 /* Check for mismatch of non-default calling convention. */
/* Only function types can carry the near/far attributes of interest. */
1357 if (TREE_CODE (type1) != FUNCTION_TYPE)
1360 /* Disallow mixed near/far attributes. */
1361 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1363 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1369 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1370 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1373 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
/* Canonical PLUS form puts the constant term second. */
1375 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1377 *base_ptr = XEXP (x, 0);
1378 *offset_ptr = INTVAL (XEXP (x, 1));
1387 /* Return true if SYMBOL_REF X is associated with a global symbol
1388 (in the STB_GLOBAL sense). */
1391 mips_global_symbol_p (rtx x)
1395 decl = SYMBOL_REF_DECL (x);
/* With no attached declaration, fall back on the SYMBOL_REF's own
   local/global flag. */
1397 return !SYMBOL_REF_LOCAL_P (x);
1399 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1400 or weak symbols. Relocations in the object file will be against
1401 the target symbol, so it's that symbol's binding that matters here. */
1402 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1405 /* Return true if SYMBOL_REF X binds locally. */
1408 mips_symbol_binds_local_p (rtx x)
/* Prefer the decl-based answer when a declaration is available;
   otherwise trust the flag recorded on the SYMBOL_REF. */
1410 return (SYMBOL_REF_DECL (x)
1411 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1412 : SYMBOL_REF_LOCAL_P (x));
1415 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
1417 static enum mips_symbol_type
/* The classification chosen here drives mips_symbolic_address_p and
   mips_symbol_insns below. */
1418 mips_classify_symbol (rtx x)
1421 return SYMBOL_GOT_DISP;
1423 if (GET_CODE (x) == LABEL_REF)
1426 return SYMBOL_CONSTANT_POOL;
1427 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1428 return SYMBOL_GOT_PAGE_OFST;
1429 return SYMBOL_GENERAL;
1432 gcc_assert (GET_CODE (x) == SYMBOL_REF);
/* TLS symbols need their own access sequences. */
1434 if (SYMBOL_REF_TLS_MODEL (x))
1437 if (CONSTANT_POOL_ADDRESS_P (x))
1440 return SYMBOL_CONSTANT_POOL;
/* Pool entries small enough for the GP-relative area count as
   small data. */
1442 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1443 return SYMBOL_SMALL_DATA;
1446 /* Do not use small-data accesses for weak symbols; they may end up
1448 if (SYMBOL_REF_SMALL_P (x)
1449 && !SYMBOL_REF_WEAK (x))
1450 return SYMBOL_SMALL_DATA;
1452 if (TARGET_ABICALLS)
1454 /* Don't use GOT accesses for locally-binding symbols; we can use
1455 %hi and %lo instead. */
1456 if (TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x))
1457 return SYMBOL_GENERAL;
1459 /* There are three cases to consider:
1461 - o32 PIC (either with or without explicit relocs)
1462 - n32/n64 PIC without explicit relocs
1463 - n32/n64 PIC with explicit relocs
1465 In the first case, both local and global accesses will use an
1466 R_MIPS_GOT16 relocation. We must correctly predict which of
1467 the two semantics (local or global) the assembler and linker
1468 will apply. The choice depends on the symbol's binding rather
1469 than its visibility.
1471 In the second case, the assembler will not use R_MIPS_GOT16
1472 relocations, but it chooses between local and global accesses
1473 in the same way as for o32 PIC.
1475 In the third case we have more freedom since both forms of
1476 access will work for any kind of symbol. However, there seems
1477 little point in doing things differently. */
1478 if (mips_global_symbol_p (x))
1479 return SYMBOL_GOT_DISP;
1481 return SYMBOL_GOT_PAGE_OFST;
1484 return SYMBOL_GENERAL;
1487 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1488 is the alignment (in bytes) of SYMBOL_REF X. */
1491 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1493 /* If for some reason we can't get the alignment for the
1494 symbol, initializing this to one means we will only accept
1496 HOST_WIDE_INT align = 1;
1499 /* Get the alignment of the symbol we're referring to. */
1500 t = SYMBOL_REF_DECL (x);
1502 align = DECL_ALIGN_UNIT (t);
/* Accept only offsets that stay within one aligned unit of the symbol. */
1504 return offset >= 0 && offset < align;
1507 /* Return true if X is a symbolic constant that can be calculated in
1508 the same way as a bare symbol. If it is, store the type of the
1509 symbol in *SYMBOL_TYPE. */
1512 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
/* Separate the symbolic base from any CONST_INT addend. */
1516 split_const (x, &x, &offset);
1517 if (UNSPEC_ADDRESS_P (x))
1519 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1520 x = UNSPEC_ADDRESS (x);
1522 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1524 *symbol_type = mips_classify_symbol (x);
1525 if (*symbol_type == SYMBOL_TLS)
/* A bare symbol with no offset is always acceptable. */
1531 if (offset == const0_rtx)
1534 /* Check whether a nonzero offset is valid for the underlying
1536 switch (*symbol_type)
1538 case SYMBOL_GENERAL:
1539 case SYMBOL_64_HIGH:
1542 /* If the target has 64-bit pointers and the object file only
1543 supports 32-bit symbols, the values of those symbols will be
1544 sign-extended. In this case we can't allow an arbitrary offset
1545 in case the 32-bit value X + OFFSET has a different sign from X. */
1546 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1547 return offset_within_block_p (x, INTVAL (offset));
1549 /* In other cases the relocations can handle any offset. */
1552 case SYMBOL_CONSTANT_POOL:
1553 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1554 In this case, we no longer have access to the underlying constant,
1555 but the original symbol-based access was known to be valid. */
1556 if (GET_CODE (x) == LABEL_REF)
1561 case SYMBOL_SMALL_DATA:
1562 /* Make sure that the offset refers to something within the
1563 same object block. This should guarantee that the final
1564 PC- or GP-relative offset is within the 16-bit limit. */
1565 return offset_within_block_p (x, INTVAL (offset));
1567 case SYMBOL_GOT_PAGE_OFST:
1568 case SYMBOL_GOTOFF_PAGE:
1569 /* If the symbol is global, the GOT entry will contain the symbol's
1570 address, and we will apply a 16-bit offset after loading it.
1571 If the symbol is local, the linker should provide enough local
1572 GOT entries for a 16-bit offset, but larger offsets may lead
1574 return SMALL_INT (offset);
1578 /* There is no carry between the HI and LO REL relocations, so the
1579 offset is only valid if we know it won't lead to such a carry. */
1580 return mips_offset_within_alignment_p (x, INTVAL (offset));
1582 case SYMBOL_GOT_DISP:
1583 case SYMBOL_GOTOFF_DISP:
1584 case SYMBOL_GOTOFF_CALL:
1585 case SYMBOL_GOTOFF_LOADGP:
1588 case SYMBOL_GOTTPREL:
1597 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1600 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1602 if (!HARD_REGISTER_NUM_P (regno))
/* Map an allocated pseudo to its assigned hard register. */
1606 regno = reg_renumber[regno];
1609 /* These fake registers will be eliminated to either the stack or
1610 hard frame pointer, both of which are usually valid base registers.
1611 Reload deals with the cases where the eliminated form isn't valid. */
1612 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1615 /* In mips16 mode, the stack pointer can only address word and doubleword
1616 values, nothing smaller. There are two problems here:
1618 (a) Instantiating virtual registers can introduce new uses of the
1619 stack pointer. If these virtual registers are valid addresses,
1620 the stack pointer should be too.
1622 (b) Most uses of the stack pointer are not made explicit until
1623 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1624 We don't know until that stage whether we'll be eliminating to the
1625 stack pointer (which needs the restriction) or the hard frame
1626 pointer (which doesn't).
1628 All in all, it seems more consistent to only enforce this restriction
1629 during and after reload. */
1630 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1631 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
/* mips16 base registers are restricted to the M16 subset of the GPRs. */
1633 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1637 /* Return true if X is a valid base register for the given mode.
1638 Allow only hard registers if STRICT. */
1641 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
/* NOTE(review): in the non-strict case a SUBREG wrapper is presumably
   stripped before the register check — confirm against full source. */
1643 if (!strict && GET_CODE (x) == SUBREG)
1647 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1651 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1652 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1655 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1656 enum machine_mode mode)
1658 switch (symbol_type)
1660 case SYMBOL_GENERAL:
/* NOTE(review): general symbols are rejected in mips16 mode —
   presumably because mips16 lacks the needed relocations; confirm. */
1661 return !TARGET_MIPS16;
1663 case SYMBOL_SMALL_DATA:
1666 case SYMBOL_CONSTANT_POOL:
1667 /* PC-relative addressing is only available for lw and ld. */
1668 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1670 case SYMBOL_GOT_PAGE_OFST:
1673 case SYMBOL_GOT_DISP:
1674 /* The address will have to be loaded from the GOT first. */
1677 case SYMBOL_GOTOFF_PAGE:
1678 case SYMBOL_GOTOFF_DISP:
1679 case SYMBOL_GOTOFF_CALL:
1680 case SYMBOL_GOTOFF_LOADGP:
1685 case SYMBOL_GOTTPREL:
1687 case SYMBOL_64_HIGH:
1697 /* Return true if X is a valid address for machine mode MODE. If it is,
1698 fill in INFO appropriately. STRICT is true if we should only accept
1699 hard base registers. */
1702 mips_classify_address (struct mips_address_info *info, rtx x,
1703 enum machine_mode mode, int strict)
1705 switch (GET_CODE (x))
/* Bare base register. */
1709 info->type = ADDRESS_REG;
1711 info->offset = const0_rtx;
1712 return mips_valid_base_register_p (info->reg, mode, strict);
/* Base register plus constant offset. */
1715 info->type = ADDRESS_REG;
1716 info->reg = XEXP (x, 0);
1717 info->offset = XEXP (x, 1);
1718 return (mips_valid_base_register_p (info->reg, mode, strict)
1719 && const_arith_operand (info->offset, VOIDmode));
/* LO_SUM of a base register and a symbolic low part; the symbol type
   must have a %lo-style relocation available. */
1722 info->type = ADDRESS_LO_SUM;
1723 info->reg = XEXP (x, 0);
1724 info->offset = XEXP (x, 1);
1725 return (mips_valid_base_register_p (info->reg, mode, strict)
1726 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1727 && mips_symbolic_address_p (info->symbol_type, mode)
1728 && mips_lo_relocs[info->symbol_type] != 0);
1731 /* Small-integer addresses don't occur very often, but they
1732 are legitimate if $0 is a valid base register. */
1733 info->type = ADDRESS_CONST_INT;
1734 return !TARGET_MIPS16 && SMALL_INT (x);
/* A symbolic constant used directly as an address; only allowed when
   the symbol does not need to be split into HIGH/LO_SUM parts. */
1739 info->type = ADDRESS_SYMBOLIC;
1740 return (mips_symbolic_constant_p (x, &info->symbol_type)
1741 && mips_symbolic_address_p (info->symbol_type, mode)
1742 && !mips_split_p[info->symbol_type]);
1749 /* Return true if X is a thread-local symbol. */
1752 mips_tls_operand_p (rtx x)
1754 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1757 /* A for_each_rtx callback used by mips_cannot_force_const_mem:
   return true if *X is a thread-local symbol. */
1760 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1762 return mips_tls_operand_p (*x);
1765 /* Return true if X can not be forced into a constant pool. */
1768 mips_cannot_force_const_mem (rtx x)
1774 /* As an optimization, reject constants that mips_legitimize_move
1777 Suppose we have a multi-instruction sequence that loads constant C
1778 into register R. If R does not get allocated a hard register, and
1779 R is used in an operand that allows both registers and memory
1780 references, reload will consider forcing C into memory and using
1781 one of the instruction's memory alternatives. Returning false
1782 here will force it to use an input reload instead. */
1783 if (GET_CODE (x) == CONST_INT)
1786 split_const (x, &base, &offset);
1787 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
/* TLS references need dynamic resolution, so scan the whole rtx
   for thread-local symbols. */
1791 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1797 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1798 constant pools, but normal-mode code doesn't need to. */
1801 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1802 rtx x ATTRIBUTE_UNUSED)
1804 return !TARGET_MIPS16;
1807 /* Return the number of instructions needed to load a symbol of the
1808 given type into a register. If valid in an address, the same number
1809 of instructions are needed for loads and stores. Treat extended
1810 mips16 instructions as two instructions. */
1813 mips_symbol_insns (enum mips_symbol_type type)
1817 case SYMBOL_GENERAL:
1818 /* In mips16 code, general symbols must be fetched from the
1823 /* When using 64-bit symbols, we need 5 preparatory instructions,
1826 lui $at,%highest(symbol)
1827 daddiu $at,$at,%higher(symbol)
1829 daddiu $at,$at,%hi(symbol)
1832 The final address is then $at + %lo(symbol). With 32-bit
1833 symbols we just need a preparatory lui. */
1834 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1836 case SYMBOL_SMALL_DATA:
1840 case SYMBOL_CONSTANT_POOL:
1841 /* This case is for mips16 only. Assume we'll need an
1842 extended instruction. */
1845 case SYMBOL_GOT_PAGE_OFST:
1846 case SYMBOL_GOT_DISP:
1847 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1848 the local/global classification is accurate. See override_options
1851 The worst cases are:
1853 (1) For local symbols when generating o32 or o64 code. The assembler
1859 ...and the final address will be $at + %lo(symbol).
1861 (2) For global symbols when -mxgot. The assembler will use:
1863 lui $at,%got_hi(symbol)
1866 ...and the final address will be $at + %got_lo(symbol). */
1869 case SYMBOL_GOTOFF_PAGE:
1870 case SYMBOL_GOTOFF_DISP:
1871 case SYMBOL_GOTOFF_CALL:
1872 case SYMBOL_GOTOFF_LOADGP:
1873 case SYMBOL_64_HIGH:
1879 case SYMBOL_GOTTPREL:
/* Split symbols need a HIGH/LO_SUM pair; unsplit ones need one insn. */
1881 /* Check whether the offset is a 16- or 32-bit value. */
1882 return mips_split_p[type] ? 2 : 1;
1885 /* We don't treat a bare TLS symbol as a constant. */
1891 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1894 mips_stack_address_p (rtx x, enum machine_mode mode)
1896 struct mips_address_info addr;
1898 return (mips_classify_address (&addr, x, mode, false)
1899 && addr.type == ADDRESS_REG
1900 && addr.reg == stack_pointer_rtx);
1903 /* Return true if a value at OFFSET bytes from BASE can be accessed
1904 using an unextended mips16 instruction. MODE is the mode of the
1907 Usually the offset in an unextended instruction is a 5-bit field.
1908 The offset is unsigned and shifted left once for HIs, twice
1909 for SIs, and so on. An exception is SImode accesses off the
1910 stack pointer, which have an 8-bit immediate field. */
1913 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
/* The offset must be a non-negative multiple of the access size. */
1916 && GET_CODE (offset) == CONST_INT
1917 && INTVAL (offset) >= 0
1918 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
/* 256 = 2^8 for the sp-relative field, 32 = 2^5 for the usual field;
   both limits are scaled by the access size. */
1920 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1921 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1922 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1928 /* Return the number of instructions needed to load or store a value
1929 of mode MODE at X. Return 0 if X isn't valid for MODE.
1931 For mips16 code, count extended instructions as two instructions. */
1934 mips_address_insns (rtx x, enum machine_mode mode)
1936 struct mips_address_info addr;
1939 if (mode == BLKmode)
1940 /* BLKmode is used for single unaligned loads and stores. */
1943 /* Each word of a multi-word value will be accessed individually. */
1944 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1946 if (mips_classify_address (&addr, x, mode, false))
/* A mips16 reference that needs an extended offset costs double. */
1951 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1955 case ADDRESS_LO_SUM:
1956 return (TARGET_MIPS16 ? factor * 2 : factor);
1958 case ADDRESS_CONST_INT:
1961 case ADDRESS_SYMBOLIC:
1962 return factor * mips_symbol_insns (addr.symbol_type);
1968 /* Likewise for constant X.  Returns the instruction count needed to
   load X into a register; a result of 0 means X can't be loaded
   this way. */
1971 mips_const_insns (rtx x)
1973 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1974 enum mips_symbol_type symbol_type;
1977 switch (GET_CODE (x))
/* HIGH is only valid for symbols that we split into HIGH/LO_SUM. */
1981 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1982 || !mips_split_p[symbol_type])
1989 /* Unsigned 8-bit constants can be loaded using an unextended
1990 LI instruction. Unsigned 16-bit constants can be loaded
1991 using an extended LI. Negative constants must be loaded
1992 using LI and then negated. */
1993 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1994 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1995 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1996 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1999 return mips_build_integer (codes, INTVAL (x));
2003 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2009 /* See if we can refer to X directly. */
2010 if (mips_symbolic_constant_p (x, &symbol_type))
2011 return mips_symbol_insns (symbol_type);
2013 /* Otherwise try splitting the constant into a base and offset.
2014 16-bit offsets can be added using an extra addiu. Larger offsets
2015 must be calculated separately and then added to the base. */
2016 split_const (x, &x, &offset);
2019 int n = mips_const_insns (x);
2022 if (SMALL_INT (offset))
2025 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2032 return mips_symbol_insns (mips_classify_symbol (x));
2040 /* Return the number of instructions needed for memory reference X.
2041 Count extended mips16 instructions as two instructions. */
2044 mips_fetch_insns (rtx x)
2046 gcc_assert (MEM_P (x));
2047 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
2051 /* Return the number of instructions needed for an integer division. */
2054 mips_idiv_insns (void)
/* A zero-divisor check adds either a trap or an explicit branch. */
2059 if (TARGET_CHECK_ZERO_DIV)
2061 if (GENERATE_DIVIDE_TRAPS)
/* R4000/R4400 errata workarounds require extra instructions. */
2067 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2072 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2073 returns a nonzero value if X is a legitimate address for a memory
2074 operand of the indicated MODE. STRICT is nonzero if this function
2075 is called during reload. */
2078 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2080 struct mips_address_info addr;
/* The classification in ADDR is discarded; only validity matters here. */
2082 return mips_classify_address (&addr, x, mode, strict);
2086 /* Copy VALUE to a register and return that register. If new pseudos
2087 are allowed, copy it into a new register, otherwise use DEST. */
2090 mips_force_temporary (rtx dest, rtx value)
2092 if (!no_new_pseudos)
2093 return force_reg (Pmode, value);
/* Reuse DEST; copy_rtx keeps the original DEST rtx unshared. */
2096 emit_move_insn (copy_rtx (dest), value);
2102 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
2103 and is used to load the high part into a register. */
2106 mips_split_symbol (rtx temp, rtx addr)
/* NOTE(review): the guard selecting this HIGH-based branch is elided in
   this view — presumably the non-mips16 case; confirm against full
   source.  The remaining branches load the mips16 $gp value instead. */
2111 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
2112 else if (no_new_pseudos)
2114 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2118 high = mips16_gp_pseudo_reg ();
2119 return gen_rtx_LO_SUM (Pmode, high, addr);
2123 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2124 type SYMBOL_TYPE. */
2127 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
/* Wrap only the symbolic base in the UNSPEC; a CONST_INT addend is
   re-applied outside the wrapper. */
2131 split_const (address, &base, &offset);
2132 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2133 UNSPEC_ADDRESS_FIRST + symbol_type);
2134 if (offset != const0_rtx)
2135 base = gen_rtx_PLUS (Pmode, base, offset);
2136 return gen_rtx_CONST (Pmode, base);
2140 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2141 high part to BASE and return the result. Just return BASE otherwise.
2142 TEMP is available as a temporary register if needed.
2144 The returned expression can be used as the first operand to a LO_SUM. */
2147 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2148 enum mips_symbol_type symbol_type)
2150 if (mips_split_p[symbol_type])
/* Compute BASE + %hi(ADDR), forcing each intermediate into a register. */
2152 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2153 addr = mips_force_temporary (temp, addr);
2154 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2160 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2161 mips_force_temporary; it is only needed when OFFSET is not a
2165 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2167 if (!SMALL_OPERAND (offset))
/* (mips16 branch elided in this listing.) */
2172 /* Load the full offset into a register so that we can use
2173 an unextended instruction for the address itself. */
2174 high = GEN_INT (offset);
2179 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2180 high = GEN_INT (CONST_HIGH_PART (offset));
2181 offset = CONST_LOW_PART (offset);
/* REG := REG + HIGH; the remaining OFFSET then fits a load/store insn. */
2183 high = mips_force_temporary (temp, high);
2184 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2186 return plus_constant (reg, offset);
2189 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2190 referencing, and TYPE is the symbol type to use (either global
2191 dynamic or local dynamic). V0 is an RTX for the return value
2192 location. The entire insn sequence is returned. */
/* Cached SYMBOL_REF for __tls_get_addr; GTY(()) so it survives GC. */
2194 static GTY(()) rtx mips_tls_symbol;
2197 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2199 rtx insn, loc, tga, a0;
/* The TLS descriptor argument is passed in the first GP argument reg. */
2201 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2203 if (!mips_tls_symbol)
2204 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2206 loc = mips_unspec_address (sym, type);
/* a0 = $gp + reloc(SYM): the GOT entry address for the TLS module. */
2210 emit_insn (gen_rtx_SET (Pmode, a0,
2211 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2212 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2213 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
/* Mark the call const/pure so equal calls can be shared by the RTL
   optimizers; record V0 and A0 as used by the call. */
2214 CONST_OR_PURE_CALL_P (insn) = 1;
2215 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2216 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2217 insn = get_insns ();
2224 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2225 return value will be a valid address and move_operand (either a REG
2229 mips_legitimize_tls_address (rtx loc)
2231 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2232 enum tls_model model;
/* $v0 receives __tls_get_addr results; $v1 receives the thread
   pointer from the rdhwr-based tls_get_tp patterns. */
2234 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2235 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2237 model = SYMBOL_REF_TLS_MODEL (loc);
2238 /* Only TARGET_ABICALLS code can have more than one module; other
2239 code must be static and should not use a GOT. All TLS models
2240 reduce to local exec in this situation. */
2241 if (!TARGET_ABICALLS)
2242 model = TLS_MODEL_LOCAL_EXEC;
/* General dynamic: one libcall per symbol. */
2246 case TLS_MODEL_GLOBAL_DYNAMIC:
2247 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2248 dest = gen_reg_rtx (Pmode);
2249 emit_libcall_block (insn, dest, v0, loc);
/* Local dynamic: one libcall per module, plus a DTPREL offset. */
2252 case TLS_MODEL_LOCAL_DYNAMIC:
2253 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2254 tmp1 = gen_reg_rtx (Pmode);
2256 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2257 share the LDM result with other LD model accesses. */
2258 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2260 emit_libcall_block (insn, tmp1, v0, eqv);
2262 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2263 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2264 mips_unspec_address (loc, SYMBOL_DTPREL));
/* Initial exec: load the TP offset from the GOT and add the thread
   pointer; no libcall needed. */
2267 case TLS_MODEL_INITIAL_EXEC:
2268 tmp1 = gen_reg_rtx (Pmode);
2269 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2270 if (Pmode == DImode)
2272 emit_insn (gen_tls_get_tp_di (v1));
2273 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2277 emit_insn (gen_tls_get_tp_si (v1));
2278 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2280 dest = gen_reg_rtx (Pmode);
2281 emit_insn (gen_add3_insn (dest, tmp1, v1));
/* Local exec: thread pointer plus a link-time TPREL offset. */
2284 case TLS_MODEL_LOCAL_EXEC:
2285 if (Pmode == DImode)
2286 emit_insn (gen_tls_get_tp_di (v1));
2288 emit_insn (gen_tls_get_tp_si (v1));
2290 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2291 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2292 mips_unspec_address (loc, SYMBOL_TPREL));
2302 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2303 be legitimized in a way that the generic machinery might not expect,
2304 put the new address in *XLOC and return true. MODE is the mode of
2305 the memory being accessed. */
2308 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2310 enum mips_symbol_type symbol_type;
/* TLS symbols need the model-specific sequences. */
2312 if (mips_tls_operand_p (*xloc))
2314 *xloc = mips_legitimize_tls_address (*xloc);
2318 /* See if the address can split into a high part and a LO_SUM. */
2319 if (mips_symbolic_constant_p (*xloc, &symbol_type)
2320 && mips_symbolic_address_p (symbol_type, mode)
2321 && mips_split_p[symbol_type])
2323 *xloc = mips_split_symbol (0, *xloc);
2327 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2329 /* Handle REG + CONSTANT using mips_add_offset. */
2332 reg = XEXP (*xloc, 0);
/* Force an invalid base into a register first. */
2333 if (!mips_valid_base_register_p (reg, mode, 0))
2334 reg = copy_to_mode_reg (Pmode, reg);
2335 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2343 /* Subroutine of mips_build_integer (with the same interface).
2344 Assume that the final action in the sequence should be a left shift. */
2347 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2349 unsigned int i, shift;
2351 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2352 since signed numbers are easier to load than unsigned ones. */
2354 while ((value & 1) == 0)
2355 value /= 2, shift++;
/* Recursively build the shifted-down value, then append the final
   ASHIFT-by-SHIFT step. */
2357 i = mips_build_integer (codes, value);
2358 codes[i].code = ASHIFT;
2359 codes[i].value = shift;
2364 /* As for mips_build_shift, but assume that the final action will be
2365 an IOR or PLUS operation. */
2368 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2370 unsigned HOST_WIDE_INT high;
/* HIGH is VALUE with its low 16 bits cleared. */
2373 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2374 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2376 /* The constant is too complex to load with a simple lui/ori pair
2377 so our goal is to clear as many trailing zeros as possible.
2378 In this case, we know bit 16 is set and that the low 16 bits
2379 form a negative number. If we subtract that number from VALUE,
2380 we will clear at least the lowest 17 bits, maybe more. */
2381 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2382 codes[i].code = PLUS;
2383 codes[i].value = CONST_LOW_PART (value);
/* Simple case: build HIGH, then IOR in the low 16 bits (an ori). */
2387 i = mips_build_integer (codes, high);
2388 codes[i].code = IOR;
2389 codes[i].value = value & 0xffff;
2395 /* Fill CODES with a sequence of rtl operations to load VALUE.
2396 Return the number of operations needed. */
2399 mips_build_integer (struct mips_integer_op *codes,
2400 unsigned HOST_WIDE_INT value)
2402 if (SMALL_OPERAND (value)
2403 || SMALL_OPERAND_UNSIGNED (value)
2404 || LUI_OPERAND (value))
2406 /* The value can be loaded with a single instruction. */
2407 codes[0].code = UNKNOWN;
2408 codes[0].value = value;
2411 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2413 /* Either the constant is a simple LUI/ORI combination or its
2414 lowest bit is set. We don't want to shift in this case. */
2415 return mips_build_lower (codes, value);
2417 else if ((value & 0xffff) == 0)
2419 /* The constant will need at least three actions. The lowest
2420 16 bits are clear, so the final action will be a shift. */
2421 return mips_build_shift (codes, value);
2425 /* The final action could be a shift, add or inclusive OR.
2426 Rather than use a complex condition to select the best
2427 approach, try both mips_build_shift and mips_build_lower
2428 and pick the one that gives the shortest sequence.
2429 Note that this case is only used once per constant. */
2430 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2431 unsigned int cost, alt_cost;
2433 cost = mips_build_shift (codes, value);
2434 alt_cost = mips_build_lower (alt_codes, value);
/* Keep whichever sequence turned out shorter. */
2435 if (alt_cost < cost)
2437 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2445 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2448 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2450 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2451 enum machine_mode mode;
2452 unsigned int i, cost;
2455 mode = GET_MODE (dest);
2456 cost = mips_build_integer (codes, value);
2458 /* Apply each binary operation to X. Invariant: X is a legitimate
2459 source operand for a SET pattern. */
2460 x = GEN_INT (codes[0].value);
2461 for (i = 1; i < cost; i++)
/* NOTE(review): an elided branch here presumably routes intermediates
   through TEMP when no new pseudos are allowed -- confirm. */
2465 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2469 x = force_reg (mode, x);
2470 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
/* Final step writes the completed value into DEST. */
2473 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2477 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2478 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2482 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2486 /* Split moves of big integers into smaller pieces. */
2487 if (splittable_const_int_operand (src, mode))
2489 mips_move_integer (dest, dest, INTVAL (src));
2493 /* Split moves of symbolic constants into high/low pairs. */
2494 if (splittable_symbolic_operand (src, mode))
2496 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
/* TLS symbols need their model-specific access sequences. */
2500 if (mips_tls_operand_p (src))
2502 emit_move_insn (dest, mips_legitimize_tls_address (src));
2506 /* If we have (const (plus symbol offset)), load the symbol first
2507 and then add in the offset. This is usually better than forcing
2508 the constant into memory, at least in non-mips16 code. */
2509 split_const (src, &base, &offset);
2511 && offset != const0_rtx
2512 && (!no_new_pseudos || SMALL_INT (offset)))
2514 base = mips_force_temporary (dest, base);
2515 emit_move_insn (dest, mips_add_offset (0, base, INTVAL (offset)));
/* Fallback: spill the constant to the constant pool and load it. */
2519 src = force_const_mem (mode, src);
2521 /* When using explicit relocs, constant pool references are sometimes
2522 not legitimate addresses. */
2523 if (!memory_operand (src, VOIDmode))
2524 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2525 emit_move_insn (dest, src);
2529 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2530 sequence that is valid. */
2533 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
/* Neither side is a register: go through a register for SRC. */
2535 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2537 emit_move_insn (dest, force_reg (mode, src));
2541 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2542 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2543 && REG_P (src) && MD_REG_P (REGNO (src))
2544 && REG_P (dest) && GP_REG_P (REGNO (dest)))
/* The mfhilo patterns mention both HI and LO, so name the other one. */
2546 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2547 if (GET_MODE_SIZE (mode) <= 4)
2548 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2549 gen_rtx_REG (SImode, REGNO (src)),
2550 gen_rtx_REG (SImode, other_regno)));
2552 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2553 gen_rtx_REG (DImode, REGNO (src)),
2554 gen_rtx_REG (DImode, other_regno)));
2558 /* We need to deal with constants that would be legitimate
2559 immediate_operands but not legitimate move_operands. */
2560 if (CONSTANT_P (src) && !move_operand (src, mode))
2562 mips_legitimize_const_move (mode, dest, src);
/* Record the original constant so optimizers still see the value. */
2563 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2569 /* We need a lot of little routines to check constant values on the
2570 mips16. These are used to figure out how long the instruction will
2571 be. It would be much better to do this using constraints, but
2572 there aren't nearly enough letters available. */
/* Return nonzero if OP is a CONST_INT in [LOW, HIGH] with none of the
   bits in MASK set (MASK enforces alignment of scaled immediates). */
2575 m16_check_op (rtx op, int low, int high, int mask)
2577 return (GET_CODE (op) == CONST_INT
2578 && INTVAL (op) >= low
2579 && INTVAL (op) <= high
2580 && (INTVAL (op) & mask) == 0);
/* mips16 immediate-range predicates.  Naming: m16_<u|s>imm<N>_<S> checks
   an unsigned/signed N-bit immediate scaled by S bytes; an 'n' prefix
   means the negated range (for subtract-style uses).  All delegate to
   m16_check_op with the appropriate bounds and alignment mask. */
2584 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2586 return m16_check_op (op, 0x1, 0x8, 0);
2590 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2592 return m16_check_op (op, - 0x8, 0x7, 0);
2596 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2598 return m16_check_op (op, - 0x7, 0x8, 0);
2602 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2604 return m16_check_op (op, - 0x10, 0xf, 0);
2608 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2610 return m16_check_op (op, - 0xf, 0x10, 0);
2614 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2616 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2620 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2622 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2626 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2628 return m16_check_op (op, - 0x80, 0x7f, 0);
2632 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2634 return m16_check_op (op, - 0x7f, 0x80, 0);
2638 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2640 return m16_check_op (op, 0x0, 0xff, 0);
2644 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2646 return m16_check_op (op, - 0xff, 0x0, 0);
2650 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2652 return m16_check_op (op, - 0x1, 0xfe, 0);
2656 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2658 return m16_check_op (op, 0x0, 0xff << 2, 3);
2662 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2664 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2668 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2670 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2674 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2676 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
/* Implement the RTX_COSTS target hook: estimate the cost of expression X
   appearing inside OUTER_CODE, writing the result to *TOTAL.  Many case
   labels of the dispatching switch are elided from this listing. */
2680 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2682 enum machine_mode mode = GET_MODE (x);
2683 bool float_mode_p = FLOAT_MODE_P (mode);
2690 /* A number between 1 and 8 inclusive is efficient for a shift.
2691 Otherwise, we will need an extended instruction. */
2692 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2693 || (outer_code) == LSHIFTRT)
2695 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2698 *total = COSTS_N_INSNS (1);
2702 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2703 if ((outer_code) == XOR
2704 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2710 /* We may be able to use slt or sltu for a comparison with a
2711 signed 16-bit value. (The boundary conditions aren't quite
2712 right, but this is just a heuristic anyhow.) */
2713 if (((outer_code) == LT || (outer_code) == LE
2714 || (outer_code) == GE || (outer_code) == GT
2715 || (outer_code) == LTU || (outer_code) == LEU
2716 || (outer_code) == GEU || (outer_code) == GTU)
2717 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2723 /* Equality comparisons with 0 are cheap. */
2724 if (((outer_code) == EQ || (outer_code) == NE)
2731 /* Constants in the range 0...255 can be loaded with an unextended
2732 instruction. They are therefore as cheap as a register move.
2734 Given the choice between "li R1,0...255" and "move R1,R2"
2735 (where R2 is a known constant), it is usually better to use "li",
2736 since we do not want to unnecessarily extend the lifetime
2738 if (outer_code == SET
2740 && INTVAL (x) < 256)
2748 /* These can be used anywhere. */
2753 /* Otherwise fall through to the handling below because
2754 we'll need to construct the constant. */
2760 if (LEGITIMATE_CONSTANT_P (x))
2762 *total = COSTS_N_INSNS (1);
2767 /* The value will need to be fetched from the constant pool. */
2768 *total = CONSTANT_POOL_COST;
2774 /* If the address is legitimate, return the number of
2775 instructions it needs, otherwise use the default handling. */
2776 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2779 *total = COSTS_N_INSNS (n + 1);
2786 *total = COSTS_N_INSNS (6);
/* Word-sized ops cost 1; double-word on 32-bit targets cost 2. */
2790 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2796 if (mode == DImode && !TARGET_64BIT)
2798 *total = COSTS_N_INSNS (2);
2806 if (mode == DImode && !TARGET_64BIT)
2808 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2816 *total = COSTS_N_INSNS (1);
2818 *total = COSTS_N_INSNS (4);
2822 *total = COSTS_N_INSNS (1);
/* Float add/sub costs come from the per-CPU cost table. */
2829 *total = mips_cost->fp_add;
2833 else if (mode == DImode && !TARGET_64BIT)
2835 *total = COSTS_N_INSNS (4);
2841 if (mode == DImode && !TARGET_64BIT)
2843 *total = COSTS_N_INSNS (4);
/* Multiplication costs, also per-CPU. */
2850 *total = mips_cost->fp_mult_sf;
2852 else if (mode == DFmode)
2853 *total = mips_cost->fp_mult_df;
2855 else if (mode == SImode)
2856 *total = mips_cost->int_mult_si;
2859 *total = mips_cost->int_mult_di;
/* Division costs, also per-CPU. */
2868 *total = mips_cost->fp_div_sf;
2870 *total = mips_cost->fp_div_df;
2879 *total = mips_cost->int_div_di;
2881 *total = mips_cost->int_div_si;
2886 /* A sign extend from SImode to DImode in 64-bit mode is often
2887 zero instructions, because the result can often be used
2888 directly by another instruction; we'll call it one. */
2889 if (TARGET_64BIT && mode == DImode
2890 && GET_MODE (XEXP (x, 0)) == SImode)
2891 *total = COSTS_N_INSNS (1);
2893 *total = COSTS_N_INSNS (2);
/* Zero extension of a 32-bit value needs an explicit mask/shift pair. */
2897 if (TARGET_64BIT && mode == DImode
2898 && GET_MODE (XEXP (x, 0)) == SImode)
2899 *total = COSTS_N_INSNS (2);
2901 *total = COSTS_N_INSNS (1);
2905 case UNSIGNED_FLOAT:
2908 case FLOAT_TRUNCATE:
2910 *total = mips_cost->fp_add;
2918 /* Provide the costs of an addressing mode that contains ADDR.
2919 If ADDR is not a valid address, its cost is irrelevant. */
2922 mips_address_cost (rtx addr)
/* Cost equals the number of instructions the address needs. */
2924 return mips_address_insns (addr, SImode);
2927 /* Return one word of double-word value OP, taking into account the fixed
2928 endianness of certain registers. HIGH_P is true to select the high part,
2929 false to select the low part. */
2932 mips_subword (rtx op, int high_p)
2935 enum machine_mode mode;
2937 mode = GET_MODE (op);
2938 if (mode == VOIDmode)
/* Choose the byte offset of the requested half for memory endianness. */
2941 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2942 byte = UNITS_PER_WORD;
/* FPR pairs: the high word always lives in the odd-numbered register. */
2948 if (FP_REG_P (REGNO (op)))
2949 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
/* HI/LO-style accumulators: HI is the lower-numbered register. */
2950 if (ACC_HI_REG_P (REGNO (op)))
2951 return gen_rtx_REG (word_mode, high_p ? REGNO (op) : REGNO (op) + 1);
2955 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2957 return simplify_gen_subreg (word_mode, op, mode, byte);
2961 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2964 mips_split_64bit_move_p (rtx dest, rtx src)
2969 /* FP->FP moves can be done in a single instruction. */
2970 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2973 /* Check for floating-point loads and stores. They can be done using
2974 ldc1 and sdc1 on MIPS II and above. */
2977 if (FP_REG_RTX_P (dest) && MEM_P (src))
2979 if (FP_REG_RTX_P (src) && MEM_P (dest))
2986 /* Split a 64-bit move from SRC to DEST assuming that
2987 mips_split_64bit_move_p holds.
2989 Moves into and out of FPRs cause some difficulty here. Such moves
2990 will always be DFmode, since paired FPRs are not allowed to store
2991 DImode values. The most natural representation would be two separate
2992 32-bit moves, such as:
2994 (set (reg:SI $f0) (mem:SI ...))
2995 (set (reg:SI $f1) (mem:SI ...))
2997 However, the second insn is invalid because odd-numbered FPRs are
2998 not allowed to store independent values. Use the patterns load_df_low,
2999 load_df_high and store_df_high instead. */
3002 mips_split_64bit_move (rtx dest, rtx src)
3004 if (FP_REG_RTX_P (dest))
3006 /* Loading an FPR from memory or from GPRs. */
/* mthc1 path: write the low word, then the high word via mthc1. */
3009 dest = gen_lowpart (DFmode, dest);
3010 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3011 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
/* Fallback path: the load_df_low/load_df_high pattern pair. */
3016 emit_insn (gen_load_df_low (copy_rtx (dest),
3017 mips_subword (src, 0)));
3018 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3022 else if (FP_REG_RTX_P (src))
3024 /* Storing an FPR into memory or GPRs. */
3027 src = gen_lowpart (DFmode, src);
3028 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
3029 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3033 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
3034 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3039 /* The operation can be split into two normal moves. Decide in
3040 which order to do them. */
3043 low_dest = mips_subword (dest, 0);
/* If the low destination overlaps SRC, move the high halves first so
   the low move does not clobber a source word we still need. */
3044 if (REG_P (low_dest)
3045 && reg_overlap_mentioned_p (low_dest, src))
3047 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
3048 emit_move_insn (low_dest, mips_subword (src, 0));
3052 emit_move_insn (low_dest, mips_subword (src, 0));
3053 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
3058 /* Return the appropriate instructions to move SRC into DEST. Assume
3059 that SRC is operand 1 and DEST is operand 0. */
3062 mips_output_move (rtx dest, rtx src)
3064 enum rtx_code dest_code, src_code;
3067 dest_code = GET_CODE (dest);
3068 src_code = GET_CODE (src);
3069 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
/* Double-word moves that must be split never reach single-insn output. */
3071 if (dbl_p && mips_split_64bit_move_p (dest, src))
/* Source is a GPR or (non-mips16) constant zero ($0 via %z1). */
3074 if ((src_code == REG && GP_REG_P (REGNO (src)))
3075 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3077 if (dest_code == REG)
3079 if (GP_REG_P (REGNO (dest)))
3080 return "move\t%0,%z1";
3082 if (MD_REG_P (REGNO (dest)))
3085 if (DSP_ACC_REG_P (REGNO (dest)))
/* Patch "mt__" into mthi/mtlo-style mnemonic using characters 4-5
   of the accumulator's register name. */
3087 static char retval[] = "mt__\t%z1,%q0";
3088 retval[2] = reg_names[REGNO (dest)][4];
3089 retval[3] = reg_names[REGNO (dest)][5];
3093 if (FP_REG_P (REGNO (dest)))
3094 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3096 if (ALL_COP_REG_P (REGNO (dest)))
/* Patch in the coprocessor number; drop the 'd' for 32-bit moves. */
3098 static char retval[] = "dmtc_\t%z1,%0";
3100 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3101 return (dbl_p ? retval : retval + 1);
3104 if (dest_code == MEM)
3105 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
/* Destination is a GPR. */
3107 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3109 if (src_code == REG)
3111 if (DSP_ACC_REG_P (REGNO (src)))
3113 static char retval[] = "mf__\t%0,%q1";
3114 retval[2] = reg_names[REGNO (src)][4];
3115 retval[3] = reg_names[REGNO (src)][5];
/* Condition-code register: synthesize 1.0f/0.0f-style boolean via
   lui + movf. */
3119 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3120 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3122 if (FP_REG_P (REGNO (src)))
3123 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3125 if (ALL_COP_REG_P (REGNO (src)))
3127 static char retval[] = "dmfc_\t%0,%1";
3129 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3130 return (dbl_p ? retval : retval + 1);
3134 if (src_code == MEM)
3135 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3137 if (src_code == CONST_INT)
3139 /* Don't use the X format, because that will give out of
3140 range numbers for 64-bit hosts and 32-bit targets. */
3142 return "li\t%0,%1\t\t\t# %X1";
3144 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3147 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3151 if (src_code == HIGH)
3152 return "lui\t%0,%h1";
3154 if (CONST_GP_P (src))
3155 return "move\t%0,%1";
3157 if (symbolic_operand (src, VOIDmode))
3158 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
/* Source is an FPR. */
3160 if (src_code == REG && FP_REG_P (REGNO (src)))
3162 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3164 if (GET_MODE (dest) == V2SFmode)
3165 return "mov.ps\t%0,%1";
3167 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3170 if (dest_code == MEM)
3171 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
/* FPR loads from memory. */
3173 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3175 if (src_code == MEM)
3176 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
/* Coprocessor loads/stores: patch width ('d'/'w') and coprocessor
   number into the l_c_/s_c_ templates. */
3178 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3180 static char retval[] = "l_c_\t%0,%1";
3182 retval[1] = (dbl_p ? 'd' : 'w');
3183 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3186 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3188 static char retval[] = "s_c_\t%1,%0";
3190 retval[1] = (dbl_p ? 'd' : 'w');
3191 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3197 /* Restore $gp from its save slot. Valid only when using o32 or
3201 mips_restore_gp (void)
3205 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
/* The save slot sits just past the outgoing argument area, addressed
   from the frame pointer when one exists, otherwise from $sp. */
3207 address = mips_add_offset (pic_offset_table_rtx,
3208 frame_pointer_needed
3209 ? hard_frame_pointer_rtx
3210 : stack_pointer_rtx,
3211 current_function_outgoing_args_size);
3212 slot = gen_rtx_MEM (Pmode, address);
3214 emit_move_insn (pic_offset_table_rtx, slot);
/* Without explicit relocs, keep the scheduler from moving insns that
   implicitly use $gp across the restore. */
3215 if (!TARGET_EXPLICIT_RELOCS)
3216 emit_insn (gen_blockage ());
3219 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3222 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
/* The result mode is taken from TARGET. */
3224 emit_insn (gen_rtx_SET (VOIDmode, target,
3225 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3228 /* Return true if CMP1 is a suitable second operand for relational
3229 operator CODE. See also the *sCC patterns in mips.md. */
3232 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
/* (Case labels elided.)  Register-or-zero codes accept reg/$0... */
3238 return reg_or_0_operand (cmp1, VOIDmode);
/* ...some accept exactly the constant 1 outside mips16... */
3242 return !TARGET_MIPS16 && cmp1 == const1_rtx;
/* ...slt/sltu-style codes accept 16-bit arithmetic immediates... */
3246 return arith_operand (cmp1, VOIDmode);
/* ...and the LE/LEU forms use the sle/sleu operand predicates. */
3249 return sle_operand (cmp1, VOIDmode);
3252 return sleu_operand (cmp1, VOIDmode);
3259 /* Canonicalize LE or LEU comparisons into LT comparisons when
3260 possible to avoid extra instructions or inverting the
3264 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3265 enum machine_mode mode)
3267 HOST_WIDE_INT original, plus_one;
3269 if (GET_CODE (*cmp1) != CONST_INT)
/* x <= N becomes x < N+1, provided N+1 does not wrap in MODE. */
3272 original = INTVAL (*cmp1);
3273 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
/* Signed case (elided header): only valid when N+1 did not overflow. */
3278 if (original < plus_one)
3281 *cmp1 = force_reg (mode, GEN_INT (plus_one));
/* Unsigned case: same transformation with unsigned wrap check. */
3290 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3303 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3304 result in TARGET. CMP0 and TARGET are register_operands that have
3305 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3306 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3309 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3310 rtx target, rtx cmp0, rtx cmp1)
3312 /* First see if there is a MIPS instruction that can do this operation
3313 with CMP1 in its current form. If not, try to canonicalize the
3314 comparison to LT. If that fails, try doing the same for the
3315 inverse operation. If that also fails, force CMP1 into a register
3317 if (mips_relational_operand_ok_p (code, cmp1))
3318 mips_emit_binary (code, target, cmp0, cmp1);
3319 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3320 mips_emit_binary (code, target, cmp0, cmp1);
3323 enum rtx_code inv_code = reverse_condition (code);
/* Even the inverse is unusable: force CMP1 to a register and retry. */
3324 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3326 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3327 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
/* Caller cannot absorb an inversion: compute the inverse into a
   scratch and XOR with 1 to get the requested sense. */
3329 else if (invert_ptr == 0)
3331 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3332 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3333 mips_emit_binary (XOR, target, inv_target, const1_rtx);
/* Caller can absorb it: emit the inverse and flip the flag. */
3337 *invert_ptr = !*invert_ptr;
3338 mips_emit_binary (inv_code, target, cmp0, cmp1);
3343 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3344 The register will have the same mode as CMP0. */
3347 mips_zero_if_equal (rtx cmp0, rtx cmp1)
/* Comparing against zero: CMP0 itself already has the property. */
3349 if (cmp1 == const0_rtx)
/* XOR works when CMP1 fits an unsigned 16-bit immediate (xori)... */
3352 if (uns_arith_operand (cmp1, VOIDmode))
3353 return expand_binop (GET_MODE (cmp0), xor_optab,
3354 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
/* ...otherwise subtract, which accepts signed 16-bit immediates. */
3356 return expand_binop (GET_MODE (cmp0), sub_optab,
3357 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3360 /* Convert *CODE into a code that can be used in a floating-point
3361 scc instruction (c.<cond>.<fmt>). Return true if the values of
3362 the condition code registers will be inverted, with 0 indicating
3363 that the condition holds. */
3366 mips_reverse_fp_cond_p (enum rtx_code *code)
/* (Switch on *CODE elided.)  For unsupported conditions, test the
   reversed condition; maybe_unordered keeps NaN semantics correct. */
3373 *code = reverse_condition_maybe_unordered (*code);
3381 /* Convert a comparison into something that can be used in a branch or
3382 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3383 being compared and *CODE is the code used to compare them.
3385 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3386 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3387 otherwise any standard branch condition can be used. The standard branch
3390 - EQ/NE between two registers.
3391 - any comparison between a register and zero. */
3394 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3396 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
/* Comparison against zero: usable directly by any branch condition. */
3398 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3400 *op0 = cmp_operands[0];
3401 *op1 = cmp_operands[1];
/* EQ/NE: reduce to a zero test of (CMP0 ^ CMP1) or (CMP0 - CMP1). */
3403 else if (*code == EQ || *code == NE)
3407 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
/* (Else branch elided): register/register form for EQ/NE. */
3412 *op0 = cmp_operands[0];
3413 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3418 /* The comparison needs a separate scc instruction. Store the
3419 result of the scc in *OP0 and compare it against zero. */
3420 bool invert = false;
3421 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3423 mips_emit_int_relational (*code, &invert, *op0,
3424 cmp_operands[0], cmp_operands[1]);
3425 *code = (invert ? EQ : NE);
3430 enum rtx_code cmp_code;
3432 /* Floating-point tests use a separate c.cond.fmt comparison to
3433 set a condition code register. The branch or conditional move
3434 will then compare that register against zero.
3436 Set CMP_CODE to the code of the comparison instruction and
3437 *CODE to the code that the branch or move should use. */
3439 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3441 ? gen_reg_rtx (CCmode)
3442 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3444 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3448 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3449 Store the result in TARGET and return true if successful.
3451 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3454 mips_emit_scc (enum rtx_code code, rtx target)
/* Only integer comparisons are handled here. */
3456 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
/* Narrow TARGET to the comparison mode when it is wider. */
3459 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3460 if (code == EQ || code == NE)
/* EQ/NE: test the zero-if-equal value against zero. */
3462 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3463 mips_emit_binary (code, target, zie, const0_rtx);
3466 mips_emit_int_relational (code, 0, target,
3467 cmp_operands[0], cmp_operands[1]);
3471 /* Emit the common code for doing conditional branches.
3472 operand[0] is the label to jump to.
3473 The comparison operands are saved away by cmp{si,di,sf,df}. */
3476 gen_conditional_branch (rtx *operands, enum rtx_code code)
3478 rtx op0, op1, condition;
/* mips16 branches only support EQ/NE against zero. */
3480 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3481 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3482 emit_jump_insn (gen_condjump (condition, operands[0]));
3487 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3488 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3491 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3492 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
/* c.cond.ps may require the reversed condition; if so, swap the
   true/false sources in the conditional move below. */
3497 reversed_p = mips_reverse_fp_cond_p (&cond);
3498 cmp_result = gen_reg_rtx (CCV2mode);
3499 emit_insn (gen_scc_ps (cmp_result,
3500 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3502 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3505 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3509 /* Emit the common code for conditional moves. OPERANDS is the array
3510 of operands passed to the conditional move define_expand. */
3513 gen_conditional_move (rtx *operands)
3518 code = GET_CODE (operands[1]);
/* Conditional moves can only test EQ/NE against zero (need_eq_ne_p). */
3519 mips_emit_compare (&code, &op0, &op1, true);
3520 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3521 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3522 gen_rtx_fmt_ee (code,
3525 operands[2], operands[3])));
3528 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3529 the conditional_trap expander. */
3532 mips_gen_conditional_trap (rtx *operands)
3535 enum rtx_code cmp_code = GET_CODE (operands[0]);
3536 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3538 /* MIPS conditional trap machine instructions don't have GT or LE
3539 flavors, so we must invert the comparison and convert to LT and
3540 GE, respectively. */
3543 case GT: cmp_code = LT; break;
3544 case LE: cmp_code = GE; break;
3545 case GTU: cmp_code = LTU; break;
3546 case LEU: cmp_code = GEU; break;
/* If CMP_CODE was left unchanged by the switch above, keep the operand
   order; otherwise the comparison was inverted, so swap the operands
   to preserve the original meaning. */
3549 if (cmp_code == GET_CODE (operands[0]))
3551 op0 = cmp_operands[0];
3552 op1 = cmp_operands[1];
3556 op0 = cmp_operands[1];
3557 op1 = cmp_operands[0];
/* The trap patterns need OP0 in a register; OP1 may be a register or
   a suitable immediate (arith_operand). */
3559 op0 = force_reg (mode, op0);
3560 if (!arith_operand (op1, mode))
3561 op1 = force_reg (mode, op1);
3563 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3564 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3568 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3571 mips_ok_for_lazy_binding_p (rtx x)
/* Lazy binding requires a GOT and a symbol that does not bind locally. */
3573 return (TARGET_USE_GOT
3574 && GET_CODE (x) == SYMBOL_REF
3575 && !mips_symbol_binds_local_p (x));
3578 /* Load function address ADDR into register DEST. SIBCALL_P is true
3579 if the address is needed for a sibling call. */
3582 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3584 /* If we're generating PIC, and this call is to a global function,
3585 try to allow its address to be resolved lazily. This isn't
3586 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3587 to the stub would be our caller's gp, not ours. */
3588 if (TARGET_EXPLICIT_RELOCS
3589 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3590 && mips_ok_for_lazy_binding_p (addr))
3592 rtx high, lo_sum_symbol;
/* Split the GOT access into a high part and a LO_SUM, using the
   call-specific SYMBOL_GOTOFF_CALL relocations. */
3594 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3595 addr, SYMBOL_GOTOFF_CALL);
3596 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
/* Pick the SImode or DImode load pattern to match the pointer size. */
3597 if (Pmode == SImode)
3598 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3600 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
/* Otherwise a plain move of the address suffices. */
3603 emit_move_insn (dest, addr);
3607 /* Expand a call or call_value instruction. RESULT is where the
3608 result will go (null for calls), ADDR is the address of the
3609 function, ARGS_SIZE is the size of the arguments and AUX is
3610 the value passed to us by mips_function_arg. SIBCALL_P is true
3611 if we are expanding a sibling call, false if we're expanding
3615 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3617 rtx orig_addr, pattern, insn;
/* Force ADDR into a register when it is not directly callable. */
3620 if (!call_insn_operand (addr, VOIDmode))
3622 addr = gen_reg_rtx (Pmode);
3623 mips_load_call_address (addr, orig_addr, sibcall_p);
/* MIPS16 hard-float calls may be routed through a helper stub; if the
   stub builder handled the call, there is nothing more to emit here. */
3626 if (mips16_hard_float
3627 && build_mips16_call_stub (result, addr, args_size,
3628 aux == 0 ? 0 : (int) GET_MODE (aux)))
/* No result: plain (sib)call. */
3632 pattern = (sibcall_p
3633 ? gen_sibcall_internal (addr, args_size)
3634 : gen_call_internal (addr, args_size));
/* Two-register result (e.g. a complex value): use the "multiple"
   call_value patterns with both result registers. */
3635 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3639 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3640 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3643 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3644 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
/* Single-register result. */
3647 pattern = (sibcall_p
3648 ? gen_sibcall_value_internal (result, addr, args_size)
3649 : gen_call_value_internal (result, addr, args_size));
3651 insn = emit_call_insn (pattern);
3653 /* Lazy-binding stubs require $gp to be valid on entry. */
3654 if (mips_ok_for_lazy_binding_p (orig_addr))
3655 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3659 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3662 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3663 tree exp ATTRIBUTE_UNUSED)
3665 return TARGET_SIBCALLS;
3668 /* Emit code to move general operand SRC into condition-code
3669 register DEST. SCRATCH is a scratch TFmode float register.
3676 where FP1 and FP2 are single-precision float registers
3677 taken from SCRATCH. */
3680 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3684 /* Change the source to SFmode. */
3686 src = adjust_address (src, SFmode, 0);
3687 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3688 src = gen_rtx_REG (SFmode, true_regnum (src));
/* Split SCRATCH into two single-precision registers; the second is
   MAX_FPRS_PER_FMT above the first. */
3690 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3691 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
/* Load SRC into FP1, zero into FP2, then set DEST from "FP2 < FP1",
   i.e. DEST becomes the truth value of SRC being greater than zero. */
3693 emit_move_insn (copy_rtx (fp1), src);
3694 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3695 emit_insn (gen_slt_sf (dest, fp2, fp1));
3698 /* Emit code to change the current function's return address to
3699 ADDRESS. SCRATCH is available as a scratch register, if needed.
3700 ADDRESS and SCRATCH are both word-mode GPRs. */
3703 mips_set_return_address (rtx address, rtx scratch)
/* Lay out the frame, then require that $31 (the return-address
   register, bit 31 of the save mask) actually has a save slot. */
3707 compute_frame_size (get_frame_size ());
3708 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3709 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3710 cfun->machine->frame.gp_sp_offset);
/* Store ADDRESS into the return-address save slot. */
3712 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3715 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3716 Assume that the areas do not overlap. */
3719 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3721 HOST_WIDE_INT offset, delta;
3722 unsigned HOST_WIDE_INT bits;
3724 enum machine_mode mode;
3727 /* Work out how many bits to move at a time. If both operands have
3728 half-word alignment, it is usually better to move in half words.
3729 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3730 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3731 Otherwise move word-sized chunks. */
3732 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3733 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3734 bits = BITS_PER_WORD / 2;
3736 bits = BITS_PER_WORD;
3738 mode = mode_for_size (bits, MODE_INT, 0);
3739 delta = bits / BITS_PER_UNIT;
3741 /* Allocate a buffer for the temporary registers. */
3742 regs = alloca (sizeof (rtx) * length / delta);
3744 /* Load as many BITS-sized chunks as possible. Use a normal load if
3745 the source has enough alignment, otherwise use left/right pairs. */
3746 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3748 regs[i] = gen_reg_rtx (mode);
3749 if (MEM_ALIGN (src) >= bits)
3750 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3753 rtx part = adjust_address (src, BLKmode, offset);
3754 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3759 /* Copy the chunks to the destination. */
3760 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3761 if (MEM_ALIGN (dest) >= bits)
3762 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3765 rtx part = adjust_address (dest, BLKmode, offset);
3766 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3770 /* Mop up any left-over bytes. */
3771 if (offset < length)
/* Fewer than DELTA bytes remain; let move_by_pieces handle the tail
   at the smaller of the two alignments. */
3773 src = adjust_address (src, BLKmode, offset);
3774 dest = adjust_address (dest, BLKmode, offset);
3775 move_by_pieces (dest, src, length - offset,
3776 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
/* Number of registers (and the corresponding byte count) moved per
   iteration of a block-move loop. */
3780 #define MAX_MOVE_REGS 4
3781 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3784 /* Helper function for doing a loop-based block operation on memory
3785 reference MEM. Each iteration of the loop will operate on LENGTH
3788 Create a new base register for use within the loop and point it to
3789 the start of MEM. Create a new memory reference that uses this
3790 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3793 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3794 rtx *loop_reg, rtx *loop_mem)
3796 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3798 /* Although the new mem does not refer to a known location,
3799 it does keep up to LENGTH bytes of alignment. */
3800 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3801 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3805 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3806 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3807 memory regions do not overlap. */
3810 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3812 rtx label, src_reg, dest_reg, final_src;
3813 HOST_WIDE_INT leftover;
/* LEFTOVER is the tail that the loop cannot cover; it is moved
   straight-line after the loop. */
3815 leftover = length % MAX_MOVE_BYTES;
3818 /* Create registers and memory references for use within the loop. */
3819 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3820 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3822 /* Calculate the value that SRC_REG should have after the last iteration
3824 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3827 /* Emit the start of the loop. */
3828 label = gen_label_rtx ();
3831 /* Emit the loop body. */
3832 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3834 /* Move on to the next block. */
3835 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3836 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3838 /* Emit the loop condition. */
/* Loop back to LABEL while SRC_REG has not yet reached FINAL_SRC. */
3839 if (Pmode == DImode)
3840 emit_insn (gen_cmpdi (src_reg, final_src));
3842 emit_insn (gen_cmpsi (src_reg, final_src));
3843 emit_jump_insn (gen_bne (label));
3845 /* Mop up any left-over bytes. */
3847 mips_block_move_straight (dest, src, leftover);
3850 /* Expand a movmemsi instruction. */
3853 mips_expand_block_move (rtx dest, rtx src, rtx length)
/* Only constant lengths are expanded inline; small copies get
   straight-line code, larger ones get a loop. */
3855 if (GET_CODE (length) == CONST_INT)
3857 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3859 mips_block_move_straight (dest, src, INTVAL (length));
3864 mips_block_move_loop (dest, src, INTVAL (length));
3871 /* Argument support functions. */
3873 /* Initialize CUMULATIVE_ARGS for a function. */
3876 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3877 rtx libname ATTRIBUTE_UNUSED)
3879 static CUMULATIVE_ARGS zero_cum;
3880 tree param, next_param;
/* A function is "prototyped" if its type carries an argument list. */
3883 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3885 /* Determine if this function has variable arguments. This is
3886 indicated by the last argument being 'void_type_mode' if there
3887 are no variable arguments. The standard MIPS calling sequence
3888 passes all arguments in the general purpose registers in this case. */
3890 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3891 param != 0; param = next_param)
3893 next_param = TREE_CHAIN (param);
/* A last parameter that is not void_type_node means "...": force
   GPR passing by pretending a GP register was already used. */
3894 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3895 cum->gp_reg_found = 1;
3900 /* Fill INFO with information about a single argument. CUM is the
3901 cumulative state for earlier arguments. MODE is the mode of this
3902 argument and TYPE is its type (if known). NAMED is true if this
3903 is a named (fixed) argument rather than a variable one. */
3906 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3907 tree type, int named, struct mips_arg_info *info)
3909 bool doubleword_aligned_p;
3910 unsigned int num_bytes, num_words, max_regs;
3912 /* Work out the size of the argument. */
3913 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3914 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3916 /* Decide whether it should go in a floating-point register, assuming
3917 one is free. Later code checks for availability.
3919 The checks against UNITS_PER_FPVALUE handle the soft-float and
3920 single-float cases. */
3924 /* The EABI conventions have traditionally been defined in terms
3925 of TYPE_MODE, regardless of the actual type. */
3926 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3927 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3928 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3933 /* Only leading floating-point scalars are passed in
3934 floating-point registers. We also handle vector floats the same
3935 way, which is OK because they are not covered by the standard ABI. */
3936 info->fpr_p = (!cum->gp_reg_found
3937 && cum->arg_number < 2
3938 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3939 || VECTOR_FLOAT_TYPE_P (type))
3940 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3941 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3942 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3947 /* Scalar and complex floating-point types are passed in
3948 floating-point registers. */
3949 info->fpr_p = (named
3950 && (type == 0 || FLOAT_TYPE_P (type))
3951 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3952 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3953 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3954 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3956 /* ??? According to the ABI documentation, the real and imaginary
3957 parts of complex floats should be passed in individual registers.
3958 The real and imaginary parts of stack arguments are supposed
3959 to be contiguous and there should be an extra word of padding
3962 This has two problems. First, it makes it impossible to use a
3963 single "void *" va_list type, since register and stack arguments
3964 are passed differently. (At the time of writing, MIPSpro cannot
3965 handle complex float varargs correctly.) Second, it's unclear
3966 what should happen when there is only one register free.
3968 For now, we assume that named complex floats should go into FPRs
3969 if there are two FPRs free, otherwise they should be passed in the
3970 same way as a struct containing two floats. */
3972 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3973 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3975 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3976 info->fpr_p = false;
3986 /* See whether the argument has doubleword alignment. */
3987 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3989 /* Set REG_OFFSET to the register count we're interested in.
3990 The EABI allocates the floating-point registers separately,
3991 but the other ABIs allocate them like integer registers. */
3992 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3996 /* Advance to an even register if the argument is doubleword-aligned. */
3997 if (doubleword_aligned_p)
3998 info->reg_offset += info->reg_offset & 1;
4000 /* Work out the offset of a stack argument. */
4001 info->stack_offset = cum->stack_words;
4002 if (doubleword_aligned_p)
4003 info->stack_offset += info->stack_offset & 1;
4005 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4007 /* Partition the argument between registers and stack. */
4008 info->reg_words = MIN (num_words, max_regs);
4009 info->stack_words = num_words - info->reg_words;
4013 /* INFO describes an argument that is passed in a single-register value.
4014 Return the register it uses, assuming that FPRs are available if
4018 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
/* Non-FP arguments (or soft-float) are passed in GPRs. */
4020 if (!info->fpr_p || !hard_float_p)
4021 return GP_ARG_FIRST + info->reg_offset;
4022 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4023 /* In o32, the second argument is always passed in $f14
4024 for TARGET_DOUBLE_FLOAT, regardless of whether the
4025 first argument was a word or doubleword. */
4026 return FP_ARG_FIRST + 2;
4028 return FP_ARG_FIRST + info->reg_offset;
4031 /* Implement FUNCTION_ARG_ADVANCE. */
4034 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4035 tree type, int named)
4037 struct mips_arg_info info;
4039 mips_arg_info (cum, mode, type, named, &info);
4042 cum->gp_reg_found = true;
4044 /* See the comment above the cumulative args structure in mips.h
4045 for an explanation of what this code does. It assumes the O32
4046 ABI, which passes at most 2 arguments in float registers. */
4047 if (cum->arg_number < 2 && info.fpr_p)
4048 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
/* EABI tracks FPRs separately from GPRs; all other ABIs account for
   FP arguments with the GPR counter. */
4050 if (mips_abi != ABI_EABI || !info.fpr_p)
4051 cum->num_gprs = info.reg_offset + info.reg_words;
4052 else if (info.reg_words > 0)
4053 cum->num_fprs += MAX_FPRS_PER_FMT;
4055 if (info.stack_words > 0)
4056 cum->stack_words = info.stack_offset + info.stack_words;
4061 /* Implement FUNCTION_ARG. */
4064 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4065 tree type, int named)
4067 struct mips_arg_info info;
4069 /* We will be called with a mode of VOIDmode after the last argument
4070 has been seen. Whatever we return will be passed to the call
4071 insn. If we need a mips16 fp_code, return a REG with the code
4072 stored as the mode. */
4073 if (mode == VOIDmode)
4075 if (TARGET_MIPS16 && cum->fp_code != 0)
4076 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4082 mips_arg_info (cum, mode, type, named, &info);
4084 /* Return straight away if the whole argument is passed on the stack. */
4085 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
/* Special case: small record types with integral-size information. */
4089 && TREE_CODE (type) == RECORD_TYPE
4091 && TYPE_SIZE_UNIT (type)
4092 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4095 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4096 structure contains a double in its entirety, then that 64-bit
4097 chunk is passed in a floating point register. */
4100 /* First check to see if there is any such field. */
4101 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4102 if (TREE_CODE (field) == FIELD_DECL
4103 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4104 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4105 && host_integerp (bit_position (field), 0)
4106 && int_bit_position (field) % BITS_PER_WORD == 0)
4111 /* Now handle the special case by returning a PARALLEL
4112 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4113 chunks are passed in registers. */
4115 HOST_WIDE_INT bitpos;
4118 /* assign_parms checks the mode of ENTRY_PARM, so we must
4119 use the actual mode here. */
4120 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4123 field = TYPE_FIELDS (type);
4124 for (i = 0; i < info.reg_words; i++)
/* Advance FIELD to the first declaration at or after BITPOS. */
4128 for (; field; field = TREE_CHAIN (field))
4129 if (TREE_CODE (field) == FIELD_DECL
4130 && int_bit_position (field) >= bitpos)
/* A whole double exactly at BITPOS goes in an FPR; any other
   chunk goes in a GPR. */
4134 && int_bit_position (field) == bitpos
4135 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4136 && !TARGET_SOFT_FLOAT
4137 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4138 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4140 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4143 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4144 GEN_INT (bitpos / BITS_PER_UNIT));
4146 bitpos += BITS_PER_WORD;
4152 /* Handle the n32/n64 conventions for passing complex floating-point
4153 arguments in FPR pairs. The real part goes in the lower register
4154 and the imaginary part goes in the upper register. */
4157 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4160 enum machine_mode inner;
4163 inner = GET_MODE_INNER (mode);
4164 reg = FP_ARG_FIRST + info.reg_offset;
4165 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4167 /* Real part in registers, imaginary part on stack. */
4168 gcc_assert (info.stack_words == info.reg_words);
4169 return gen_rtx_REG (inner, reg);
/* Both parts fit in registers: build a two-element PARALLEL with the
   imaginary part GET_MODE_SIZE (inner) bytes above the real part. */
4173 gcc_assert (info.stack_words == 0);
4174 real = gen_rtx_EXPR_LIST (VOIDmode,
4175 gen_rtx_REG (inner, reg),
4177 imag = gen_rtx_EXPR_LIST (VOIDmode,
4179 reg + info.reg_words / 2),
4180 GEN_INT (GET_MODE_SIZE (inner)));
4181 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
/* Default: a single register chosen by mips_arg_regno. */
4185 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4189 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4192 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4193 enum machine_mode mode, tree type, bool named)
4195 struct mips_arg_info info;
/* An argument is "partial" only when it straddles registers and the
   stack; in that case the register portion's size in bytes is returned. */
4197 mips_arg_info (cum, mode, type, named, &info);
4198 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4202 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4203 PARM_BOUNDARY bits of alignment, but will be given anything up
4204 to STACK_BOUNDARY bits if the type requires it. */
4207 function_arg_boundary (enum machine_mode mode, tree type)
4209 unsigned int alignment;
/* Clamp the natural alignment into [PARM_BOUNDARY, STACK_BOUNDARY]. */
4211 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4212 if (alignment < PARM_BOUNDARY)
4213 alignment = PARM_BOUNDARY;
4214 if (alignment > STACK_BOUNDARY)
4215 alignment = STACK_BOUNDARY;
4219 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4220 upward rather than downward. In other words, return true if the
4221 first byte of the stack slot has useful data, false if the last
4225 mips_pad_arg_upward (enum machine_mode mode, tree type)
4227 /* On little-endian targets, the first byte of every stack argument
4228 is passed in the first byte of the stack slot. */
4229 if (!BYTES_BIG_ENDIAN)
4232 /* Otherwise, integral types are padded downward: the last byte of a
4233 stack argument is passed in the last byte of the stack slot. */
4235 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4236 : GET_MODE_CLASS (mode) == MODE_INT
4239 /* Big-endian o64 pads floating-point arguments downward. */
4240 if (mips_abi == ABI_O64)
4241 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4244 /* Other types are padded upward for o32, o64, n32 and n64. */
4245 if (mips_abi != ABI_EABI)
4248 /* Arguments smaller than a stack slot are padded downward. */
4249 if (mode != BLKmode)
4250 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4252 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4256 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4257 if the least significant byte of the register has useful data. Return
4258 the opposite if the most significant byte does. */
4261 mips_pad_reg_upward (enum machine_mode mode, tree type)
4263 /* No shifting is required for floating-point arguments. */
4264 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4265 return !BYTES_BIG_ENDIAN;
4267 /* Otherwise, apply the same padding to register arguments as we do
4268 to stack arguments. */
4269 return mips_pad_arg_upward (mode, type);
/* Implement TARGET_SETUP_INCOMING_VARARGS: save the anonymous argument
   registers to the stack so va_arg can find them. */
4273 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4274 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4277 CUMULATIVE_ARGS local_cum;
4278 int gp_saved, fp_saved;
4280 /* The caller has advanced CUM up to, but not beyond, the last named
4281 argument. Advance a local copy of CUM past the last "real" named
4282 argument, to find out how many registers are left over. */
4285 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4287 /* Found out how many registers we need to save. */
4288 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4289 fp_saved = (EABI_FLOAT_VARARGS_P
4290 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* Store the unnamed GPRs as a block just below the named stack args. */
4299 ptr = plus_constant (virtual_incoming_args_rtx,
4300 REG_PARM_STACK_SPACE (cfun->decl)
4301 - gp_saved * UNITS_PER_WORD);
4302 mem = gen_rtx_MEM (BLKmode, ptr);
4303 set_mem_alias_set (mem, get_varargs_alias_set ());
4305 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4310 /* We can't use move_block_from_reg, because it will use
4312 enum machine_mode mode;
4315 /* Set OFF to the offset from virtual_incoming_args_rtx of
4316 the first float register. The FP save area lies below
4317 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4318 off = -gp_saved * UNITS_PER_WORD;
4319 off &= ~(UNITS_PER_FPVALUE - 1);
4320 off -= fp_saved * UNITS_PER_FPREG;
4322 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4324 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4325 i += MAX_FPRS_PER_FMT)
4329 ptr = plus_constant (virtual_incoming_args_rtx, off);
4330 mem = gen_rtx_MEM (mode, ptr);
4331 set_mem_alias_set (mem, get_varargs_alias_set ());
4332 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4333 off += UNITS_PER_HWFPVALUE;
/* Record the save-area size for va_start when the ABI reserves no
   register-parameter stack space. */
4337 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4338 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4339 + fp_saved * UNITS_PER_FPREG);
4342 /* Create the va_list data type.
4343 We keep 3 pointers, and two offsets.
4344 Two pointers are to the overflow area, which starts at the CFA.
4345 One of these is constant, for addressing into the GPR save area below it.
4346 The other is advanced up the stack through the overflow region.
4347 The third pointer is to the GPR save area. Since the FPR save area
4348 is just below it, we can address FPR slots off this pointer.
4349 We also keep two one-byte offsets, which are to be subtracted from the
4350 constant pointers to yield addresses in the GPR and FPR save areas.
4351 These are downcounted as float or non-float arguments are used,
4352 and when they get to zero, the argument must be obtained from the
4354 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4355 pointer is enough. It's started at the GPR save area, and is
4357 Note that the GPR save area is not constant size, due to optimization
4358 in the prologue. Hence, we can't use a design with two pointers
4359 and two offsets, although we could have designed this with two pointers
4360 and three offsets. */
4363 mips_build_builtin_va_list (void)
4365 if (EABI_FLOAT_VARARGS_P)
4367 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4370 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4372 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4374 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4376 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4378 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4379 unsigned_char_type_node);
4380 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4381 unsigned_char_type_node);
4382 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4383 warn on every user file. */
4384 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4385 array = build_array_type (unsigned_char_type_node,
4386 build_index_type (index));
4387 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
/* Attach every field to the record and chain them in declaration
   order: ovfl, gtop, ftop, goff, foff, reserved padding. */
4389 DECL_FIELD_CONTEXT (f_ovfl) = record;
4390 DECL_FIELD_CONTEXT (f_gtop) = record;
4391 DECL_FIELD_CONTEXT (f_ftop) = record;
4392 DECL_FIELD_CONTEXT (f_goff) = record;
4393 DECL_FIELD_CONTEXT (f_foff) = record;
4394 DECL_FIELD_CONTEXT (f_res) = record;
4396 TYPE_FIELDS (record) = f_ovfl;
4397 TREE_CHAIN (f_ovfl) = f_gtop;
4398 TREE_CHAIN (f_gtop) = f_ftop;
4399 TREE_CHAIN (f_ftop) = f_goff;
4400 TREE_CHAIN (f_goff) = f_foff;
4401 TREE_CHAIN (f_foff) = f_res;
4403 layout_type (record);
4406 else if (TARGET_IRIX && TARGET_IRIX6)
4407 /* On IRIX 6, this type is 'char *'. */
4408 return build_pointer_type (char_type_node);
4410 /* Otherwise, we use 'void *'. */
4411 return ptr_type_node;
4414 /* Implement va_start. */
4417 mips_va_start (tree valist, rtx nextarg)
4419 if (EABI_FLOAT_VARARGS_P)
4421 const CUMULATIVE_ARGS *cum;
4422 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4423 tree ovfl, gtop, ftop, goff, foff;
4425 int gpr_save_area_size;
4426 int fpr_save_area_size;
/* Fixed mojibake: "&current_..." had been HTML-entity-mangled to
   "¤t_..." (&curren -> U+00A4), which is not valid C. */
4429 cum = &current_function_args_info;
4431 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4433 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
/* Walk the va_list record's fields in declaration order. */
4435 f_ovfl = TYPE_FIELDS (va_list_type_node);
4436 f_gtop = TREE_CHAIN (f_ovfl);
4437 f_ftop = TREE_CHAIN (f_gtop);
4438 f_goff = TREE_CHAIN (f_ftop);
4439 f_foff = TREE_CHAIN (f_goff);
4441 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4443 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4445 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4447 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4449 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4452 /* Emit code to initialize OVFL, which points to the next varargs
4453 stack argument. CUM->STACK_WORDS gives the number of stack
4454 words used by named arguments. */
4455 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4456 if (cum->stack_words > 0)
4457 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4458 size_int (cum->stack_words * UNITS_PER_WORD));
4459 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4460 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4462 /* Emit code to initialize GTOP, the top of the GPR save area. */
4463 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4464 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4465 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4467 /* Emit code to initialize FTOP, the top of the FPR save area.
4468 This address is gpr_save_area_bytes below GTOP, rounded
4469 down to the next fp-aligned boundary. */
4470 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4471 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4472 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4474 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4475 size_int (-fpr_offset));
4476 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4477 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4479 /* Emit code to initialize GOFF, the offset from GTOP of the
4480 next GPR argument. */
4481 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4482 build_int_cst (NULL_TREE, gpr_save_area_size))
4483 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4485 /* Likewise emit code to initialize FOFF, the offset from FTOP
4486 of the next FPR argument. */
4487 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4488 build_int_cst (NULL_TREE, fpr_save_area_size));
4489 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Non-EABI-float case: a single pointer starting at the bottom of
   the varargs save area. */
4493 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4494 std_expand_builtin_va_start (valist, nextarg);
4498 /* Implement va_arg. */
4501 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4503 HOST_WIDE_INT size, rsize;
4507 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4510 type = build_pointer_type (type);
4512 size = int_size_in_bytes (type);
4513 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4515 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4516 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4519 /* Not a simple merged stack. */
4521 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4522 tree ovfl, top, off, align;
4523 HOST_WIDE_INT osize;
4526 f_ovfl = TYPE_FIELDS (va_list_type_node);
4527 f_gtop = TREE_CHAIN (f_ovfl);
4528 f_ftop = TREE_CHAIN (f_gtop);
4529 f_goff = TREE_CHAIN (f_ftop);
4530 f_foff = TREE_CHAIN (f_goff);
4532 /* We maintain separate pointers and offsets for floating-point
4533 and integer arguments, but we need similar code in both cases.
4536 TOP be the top of the register save area;
4537 OFF be the offset from TOP of the next register;
4538 ADDR_RTX be the address of the argument;
4539 RSIZE be the number of bytes used to store the argument
4540 when it's in the register save area;
4541 OSIZE be the number of bytes used to store it when it's
4542 in the stack overflow area; and
4543 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4545 The code we want is:
4547 1: off &= -rsize; // round down
4550 4: addr_rtx = top - off;
4555 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4556 10: addr_rtx = ovfl + PADDING;
4560 [1] and [9] can sometimes be optimized away. */
4562 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4565 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4566 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4568 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4570 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4573 /* When floating-point registers are saved to the stack,
4574 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4575 of the float's precision. */
4576 rsize = UNITS_PER_HWFPVALUE;
4578 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4579 (= PARM_BOUNDARY bits). This can be different from RSIZE
4582 (1) On 32-bit targets when TYPE is a structure such as:
4584 struct s { float f; };
4586 Such structures are passed in paired FPRs, so RSIZE
4587 will be 8 bytes. However, the structure only takes
4588 up 4 bytes of memory, so OSIZE will only be 4.
4590 (2) In combinations such as -mgp64 -msingle-float
4591 -fshort-double. Doubles passed in registers
4592 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4593 but those passed on the stack take up
4594 UNITS_PER_WORD bytes. */
4595 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4599 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4601 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4603 if (rsize > UNITS_PER_WORD)
4605 /* [1] Emit code for: off &= -rsize. */
4606 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4607 build_int_cst (NULL_TREE, -rsize));
4608 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4609 gimplify_and_add (t, pre_p);
4614 /* [2] Emit code to branch if off == 0. */
4615 t = build2 (NE_EXPR, boolean_type_node, off,
4616 build_int_cst (TREE_TYPE (off), 0));
4617 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4619 /* [5] Emit code for: off -= rsize. We do this as a form of
4620 post-increment not available to C. Also widen for the
4621 coming pointer arithmetic. */
4622 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4623 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4624 t = fold_convert (sizetype, t);
4625 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4627 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4628 the argument has RSIZE - SIZE bytes of leading padding. */
4629 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4630 if (BYTES_BIG_ENDIAN && rsize > size)
4632 u = size_int (rsize - size);
4633 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4635 COND_EXPR_THEN (addr) = t;
4637 if (osize > UNITS_PER_WORD)
4639 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4640 u = size_int (osize - 1);
4641 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4642 t = fold_convert (sizetype, t);
4643 u = size_int (-osize);
4644 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4645 t = fold_convert (TREE_TYPE (ovfl), t);
4646 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4651 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4652 post-increment ovfl by osize. On big-endian machines,
4653 the argument has OSIZE - SIZE bytes of leading padding. */
4654 u = fold_convert (TREE_TYPE (ovfl),
4655 build_int_cst (NULL_TREE, osize));
4656 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4657 if (BYTES_BIG_ENDIAN && osize > size)
4659 u = size_int (osize - size);
4660 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4663 /* String [9] and [10,11] together. */
4665 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4666 COND_EXPR_ELSE (addr) = t;
4668 addr = fold_convert (build_pointer_type (type), addr);
4669 addr = build_va_arg_indirect_ref (addr);
4673 addr = build_va_arg_indirect_ref (addr);
4678 /* Return true if it is possible to use left/right accesses for a
4679 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4680 returning true, update *OP, *LEFT and *RIGHT as follows:
4682 *OP is a BLKmode reference to the whole field.
4684 *LEFT is a QImode reference to the first byte if big endian or
4685 the last byte if little endian. This address can be used in the
4686 left-side instructions (lwl, swl, ldl, sdl).
4688 *RIGHT is a QImode reference to the opposite end of the field and
4689 can be used in the patterning right-side instruction. */
4692 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4693 rtx *left, rtx *right)
/* NOTE(review): this listing is elided — the return type, the FIRST/LAST
   locals, and the early "return false" paths after each rejection test
   below are not visible here.  TODO confirm against the full file.  */
4697 /* Check that the operand really is a MEM. Not all the extv and
4698 extzv predicates are checked. */
4702 /* Check that the size is valid. */
4703 if (width != 32 && (!TARGET_64BIT || width != 64))
4706 /* We can only access byte-aligned values. Since we are always passed
4707 a reference to the first byte of the field, it is not necessary to
4708 do anything with BITPOS after this check. */
4709 if (bitpos % BITS_PER_UNIT != 0)
4712 /* Reject aligned bitfields: we want to use a normal load or store
4713 instead of a left/right pair. */
4714 if (MEM_ALIGN (*op) >= width)
4717 /* Adjust *OP to refer to the whole field. This also has the effect
4718 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4719 *op = adjust_address (*op, BLKmode, 0);
4720 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4722 /* Get references to both ends of the field. We deliberately don't
4723 use the original QImode *OP for FIRST since the new BLKmode one
4724 might have a simpler address. */
4725 first = adjust_address (*op, QImode, 0);
4726 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4728 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4729 be the upper word and RIGHT the lower word. */
4730 if (TARGET_BIG_ENDIAN)
4731 *left = first, *right = last;
/* Presumably an elided "else" precedes this line (little-endian case).  */
4733 *left = last, *right = first;
4739 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4740 Return true on success. We only handle cases where zero_extract is
4741 equivalent to sign_extract. */
4744 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4746 rtx left, right, temp;
/* NOTE(review): elided listing — the "return false" statements after the
   failed checks below, the final "return true", and the else keyword
   between the DImode and SImode arms are not visible here.  */
4748 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4749 paradoxical word_mode subreg. This is the only case in which
4750 we allow the destination to be larger than the source. */
4751 if (GET_CODE (dest) == SUBREG
4752 && GET_MODE (dest) == DImode
4753 && SUBREG_BYTE (dest) == 0
4754 && GET_MODE (SUBREG_REG (dest)) == SImode)
4755 dest = SUBREG_REG (dest);
4757 /* After the above adjustment, the destination must be the same
4758 width as the source. */
4759 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4762 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
/* Load via a temporary: ldl/lwl writes the left part into TEMP, then
   ldr/lwr merges the right part and the TEMP input into DEST.  */
4765 temp = gen_reg_rtx (GET_MODE (dest));
4766 if (GET_MODE (dest) == DImode)
4768 emit_insn (gen_mov_ldl (temp, src, left));
4769 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
/* 32-bit case (lwl/lwr pair).  */
4773 emit_insn (gen_mov_lwl (temp, src, left));
4774 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4780 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
/* NOTE(review): the tail of this comment ("true on success") and the
   function's return statements appear to be elided from this listing.  */
4784 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4787 enum machine_mode mode;
4789 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
/* Narrow SRC to an integer mode of exactly WIDTH bits before storing.  */
4792 mode = mode_for_size (width, MODE_INT, 0);
4793 src = gen_lowpart (mode, src);
/* 64-bit store uses the sdl/sdr pair; the guarding mode test is elided.  */
4797 emit_insn (gen_mov_sdl (dest, src, left));
4798 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
/* 32-bit store uses the swl/swr pair.  */
4802 emit_insn (gen_mov_swl (dest, src, left));
4803 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4808 /* Return true if X is a MEM with the same size as MODE. */
4811 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
/* NOTE(review): the MEM_P check and the declaration of SIZE are elided
   from this listing.  MEM_SIZE may be null, hence the "size &&" guard.  */
4818 size = MEM_SIZE (x);
4819 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4822 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4823 source of an "ext" instruction or the destination of an "ins"
4824 instruction. OP must be a register operand and the following
4825 conditions must hold:
4827 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4828 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4829 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4831 Also reject lengths equal to a word as they are better handled
4832 by the move patterns. */
4835 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4837 HOST_WIDE_INT len, pos;
/* NOTE(review): elided listing — the "return false"/"return true"
   statements after each test are not visible here.  */
4839 if (!ISA_HAS_EXT_INS
4840 || !register_operand (op, VOIDmode)
4841 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
/* SIZE and POSITION are expected to be CONST_INTs here — presumably
   guaranteed by the callers/predicates; verify against the .md patterns.  */
4844 len = INTVAL (size);
4845 pos = INTVAL (position);
4847 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4848 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4854 /* Set up globals to generate code for the ISA or processor
4855 described by INFO. */
4858 mips_set_architecture (const struct mips_cpu_info *info)
/* NOTE(review): the surrounding guard (presumably "if (info != 0)") and
   braces are elided from this listing — TODO confirm.  */
4862 mips_arch_info = info;
4863 mips_arch = info->cpu;
4864 mips_isa = info->isa;
4869 /* Likewise for tuning. */
4872 mips_set_tune (const struct mips_cpu_info *info)
/* NOTE(review): as with mips_set_architecture, the null-check guard is
   presumably elided from this listing.  */
4876 mips_tune_info = info;
4877 mips_tune = info->cpu;
4881 /* Implement TARGET_HANDLE_OPTION. */
4884 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
/* NOTE(review): the "switch (code)" statement, most case labels
   (presumably OPT_mabi_, OPT_march_/OPT_mtune_, OPT_mips) and the
   ABI assignments for "32"/"o64"/"n32"/"64" are elided here.  */
4889 if (strcmp (arg, "32") == 0)
4891 else if (strcmp (arg, "o64") == 0)
4893 else if (strcmp (arg, "n32") == 0)
4895 else if (strcmp (arg, "64") == 0)
4897 else if (strcmp (arg, "eabi") == 0)
4898 mips_abi = ABI_EABI;
/* -march=/-mtune= style option: accept it iff the CPU name parses.  */
4905 return mips_parse_cpu (arg) != 0;
/* -mipsN: record the ISA-level CPU info ("mips" + ARG).  */
4908 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4909 return mips_isa_info != 0;
4911 case OPT_mno_flush_func:
4912 mips_cache_flush_func = NULL;
4920 /* Set up the threshold for data to go into the small data area, instead
4921 of the normal data area, and detect any conflicts in the switches. */
4924 override_options (void)
4926 int i, start, regno;
4927 enum machine_mode mode;
/* NOTE(review): this listing is heavily elided — braces, else keywords,
   several assignments and some guarding conditions are missing throughout.
   The visible lines are byte-preserved; comments below only annotate the
   visible structure.  */
4929 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4930 SUBTARGET_OVERRIDE_OPTIONS;
4933 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4935 /* The following code determines the architecture and register size.
4936 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4937 The GAS and GCC code should be kept in sync as much as possible. */
4939 if (mips_arch_string != 0)
4940 mips_set_architecture (mips_parse_cpu (mips_arch_string));
4942 if (mips_isa_info != 0)
4944 if (mips_arch_info == 0)
4945 mips_set_architecture (mips_isa_info);
4946 else if (mips_arch_info->isa != mips_isa_info->isa)
4947 error ("-%s conflicts with the other architecture options, "
4948 "which specify a %s processor",
4949 mips_isa_info->name,
4950 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
/* No architecture chosen on the command line: fall back to the
   configure-time default CPU, else the default ISA.  */
4953 if (mips_arch_info == 0)
4955 #ifdef MIPS_CPU_STRING_DEFAULT
4956 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4958 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4962 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4963 error ("-march=%s is not compatible with the selected ABI",
4964 mips_arch_info->name);
4966 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4967 if (mips_tune_string != 0)
4968 mips_set_tune (mips_parse_cpu (mips_tune_string));
4970 if (mips_tune_info == 0)
4971 mips_set_tune (mips_arch_info);
4973 /* Set cost structure for the processor. */
/* Presumably guarded by an (elided) optimize_size test — TODO confirm.  */
4975 mips_cost = &mips_rtx_cost_optimize_size;
4977 mips_cost = &mips_rtx_cost_data[mips_tune];
4979 if ((target_flags_explicit & MASK_64BIT) != 0)
4981 /* The user specified the size of the integer registers. Make sure
4982 it agrees with the ABI and ISA. */
4983 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4984 error ("-mgp64 used with a 32-bit processor");
4985 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4986 error ("-mgp32 used with a 64-bit ABI");
4987 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4988 error ("-mgp64 used with a 32-bit ABI");
4992 /* Infer the integer register size from the ABI and processor.
4993 Restrict ourselves to 32-bit registers if that's all the
4994 processor has, or if the ABI cannot handle 64-bit registers. */
4995 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4996 target_flags &= ~MASK_64BIT;
4998 target_flags |= MASK_64BIT;
5001 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5003 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5004 only one right answer here. */
5005 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5006 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5007 else if (!TARGET_64BIT && TARGET_FLOAT64
5008 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5009 error ("-mgp32 and -mfp64 can only be combined if the target"
5010 " supports the mfhc1 and mthc1 instructions");
5011 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5012 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5016 /* -msingle-float selects 32-bit float registers. Otherwise the
5017 float registers should be the same size as the integer ones. */
5018 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5019 target_flags |= MASK_FLOAT64;
5021 target_flags &= ~MASK_FLOAT64;
5024 /* End of code shared with GAS. */
/* Default "long" width: 64-bit for 64-bit EABI and the 64-bit ABI.  */
5026 if ((target_flags_explicit & MASK_LONG64) == 0)
5028 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5029 target_flags |= MASK_LONG64;
5031 target_flags &= ~MASK_LONG64;
5034 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5035 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
5037 /* For some configurations, it is useful to have -march control
5038 the default setting of MASK_SOFT_FLOAT. */
5039 switch ((int) mips_arch)
5041 case PROCESSOR_R4100:
5042 case PROCESSOR_R4111:
5043 case PROCESSOR_R4120:
5044 case PROCESSOR_R4130:
5045 target_flags |= MASK_SOFT_FLOAT;
/* default case (presumably) — hard float for all other processors.  */
5049 target_flags &= ~MASK_SOFT_FLOAT;
5055 flag_pcc_struct_return = 0;
5057 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5059 /* If neither -mbranch-likely nor -mno-branch-likely was given
5060 on the command line, set MASK_BRANCHLIKELY based on the target
5063 By default, we enable use of Branch Likely instructions on
5064 all architectures which support them with the following
5065 exceptions: when creating MIPS32 or MIPS64 code, and when
5066 tuning for architectures where their use tends to hurt
5069 The MIPS32 and MIPS64 architecture specifications say "Software
5070 is strongly encouraged to avoid use of Branch Likely
5071 instructions, as they will be removed from a future revision
5072 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5073 issue those instructions unless instructed to do so by
5075 if (ISA_HAS_BRANCHLIKELY
5076 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5077 && !(TUNE_MIPS5500 || TUNE_SB1))
5078 target_flags |= MASK_BRANCHLIKELY;
5080 target_flags &= ~MASK_BRANCHLIKELY;
5082 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5083 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5085 /* The effect of -mabicalls isn't defined for the EABI. */
5086 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5088 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5089 target_flags &= ~MASK_ABICALLS;
5092 if (TARGET_ABICALLS)
5094 /* We need to set flag_pic for executables as well as DSOs
5095 because we may reference symbols that are not defined in
5096 the final executable. (MIPS does not use things like
5097 copy relocs, for example.)
5099 Also, there is a body of code that uses __PIC__ to distinguish
5100 between -mabicalls and -mno-abicalls code. */
5102 if (mips_section_threshold > 0)
5103 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
5106 if (TARGET_VXWORKS_RTP && mips_section_threshold > 0)
5107 warning (0, "-G and -mrtp are incompatible");
5109 /* mips_split_addresses is a half-way house between explicit
5110 relocations and the traditional assembler macros. It can
5111 split absolute 32-bit symbolic constants into a high/lo_sum
5112 pair but uses macros for other sorts of access.
5114 Like explicit relocation support for REL targets, it relies
5115 on GNU extensions in the assembler and the linker.
5117 Although this code should work for -O0, it has traditionally
5118 been treated as an optimization. */
5119 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5120 && optimize && !flag_pic
5121 && !ABI_HAS_64BIT_SYMBOLS)
5122 mips_split_addresses = 1;
5124 mips_split_addresses = 0;
5126 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5127 faster code, but at the expense of more nops. Enable it at -O3 and
5129 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5130 target_flags |= MASK_VR4130_ALIGN;
5132 /* When compiling for the mips16, we cannot use floating point. We
5133 record the original hard float value in mips16_hard_float. */
/* NOTE(review): the enclosing TARGET_MIPS16 guard for this section is
   elided from the listing — TODO confirm.  */
5136 if (TARGET_SOFT_FLOAT)
5137 mips16_hard_float = 0;
5139 mips16_hard_float = 1;
5140 target_flags |= MASK_SOFT_FLOAT;
5142 /* Don't run the scheduler before reload, since it tends to
5143 increase register pressure. */
5144 flag_schedule_insns = 0;
5146 /* Don't do hot/cold partitioning. The constant layout code expects
5147 the whole function to be in a single section. */
5148 flag_reorder_blocks_and_partition = 0;
5150 /* Silently disable -mexplicit-relocs since it doesn't apply
5151 to mips16 code. Even so, it would overly pedantic to warn
5152 about "-mips16 -mexplicit-relocs", especially given that
5153 we use a %gprel() operator. */
5154 target_flags &= ~MASK_EXPLICIT_RELOCS;
5157 /* When using explicit relocs, we call dbr_schedule from within
5159 if (TARGET_EXPLICIT_RELOCS)
5161 mips_flag_delayed_branch = flag_delayed_branch;
5162 flag_delayed_branch = 0;
5165 #ifdef MIPS_TFMODE_FORMAT
5166 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5169 /* Make sure that the user didn't turn off paired single support when
5170 MIPS-3D support is requested. */
5171 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5172 && !TARGET_PAIRED_SINGLE_FLOAT)
5173 error ("-mips3d requires -mpaired-single");
5175 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5177 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5179 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5180 and TARGET_HARD_FLOAT are both true. */
5181 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5182 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5184 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5186 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5187 error ("-mips3d/-mpaired-single must be used with -mips64");
5189 /* If TARGET_DSPR2, enable MASK_DSP. */
5191 target_flags |= MASK_DSP;
5193 if (TARGET_MIPS16 && TARGET_DSP)
5194 error ("-mips16 and -mdsp cannot be used together");
/* Register the punctuation characters recognized by print_operand;
   their meanings are documented above print_operand.  */
5196 mips_print_operand_punct['?'] = 1;
5197 mips_print_operand_punct['#'] = 1;
5198 mips_print_operand_punct['/'] = 1;
5199 mips_print_operand_punct['&'] = 1;
5200 mips_print_operand_punct['!'] = 1;
5201 mips_print_operand_punct['*'] = 1;
5202 mips_print_operand_punct['@'] = 1;
5203 mips_print_operand_punct['.'] = 1;
5204 mips_print_operand_punct['('] = 1;
5205 mips_print_operand_punct[')'] = 1;
5206 mips_print_operand_punct['['] = 1;
5207 mips_print_operand_punct[']'] = 1;
5208 mips_print_operand_punct['<'] = 1;
5209 mips_print_operand_punct['>'] = 1;
5210 mips_print_operand_punct['{'] = 1;
5211 mips_print_operand_punct['}'] = 1;
5212 mips_print_operand_punct['^'] = 1;
5213 mips_print_operand_punct['$'] = 1;
5214 mips_print_operand_punct['+'] = 1;
5215 mips_print_operand_punct['~'] = 1;
5217 /* Set up array to map GCC register number to debug register number.
5218 Ignore the special purpose register numbers. */
5220 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5221 mips_dbx_regno[i] = -1;
5223 start = GP_DBX_FIRST - GP_REG_FIRST;
5224 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5225 mips_dbx_regno[i] = i + start;
5227 start = FP_DBX_FIRST - FP_REG_FIRST;
5228 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5229 mips_dbx_regno[i] = i + start;
5231 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5232 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5234 /* Set up array giving whether a given register can hold a given mode. */
5236 for (mode = VOIDmode;
5237 mode != MAX_MACHINE_MODE;
5238 mode = (enum machine_mode) ((int)mode + 1))
5240 register int size = GET_MODE_SIZE (mode);
5241 register enum mode_class class = GET_MODE_CLASS (mode);
5243 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
/* NOTE(review): the declaration/initialization of TEMP and the first
   halves of the CCV2/CCV4 conditions are elided from this listing.  */
5247 if (mode == CCV2mode)
5250 && (regno - ST_REG_FIRST) % 2 == 0);
5252 else if (mode == CCV4mode)
5255 && (regno - ST_REG_FIRST) % 4 == 0);
5257 else if (mode == CCmode)
5260 temp = (regno == FPSW_REGNUM);
5262 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5263 || FP_REG_P (regno));
5266 else if (GP_REG_P (regno))
5267 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5269 else if (FP_REG_P (regno))
5270 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5271 || (MIN_FPRS_PER_FMT == 1
5272 && size <= UNITS_PER_FPREG))
5273 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5274 || class == MODE_VECTOR_FLOAT)
5275 && size <= UNITS_PER_FPVALUE)
5276 /* Allow integer modes that fit into a single
5277 register. We need to put integers into FPRs
5278 when using instructions like cvt and trunc.
5279 We can't allow sizes smaller than a word,
5280 the FPU has no appropriate load/store
5281 instructions for those. */
5282 || (class == MODE_INT
5283 && size >= MIN_UNITS_PER_WORD
5284 && size <= UNITS_PER_FPREG)
5285 /* Allow TFmode for CCmode reloads. */
5286 || (ISA_HAS_8CC && mode == TFmode)));
5288 else if (ACC_REG_P (regno))
5289 temp = (INTEGRAL_MODE_P (mode)
5290 && (size <= UNITS_PER_WORD
5291 || (ACC_HI_REG_P (regno)
5292 && size == 2 * UNITS_PER_WORD)));
5294 else if (ALL_COP_REG_P (regno))
5295 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5299 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5303 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5304 initialized yet, so we can't use that here. */
5305 gpr_mode = TARGET_64BIT ? DImode : SImode;
5307 /* Provide default values for align_* for 64-bit targets. */
5308 if (TARGET_64BIT && !TARGET_MIPS16)
5310 if (align_loops == 0)
5312 if (align_jumps == 0)
5314 if (align_functions == 0)
5315 align_functions = 8;
5318 /* Function to allocate machine-dependent function status. */
5319 init_machine_status = &mips_init_machine_status;
/* Relocation-operator setup: decide which symbol types are split into
   high/lo_sum pairs and which %-relocs annotate each part.  */
5321 if (ABI_HAS_64BIT_SYMBOLS)
5323 if (TARGET_EXPLICIT_RELOCS)
5325 mips_split_p[SYMBOL_64_HIGH] = true;
5326 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5327 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5329 mips_split_p[SYMBOL_64_MID] = true;
5330 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5331 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5333 mips_split_p[SYMBOL_64_LOW] = true;
5334 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5335 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5337 mips_split_p[SYMBOL_GENERAL] = true;
5338 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5343 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5345 mips_split_p[SYMBOL_GENERAL] = true;
5346 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5347 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
/* NOTE(review): the guard for the MIPS16 small-data case is elided.  */
5353 /* The high part is provided by a pseudo copy of $gp. */
5354 mips_split_p[SYMBOL_SMALL_DATA] = true;
5355 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5358 if (TARGET_EXPLICIT_RELOCS)
5360 /* Small data constants are kept whole until after reload,
5361 then lowered by mips_rewrite_small_data. */
5362 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
5364 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5367 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5368 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5372 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5373 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5378 /* The HIGH and LO_SUM are matched by special .md patterns. */
5379 mips_split_p[SYMBOL_GOT_DISP] = true;
5381 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5382 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5383 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5385 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5386 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5387 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5392 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5394 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5395 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5401 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5402 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5403 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5406 /* Thread-local relocation operators. */
5407 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5408 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5409 mips_split_p[SYMBOL_DTPREL] = 1;
5410 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5411 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5412 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5413 mips_split_p[SYMBOL_TPREL] = 1;
5414 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5415 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5417 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5419 /* We don't have a thread pointer access instruction on MIPS16, or
5420 appropriate TLS relocations. */
5422 targetm.have_tls = false;
5424 /* Default to working around R4000 errata only if the processor
5425 was selected explicitly. */
5426 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5427 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5428 target_flags |= MASK_FIX_R4000;
5430 /* Default to working around R4400 errata only if the processor
5431 was selected explicitly. */
5432 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5433 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5434 target_flags |= MASK_FIX_R4400;
5437 /* Implement CONDITIONAL_REGISTER_USAGE. */
5440 mips_conditional_register_usage (void)
/* NOTE(review): elided listing — REGNO declaration, the guards around
   the DSP and MIPS16 sections, and several braces are not visible.  */
/* DSP accumulator registers are fixed/call-used (guard presumably
   !TARGET_DSP — TODO confirm).  */
5446 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5447 fixed_regs[regno] = call_used_regs[regno] = 1;
5449 if (!TARGET_HARD_FLOAT)
/* Soft float: hide all FP and condition-code registers.  */
5453 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5454 fixed_regs[regno] = call_used_regs[regno] = 1;
5455 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5456 fixed_regs[regno] = call_used_regs[regno] = 1;
5458 else if (! ISA_HAS_8CC)
5462 /* We only have a single condition code register. We
5463 implement this by hiding all the condition code registers,
5464 and generating RTL that refers directly to ST_REG_FIRST. */
5465 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5466 fixed_regs[regno] = call_used_regs[regno] = 1;
5468 /* In mips16 mode, we permit the $t temporary registers to be used
5469 for reload. We prohibit the unused $s registers, since they
5470 are caller saved, and saving them via a mips16 register would
5471 probably waste more time than just reloading the value. */
5474 fixed_regs[18] = call_used_regs[18] = 1;
5475 fixed_regs[19] = call_used_regs[19] = 1;
5476 fixed_regs[20] = call_used_regs[20] = 1;
5477 fixed_regs[21] = call_used_regs[21] = 1;
5478 fixed_regs[22] = call_used_regs[22] = 1;
5479 fixed_regs[23] = call_used_regs[23] = 1;
5480 fixed_regs[26] = call_used_regs[26] = 1;
5481 fixed_regs[27] = call_used_regs[27] = 1;
5482 fixed_regs[30] = call_used_regs[30] = 1;
5484 /* fp20-23 are now caller saved. */
5485 if (mips_abi == ABI_64)
5488 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5489 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5491 /* Odd registers from fp21 to fp31 are now caller saved. */
5492 if (mips_abi == ABI_N32)
5495 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5496 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5500 /* Allocate a chunk of memory for per-function machine-dependent data. */
5501 static struct machine_function *
5502 mips_init_machine_status (void)
/* Returns a zeroed, GC-allocated machine_function (braces elided in
   this listing).  Installed as init_machine_status by override_options.  */
5504 return ((struct machine_function *)
5505 ggc_alloc_cleared (sizeof (struct machine_function)));
5508 /* On the mips16, we want to allocate $24 (T_REG) before other
5509 registers for instructions for which it is possible. This helps
5510 avoid shuffling registers around in order to set up for an xor,
5511 encouraging the compiler to use a cmp instead. */
5514 mips_order_regs_for_local_alloc (void)
/* Default order: identity mapping over all hard registers.  */
5518 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5519 reg_alloc_order[i] = i;
/* NOTE(review): the TARGET_MIPS16 guard for the swap below is presumably
   elided from this listing — TODO confirm.  */
5523 /* It really doesn't matter where we put register 0, since it is
5524 a fixed register anyhow. */
5525 reg_alloc_order[0] = 24;
5526 reg_alloc_order[24] = 0;
5531 /* The MIPS debug format wants all automatic variables and arguments
5532 to be in terms of the virtual frame pointer (stack pointer before
5533 any adjustment in the function), while the MIPS 3.0 linker wants
5534 the frame pointer to be the stack pointer after the initial
5535 adjustment. So, we do the adjustment here. The arg pointer (which
5536 is eliminated) points to the virtual frame pointer, while the frame
5537 pointer (which may be eliminated) points to the stack pointer after
5538 the initial adjustments. */
5541 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
/* NOTE(review): the function's return statement and part of the
   fatal_insn call are elided from this listing.  */
5543 rtx offset2 = const0_rtx;
5544 rtx reg = eliminate_constant_term (addr, &offset2);
/* Presumably guarded by an (elided) "offset == 0" test — TODO confirm.  */
5547 offset = INTVAL (offset2);
5549 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5550 || reg == hard_frame_pointer_rtx)
5552 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5553 ? compute_frame_size (get_frame_size ())
5554 : cfun->machine->frame.total_size;
5556 /* MIPS16 frame is smaller */
5557 if (frame_pointer_needed && TARGET_MIPS16)
5558 frame_size -= cfun->machine->frame.args_size;
5560 offset = offset - frame_size;
5563 /* sdbout_parms does not want this to crash for unrecognized cases. */
5565 else if (reg != arg_pointer_rtx)
5566 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5573 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5575 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5576 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5577 'h' OP is HIGH, prints %hi(X),
5578 'd' output integer constant in decimal,
5579 'z' if the operand is 0, use $0 instead of normal operand.
5580 'D' print second part of double-word register or memory operand.
5581 'L' print low-order register of double-word register operand.
5582 'M' print high-order register of double-word register operand.
5583 'C' print part of opcode for a branch condition.
5584 'F' print part of opcode for a floating-point branch condition.
5585 'N' print part of opcode for a branch condition, inverted.
5586 'W' print part of opcode for a floating-point branch condition, inverted.
5587 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5588 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5589 't' like 'T', but with the EQ/NE cases reversed
5590 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5591 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5592 'R' print the reloc associated with LO_SUM
5593 'q' print DSP accumulator registers
5595 The punctuation characters are:
5597 '(' Turn on .set noreorder
5598 ')' Turn on .set reorder
5599 '[' Turn on .set noat
5601 '<' Turn on .set nomacro
5602 '>' Turn on .set macro
5603 '{' Turn on .set volatile (not GAS)
5604 '}' Turn on .set novolatile (not GAS)
5605 '&' Turn on .set noreorder if filling delay slots
5606 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5607 '!' Turn on .set nomacro if filling delay slots
5608 '#' Print nop if in a .set noreorder section.
5609 '/' Like '#', but does nothing within a delayed branch sequence
5610 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5611 '@' Print the name of the assembler temporary register (at or $1).
5612 '.' Print the name of the register with a hard-wired zero (zero or $0).
5613 '^' Print the name of the pic call-through register (t9 or $25).
5614 '$' Print the name of the stack pointer register (sp or $29).
5615 '+' Print the name of the gp register (usually gp or $28).
5616 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
/* Implement the PRINT_OPERAND macro: print operand OP to FILE under
   format modifier LETTER.  The letters and punctuation characters are
   documented in the comment block immediately above.
   NOTE(review): this listing has elided lines (the embedded original
   line numbers are non-contiguous) -- switch headers, case labels and
   braces are missing from this view.  Verify against the full file
   before changing anything.  */
5619 print_operand (FILE *file, rtx op, int letter)
5621 register enum rtx_code code;
/* Punctuation "operands" take no rtx; they toggle .set state or emit
   fixed text.  Each fragment below presumably sits under its own case
   label (elided), in the order listed in the header comment.  */
5623 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
/* '?': emit the branch-likely suffix when mips_branch_likely is set.  */
5628 if (mips_branch_likely)
/* '@': assembler temporary register ($1/at).  */
5633 fputs (reg_names [GP_REG_FIRST + 1], file);
/* '^': pic call-through register ($25/t9).  */
5637 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
/* '.': hard-wired zero register ($0).  */
5641 fputs (reg_names [GP_REG_FIRST + 0], file);
/* '$': stack pointer.  */
5645 fputs (reg_names[STACK_POINTER_REGNUM], file);
/* '+': global pointer.  */
5649 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
/* '&': .set noreorder, but only when filling a delay slot
   (final_sequence nonzero).  set_noreorder counts nesting so the
   directive is emitted only on the first entry.  */
5653 if (final_sequence != 0 && set_noreorder++ == 0)
5654 fputs (".set\tnoreorder\n\t", file);
/* '*': both noreorder and nomacro when filling delay slots.  */
5658 if (final_sequence != 0)
5660 if (set_noreorder++ == 0)
5661 fputs (".set\tnoreorder\n\t", file);
5663 if (set_nomacro++ == 0)
5664 fputs (".set\tnomacro\n\t", file);
/* '!': nomacro when filling delay slots.  */
5669 if (final_sequence != 0 && set_nomacro++ == 0)
5670 fputs ("\n\t.set\tnomacro", file);
/* '#': print a nop if inside a .set noreorder section.  */
5674 if (set_noreorder != 0)
5675 fputs ("\n\tnop", file);
/* '/': like '#' but suppressed within a delayed-branch sequence.  */
5679 /* Print an extra newline so that the delayed insn is separated
5680 from the following ones. This looks neater and is consistent
5681 with non-nop delayed sequences. */
5682 if (set_noreorder != 0 && final_sequence == 0)
5683 fputs ("\n\tnop\n", file);
/* '(' / ')': open and close a .set noreorder region; mismatches are
   internal errors.  */
5687 if (set_noreorder++ == 0)
5688 fputs (".set\tnoreorder\n\t", file);
5692 if (set_noreorder == 0)
5693 error ("internal error: %%) found without a %%( in assembler pattern");
5695 else if (--set_noreorder == 0)
5696 fputs ("\n\t.set\treorder", file);
/* '[' / ']': .set noat / .set at, reference-counted like noreorder.  */
5701 if (set_noat++ == 0)
5702 fputs (".set\tnoat\n\t", file);
5707 error ("internal error: %%] found without a %%[ in assembler pattern");
5708 else if (--set_noat == 0)
5709 fputs ("\n\t.set\tat", file);
/* '<' / '>': .set nomacro / .set macro.  */
5714 if (set_nomacro++ == 0)
5715 fputs (".set\tnomacro\n\t", file);
5719 if (set_nomacro == 0)
5720 error ("internal error: %%> found without a %%< in assembler pattern");
5721 else if (--set_nomacro == 0)
5722 fputs ("\n\t.set\tmacro", file);
/* '{' / '}': .set volatile / novolatile -- not understood by GAS, so
   emitted behind a '#' comment marker.  */
5727 if (set_volatile++ == 0)
5728 fputs ("#.set\tvolatile\n\t", file);
5732 if (set_volatile == 0)
5733 error ("internal error: %%} found without a %%{ in assembler pattern");
5734 else if (--set_volatile == 0)
5735 fputs ("\n\t#.set\tnovolatile", file);
/* '~': branch alignment.  */
5741 if (align_labels_log > 0)
5742 ASM_OUTPUT_ALIGN (file, align_labels_log);
5747 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
/* Non-punctuation letters require a non-null operand.  */
5756 error ("PRINT_OPERAND null pointer");
5760 code = GET_CODE (op);
/* 'C' (presumably -- case labels elided): print the comparison code
   of OP as an instruction suffix.  */
5765 case EQ: fputs ("eq", file); break;
5766 case NE: fputs ("ne", file); break;
5767 case GT: fputs ("gt", file); break;
5768 case GE: fputs ("ge", file); break;
5769 case LT: fputs ("lt", file); break;
5770 case LE: fputs ("le", file); break;
5771 case GTU: fputs ("gtu", file); break;
5772 case GEU: fputs ("geu", file); break;
5773 case LTU: fputs ("ltu", file); break;
5774 case LEU: fputs ("leu", file); break;
5776 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
/* 'N': print the inverse of OP's comparison code.  */
5779 else if (letter == 'N')
5782 case EQ: fputs ("ne", file); break;
5783 case NE: fputs ("eq", file); break;
5784 case GT: fputs ("le", file); break;
5785 case GE: fputs ("lt", file); break;
5786 case LT: fputs ("ge", file); break;
5787 case LE: fputs ("gt", file); break;
5788 case GTU: fputs ("leu", file); break;
5789 case GEU: fputs ("ltu", file); break;
5790 case LTU: fputs ("geu", file); break;
5791 case LEU: fputs ("gtu", file); break;
5793 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
/* 'F': FP condition-code branch mnemonic part (c1f/c1t).  */
5796 else if (letter == 'F')
5799 case EQ: fputs ("c1f", file); break;
5800 case NE: fputs ("c1t", file); break;
5802 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
/* 'W': inverse of 'F'.  */
5805 else if (letter == 'W')
5808 case EQ: fputs ("c1t", file); break;
5809 case NE: fputs ("c1f", file); break;
5811 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
/* 'h': high-part relocation; 'R' low-part relocation.  */
5814 else if (letter == 'h')
5816 if (GET_CODE (op) == HIGH)
5819 print_operand_reloc (file, op, mips_hi_relocs)
5822 else if (letter == 'R')
5823 print_operand_reloc (file, op, mips_lo_relocs);
/* 'Y': index into the FP condition table.  */
5825 else if (letter == 'Y')
5827 if (GET_CODE (op) == CONST_INT
5828 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5829 < ARRAY_SIZE (mips_fp_conditions)))
5830 fputs (mips_fp_conditions[INTVAL (op)], file);
5832 output_operand_lossage ("invalid %%Y value");
/* 'Z': print OP normally (recursive call with no modifier).  */
5835 else if (letter == 'Z')
5839 print_operand (file, op, 0);
/* 'q': DSP accumulator register name ($ac0..).  */
5844 else if (letter == 'q')
5849 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5851 regnum = REGNO (op);
5852 if (MD_REG_P (regnum))
5853 fprintf (file, "$ac0");
5854 else if (DSP_ACC_REG_P (regnum))
/* reg_names for DSP accumulators are "$acN"; [3] picks the digit.  */
5855 fprintf (file, "$ac%c", reg_names[regnum][3]);
5857 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
/* Default handling for registers; 'M'/'L' select the high/low word of
   a multiword register depending on endianness.  */
5860 else if (code == REG || code == SUBREG)
5862 register int regnum;
5865 regnum = REGNO (op);
5867 regnum = true_regnum (op);
5869 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5870 || (letter == 'L' && WORDS_BIG_ENDIAN)
5874 fprintf (file, "%s", reg_names[regnum]);
/* Memory operands; one path prints the address offset by 4
   (presumably for 'D'/high-word access -- label elided).  */
5877 else if (code == MEM)
5880 output_address (plus_constant (XEXP (op, 0), 4));
5882 output_address (XEXP (op, 0));
/* 'x': low 16 bits in hex; 'X': full value in hex; 'd': decimal.  */
5885 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5886 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5888 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5889 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5891 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5892 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
/* 'z': print $0 for a zero constant.  */
5894 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5895 fputs (reg_names[GP_REG_FIRST], file);
5897 else if (letter == 'd' || letter == 'x' || letter == 'X')
5898 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
/* 'T'/'t': branch-on-true/false letter, adjusted for CCmode.  */
5900 else if (letter == 'T' || letter == 't')
5902 int truth = (code == NE) == (letter == 'T');
5903 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5906 else if (CONST_GP_P (op))
5907 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
5910 output_addr_const (file, op);
5914 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5915 RELOCS is the array of relocations to use. */
/* Print symbolic operand OP wrapped in the relocation operator taken
   from RELOCS (indexed by the operand's mips_symbol_type).
   NOTE(review): declarations of BASE, OFFSET and P are elided in this
   view; the trailing loop presumably emits a closing ')' for each '('
   in the relocation prefix -- confirm against the full file.  */
5918 print_operand_reloc (FILE *file, rtx op, const char **relocs)
5920 enum mips_symbol_type symbol_type;
/* Reject operands that are not symbolic or have no relocation entry.  */
5924 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
5925 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5927 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
5928 split_const (op, &base, &offset);
5929 if (UNSPEC_ADDRESS_P (base))
5930 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
/* Emit "%reloc(" prefix, then the address itself.  */
5932 fputs (relocs[symbol_type], file);
5933 output_addr_const (file, op);
5934 for (p = relocs[symbol_type]; *p != 0; p++)
5939 /* Output address operand X to FILE. */
/* Output address operand X to FILE, dispatching on the address class
   returned by mips_classify_address.  Register-based addresses print
   as "offset(reg)"; constant addresses use $0 as the base.
   NOTE(review): the switch header, some case labels and the failure
   path are elided in this view.  */
5942 print_operand_address (FILE *file, rtx x)
5944 struct mips_address_info addr;
5946 if (mips_classify_address (&addr, x, word_mode, true))
/* ADDRESS_REG (label elided): plain "offset(base)".  */
5950 print_operand (file, addr.offset, 0);
5951 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5954 case ADDRESS_LO_SUM:
/* 'R' prints the offset with its low-part relocation operator.  */
5955 print_operand (file, addr.offset, 'R');
5956 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5959 case ADDRESS_CONST_INT:
5960 output_addr_const (file, x);
/* Absolute addresses are based off the zero register.  */
5961 fprintf (file, "(%s)", reg_names[0]);
5964 case ADDRESS_SYMBOLIC:
5965 output_addr_const (file, x);
5971 /* When using assembler macros, keep track of all of small-data externs
5972 so that mips_file_end can emit the appropriate declarations for them.
5974 In most cases it would be safe (though pointless) to emit .externs
5975 for other symbols too. One exception is when an object is within
5976 the -G limit but declared by the user to be in a section other
5977 than .sbss or .sdata. */
/* Implement ASM_OUTPUT_EXTERNAL.  Beyond the default ELF handling,
   emit ".extern name, size" for referenced small-data symbols (so
   mips_file_end can account for them) and, on IRIX O32, a
   ".globl name .text" for undefined functions.  */
5980 mips_output_external (FILE *file, tree decl, const char *name)
5982 default_elf_asm_output_external (file, decl, name);
5984 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
5985 set in order to avoid putting out names that are never really
5987 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
/* Small-data externs only matter when assembler macros expand the
   access (i.e. no explicit relocations).  */
5989 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5991 fputs ("\t.extern\t", file);
5992 assemble_name (file, name);
5993 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
5994 int_size_in_bytes (TREE_TYPE (decl)));
5996 else if (TARGET_IRIX
5997 && mips_abi == ABI_32
5998 && TREE_CODE (decl) == FUNCTION_DECL)
6000 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6001 `.global name .text' directive for every used but
6002 undefined function. If we don't, the linker may perform
6003 an optimization (skipping over the insns that set $gp)
6004 when it is unsafe. */
6005 fputs ("\t.globl ", file);
6006 assemble_name (file, name);
6007 fputs (" .text\n", file);
6012 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6013 put out a MIPS ECOFF file and a stab. */
/* Emit a ".file" directive for NAME to STREAM, tracking the source
   file number in num_source_filenames.  DWARF-2 and stabs output
   defer to dwarf2out/dbxout respectively (those branches' bodies are
   elided in this view).  */
6016 mips_output_filename (FILE *stream, const char *name)
6019 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6021 if (write_symbols == DWARF2_DEBUG)
/* First filename seen in the compilation: always emit it.  */
6023 else if (mips_output_filename_first_time)
6025 mips_output_filename_first_time = 0;
6026 num_source_filenames += 1;
6027 current_function_file = name;
6028 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6029 output_quoted_string (stream, name);
6030 putc ('\n', stream);
6033 /* If we are emitting stabs, let dbxout.c handle this (except for
6034 the mips_output_filename_first_time case). */
6035 else if (write_symbols == DBX_DEBUG)
/* Otherwise only emit when the filename actually changed.  */
6038 else if (name != current_function_file
6039 && strcmp (name, current_function_file) != 0)
6041 num_source_filenames += 1;
6042 current_function_file = name;
6043 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6044 output_quoted_string (stream, name);
6045 putc ('\n', stream);
6049 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6050 that should be written before the opening quote, such as "\t.ascii\t"
6051 for real string data or "\t# " for a comment. */
/* Output the LEN bytes of STRING_PARAM as a quoted .ascii-style
   string, escaping backslash/quote and printing non-printable bytes
   in octal, breaking the line past column ~72.
   NOTE(review): the PREFIX parameter, the cur_pos bookkeeping and the
   printable-character fast path are elided in this view.  */
6054 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6059 register const unsigned char *string =
6060 (const unsigned char *)string_param;
6062 fprintf (stream, "%s\"", prefix);
6063 for (i = 0; i < len; i++)
6065 register int c = string[i];
/* Escape characters that would terminate or corrupt the literal.  */
6069 if (c == '\\' || c == '\"')
6071 putc ('\\', stream);
/* Non-printable byte: three-digit octal escape.  */
6079 fprintf (stream, "\\%03o", c);
/* Keep assembler lines short: close the quote and reopen on a new
   line once past column 72 (unless this is the last byte).  */
6083 if (cur_pos > 72 && i+1 < len)
6086 fprintf (stream, "\"\n%s\"", prefix);
6089 fprintf (stream, "\"\n");
6092 /* Implement TARGET_ASM_FILE_START. */
/* Implement TARGET_ASM_FILE_START: emit per-file directives -- the
   FP-ABI .gnu_attribute, the .mdebug ABI marker section for GDB, the
   long32/long64 marker for EABI/O64, .abicalls, and .set mips16.
   NOTE(review): the enclosing conditionals for several fragments
   (e.g. the TARGET_MIPS16 test guarding ".set mips16") are elided in
   this view.  */
6095 mips_file_start (void)
6097 default_file_start ();
/* Record the floating-point ABI: 1 = hard double, 2 = hard single,
   3 = soft float.  */
6101 #ifdef HAVE_AS_GNU_ATTRIBUTE
6102 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6103 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6106 /* Generate a special section to describe the ABI switches used to
6107 produce the resultant binary. This used to be done by the assembler
6108 setting bits in the ELF header's flags field, but we have run out of
6109 bits. GDB needs this information in order to be able to correctly
6110 debug these binaries. See the function mips_gdbarch_init() in
6111 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6112 causes unnecessary IRIX 6 ld warnings. */
6113 const char * abi_string = NULL;
6117 case ABI_32: abi_string = "abi32"; break;
6118 case ABI_N32: abi_string = "abiN32"; break;
6119 case ABI_64: abi_string = "abi64"; break;
6120 case ABI_O64: abi_string = "abiO64"; break;
6121 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6125 /* Note - we use fprintf directly rather than calling switch_to_section
6126 because in this way we can avoid creating an allocated section. We
6127 do not want this section to take up any space in the running
6129 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6131 /* There is no ELF header flag to distinguish long32 forms of the
6132 EABI from long64 forms. Emit a special section to help tools
6133 such as GDB. Do the same for o64, which is sometimes used with
6135 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6136 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6137 TARGET_LONG64 ? 64 : 32);
6139 /* Restore the default section. */
6140 fprintf (asm_out_file, "\t.previous\n");
6143 /* Generate the pseudo ops that System V.4 wants. */
6144 if (TARGET_ABICALLS)
6145 fprintf (asm_out_file, "\t.abicalls\n");
6148 fprintf (asm_out_file, "\t.set\tmips16\n");
/* Optional human-readable summary of key code-generation options.  */
6150 if (flag_verbose_asm)
6151 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6153 mips_section_threshold, mips_arch_info->name, mips_isa);
6156 #ifdef BSS_SECTION_ASM_OP
6157 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6158 in the use of sbss. */
/* Implement ASM_OUTPUT_ALIGNED_BSS.  Like the default version except
   that small-data objects go into .sbss instead of .bss.  */
6161 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6162 unsigned HOST_WIDE_INT size, int align)
6164 extern tree last_assemble_variable_decl;
6166 if (mips_in_small_data_p (decl))
6167 switch_to_section (get_named_section (NULL, ".sbss", 0));
6169 switch_to_section (bss_section);
6170 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6171 last_assemble_variable_decl = decl;
6172 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
/* A zero-sized object still reserves one byte so the label is unique.  */
6173 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6177 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6178 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  Same as the elfos.h
   version except that -muninit-const-in-rodata places uninitialized
   const objects in .rdata (via .space) rather than .comm.  */
6181 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6182 unsigned HOST_WIDE_INT size,
6185 /* If the target wants uninitialized const declarations in
6186 .rdata then don't put them in .comm. */
6187 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6188 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6189 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6191 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6192 targetm.asm_out.globalize_label (stream, name);
6194 switch_to_section (readonly_data_section);
6195 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6196 mips_declare_object (stream, name, "",
6197 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
/* Otherwise fall back to a common symbol.  */
6201 mips_declare_common_object (stream, name, "\n\t.comm\t",
6205 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6206 NAME is the name of the object and ALIGN is the required alignment
6207 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6208 alignment argument. */
/* Declare a common object of SIZE bytes using asm directive
   INIT_STRING.  NAME is the object's name and ALIGN its required
   alignment in bits.  TAKES_ALIGNMENT_P says whether the directive
   accepts a third alignment argument; if not, SIZE is rounded up to
   a multiple of the alignment instead.  */
6211 mips_declare_common_object (FILE *stream, const char *name,
6212 const char *init_string,
6213 unsigned HOST_WIDE_INT size,
6214 unsigned int align, bool takes_alignment_p)
6216 if (!takes_alignment_p)
/* Round SIZE up to the alignment boundary (ALIGN is in bits).  */
6218 size += (align / BITS_PER_UNIT) - 1;
6219 size -= size % (align / BITS_PER_UNIT);
6220 mips_declare_object (stream, name, init_string,
6221 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
/* Directive takes alignment explicitly: pass it as a third operand.  */
6224 mips_declare_object (stream, name, init_string,
6225 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6226 size, align / BITS_PER_UNIT);
6229 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6230 macros, mark the symbol as written so that mips_file_end won't emit an
6231 .extern for it. STREAM is the output file, NAME is the name of the
6232 symbol, INIT_STRING is the string that should be written before the
6233 symbol and FINAL_STRING is the string that should be written after it.
6234 FINAL_STRING is a printf() format that consumes the remaining arguments. */
/* Emit a label/.comm/.lcomm style declaration: INIT_STRING, then
   NAME, then FINAL_STRING formatted with the remaining varargs.
   When assembler macros are in use, mark the symbol as written so
   mips_file_end won't also emit an .extern for it.
   NOTE(review): the matching va_end call is elided in this view.  */
6237 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6238 const char *final_string, ...)
6242 fputs (init_string, stream);
6243 assemble_name (stream, name);
6244 va_start (ap, final_string);
6245 vfprintf (stream, final_string, ap);
6248 if (!TARGET_EXPLICIT_RELOCS)
6250 tree name_tree = get_identifier (name);
/* Record that the symbol has been defined in this file.  */
6251 TREE_ASM_WRITTEN (name_tree) = 1;
6255 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6256 extern int size_directive_output;
6258 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6259 definitions except that it uses mips_declare_object() to emit the label. */
/* Implement ASM_DECLARE_OBJECT_NAME.  Standard ELF behavior (.type
   and .size directives) except that the label itself is emitted via
   mips_declare_object so it is tracked for .extern suppression.  */
6262 mips_declare_object_name (FILE *stream, const char *name,
6263 tree decl ATTRIBUTE_UNUSED)
6265 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6266 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
6269 size_directive_output = 0;
6270 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6274 size_directive_output = 1;
6275 size = int_size_in_bytes (TREE_TYPE (decl));
6276 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
/* Finally the label: "name:".  */
6279 mips_declare_object (stream, name, "", ":\n");
6282 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
/* Implement ASM_FINISH_DECLARE_OBJECT (generic ELF): emit a late
   .size directive for a top-level tentative definition whose size
   was not yet output by mips_declare_object_name.  */
6285 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6289 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6290 if (!flag_inhibit_size_directive
6291 && DECL_SIZE (decl) != 0
6292 && !at_end && top_level
6293 && DECL_INITIAL (decl) == error_mark_node
6294 && !size_directive_output)
6298 size_directive_output = 1;
6299 size = int_size_in_bytes (TREE_TYPE (decl));
6300 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6305 /* Return true if X is a small data address that can be rewritten
/* Return true if X is a small-data address that can be rewritten as
   an explicit $gp-relative (LO_SUM) access: explicit relocations must
   be enabled and X must classify as SYMBOL_SMALL_DATA.  */
6309 mips_rewrite_small_data_p (rtx x)
6311 enum mips_symbol_type symbol_type;
6313 return (TARGET_EXPLICIT_RELOCS
6314 && mips_symbolic_constant_p (x, &symbol_type)
6315 && symbol_type == SYMBOL_SMALL_DATA);
6319 /* A for_each_rtx callback for mips_small_data_pattern_p. */
/* for_each_rtx callback for mips_small_data_pattern_p: report whether
   *LOC is a rewritable small-data reference.  LO_SUM subtrees are
   already $gp-relative (the skip action for them is elided here).  */
6322 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6324 if (GET_CODE (*loc) == LO_SUM)
6327 return mips_rewrite_small_data_p (*loc);
6330 /* Return true if OP refers to small data symbols directly, not through
/* Return true if OP refers to small-data symbols directly (i.e. not
   already through a LO_SUM), by walking every sub-rtx of OP.  */
6334 mips_small_data_pattern_p (rtx op)
6336 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6339 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
/* for_each_rtx callback for mips_rewrite_small_data: replace a
   rewritable small-data reference with a $gp-relative LO_SUM and
   leave existing LO_SUMs alone.  */
6342 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6344 if (mips_rewrite_small_data_p (*loc))
6345 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6347 if (GET_CODE (*loc) == LO_SUM)
6353 /* If possible, rewrite OP so that it refers to small data using
6354 explicit relocations. */
/* If possible, rewrite OP (working on a copy, so the original insn
   pattern is untouched) so that small-data references use explicit
   $gp-relative relocations; the rewritten copy is the result.  */
6357 mips_rewrite_small_data (rtx op)
6359 op = copy_insn (op);
6360 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6364 /* Return true if the current function has an insn that implicitly
/* Return true if the current function contains an insn that
   implicitly needs $gp (a GOT access per its "got" attribute, or a
   direct small-data reference).  The result is cached in
   cfun->machine->has_gp_insn_p; once true it is never rescanned.  */
6368 mips_function_has_gp_insn (void)
6370 /* Don't bother rechecking if we found one last time. */
6371 if (!cfun->machine->has_gp_insn_p)
6375 push_topmost_sequence ();
/* Scan all real insns, ignoring USE/CLOBBER patterns.  */
6376 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6378 && GET_CODE (PATTERN (insn)) != USE
6379 && GET_CODE (PATTERN (insn)) != CLOBBER
6380 && (get_attr_got (insn) != GOT_UNSET
6381 || small_data_pattern (PATTERN (insn), VOIDmode)))
6383 pop_topmost_sequence ();
6385 cfun->machine->has_gp_insn_p = (insn != 0);
6387 return cfun->machine->has_gp_insn_p;
6391 /* Return the register that should be used as the global pointer
6392 within this function. Return 0 if the function doesn't need
6393 a global pointer. */
/* Return the register number that should serve as the global pointer
   for the current function, or 0 if no global pointer is needed.
   Normally this is GLOBAL_POINTER_REGNUM; with -mcall-saved-gp a
   call-clobbered substitute may be chosen for leaf functions.  */
6396 mips_global_pointer (void)
6400 /* $gp is always available unless we're using a GOT. */
6401 if (!TARGET_USE_GOT)
6402 return GLOBAL_POINTER_REGNUM;
6404 /* We must always provide $gp when it is used implicitly. */
6405 if (!TARGET_EXPLICIT_RELOCS)
6406 return GLOBAL_POINTER_REGNUM;
6408 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6410 if (current_function_profile)
6411 return GLOBAL_POINTER_REGNUM;
6413 /* If the function has a nonlocal goto, $gp must hold the correct
6414 global pointer for the target function. */
6415 if (current_function_has_nonlocal_goto)
6416 return GLOBAL_POINTER_REGNUM;
6418 /* If the gp is never referenced, there's no need to initialize it.
6419 Note that reload can sometimes introduce constant pool references
6420 into a function that otherwise didn't need them. For example,
6421 suppose we have an instruction like:
6423 (set (reg:DF R1) (float:DF (reg:SI R2)))
6425 If R2 turns out to be constant such as 1, the instruction may have a
6426 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6427 using this constant if R2 doesn't get allocated to a register.
6429 In cases like these, reload will have added the constant to the pool
6430 but no instruction will yet refer to it. */
6431 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
6432 && !current_function_uses_const_pool
6433 && !mips_function_has_gp_insn ())
6436 /* We need a global pointer, but perhaps we can use a call-clobbered
6437 register instead of $gp. */
6438 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6439 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
/* A suitable substitute is dead on entry, call-clobbered, not fixed,
   and not $25 (needed for PIC calls).  */
6440 if (!df_regs_ever_live_p (regno)
6441 && call_used_regs[regno]
6442 && !fixed_regs[regno]
6443 && regno != PIC_FUNCTION_ADDR_REGNUM)
/* No substitute found: fall back to the normal $gp.  */
6446 return GLOBAL_POINTER_REGNUM;
6450 /* Return true if the function return value MODE will get returned in a
6451 floating-point register. */
/* Return true if a function value of mode MODE is returned in a
   floating-point register: any float, float-vector or complex-float
   mode whose unit fits in a hardware FP value.  */
6454 mips_return_mode_in_fpr_p (enum machine_mode mode)
6456 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6457 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
6458 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6459 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6462 /* Return a two-character string representing a function floating-point
6463 return mode, used to name MIPS16 function stubs. */
/* Return the two-character suffix naming a MIPS16 call stub for
   floating-point return mode MODE (e.g. distinguishing SF/DF/SC/DC/
   V2SF).  NOTE(review): the returned string literals and the SFmode
   branch are elided in this view -- confirm against the full file.  */
6466 mips16_call_stub_mode_suffix (enum machine_mode mode)
6470 else if (mode == DFmode)
6472 else if (mode == SCmode)
6474 else if (mode == DCmode)
6476 else if (mode == V2SFmode)
6482 /* Return true if the current function returns its value in a floating-point
6483 register in MIPS16 mode. */
/* Return true if the current function returns its value in a
   floating-point register in MIPS16 mode: hard-float MIPS16, a
   non-aggregate result, and an FPR-returnable mode.  */
6486 mips16_cfun_returns_in_fpr_p (void)
6488 tree return_type = DECL_RESULT (current_function_decl);
6489 return (mips16_hard_float
6490 && !aggregate_value_p (return_type, current_function_decl)
6491 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
6495 /* Return true if the current function must save REGNO. */
/* Return true if the current function must save register REGNO in
   its prologue.  Covers call-saved registers that are live, the
   chosen $gp under -mcall-saved-gp, FPR pair partners, the old frame
   pointer, the return address, and MIPS16 special cases ($18/$31).
   NOTE(review): the TARGET_MIPS16 guard for the later cases is
   elided in this view.  */
6498 mips_save_reg_p (unsigned int regno)
6500 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6501 if we have not chosen a call-clobbered substitute. */
6502 if (regno == GLOBAL_POINTER_REGNUM)
6503 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6505 /* Check call-saved registers. */
6506 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
6509 /* Save both registers in an FPR pair if either one is used. This is
6510 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
6511 register to be used without the even register. */
6512 if (FP_REG_P (regno)
6513 && MAX_FPRS_PER_FMT == 2
6514 && df_regs_ever_live_p (regno + 1)
6515 && !call_used_regs[regno + 1])
6518 /* We need to save the old frame pointer before setting up a new one. */
6519 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6522 /* We need to save the incoming return address if it is ever clobbered
6523 within the function. */
6524 if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))
6529 /* $18 is a special case in mips16 code. It may be used to call
6530 a function which returns a floating point value, but it is
6531 marked in call_used_regs. */
6532 if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))
6535 /* $31 is also a special case. It will be used to copy a return
6536 value into the floating point registers if the return value is
6538 if (regno == GP_REG_FIRST + 31
6539 && mips16_cfun_returns_in_fpr_p ())
6546 /* Return the index of the lowest X in the range [0, SIZE) for which
6547 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
/* Return the index of the lowest X in [0, SIZE) for which bit
   REGS[X] is set in MASK; return SIZE if there is no such X.  */
6550 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
6555 for (i = 0; i < size; i++)
6556 if (BITSET_P (mask, regs[i]))
6562 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
6563 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
6564 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
6565 the same is true for all indexes (X, SIZE). */
/* *MASK_PTR is a mask of general-purpose registers and
   *GP_REG_SIZE_PTR the bytes they occupy.  If *MASK_PTR contains
   REGS[X] for some X in [0, SIZE), extend *MASK_PTR (and grow
   *GP_REG_SIZE_PTR accordingly) so every later index in the REGS
   list is set too -- MIPS16e SAVE/RESTORE operates on ranges.  */
6568 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
6569 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
6573 i = mips16e_find_first_register (*mask_ptr, regs, size);
6574 for (i++; i < size; i++)
6575 if (!BITSET_P (*mask_ptr, regs[i]))
6577 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
6578 *mask_ptr |= 1 << regs[i];
6582 /* Return the bytes needed to compute the frame pointer from the current
6583 stack pointer. SIZE is the size (in bytes) of the local variables.
6585 MIPS stack frames look like:
6587 Before call After call
6588 high +-----------------------+ +-----------------------+
6590 | caller's temps. | | caller's temps. |
6592 +-----------------------+ +-----------------------+
6594 | arguments on stack. | | arguments on stack. |
6596 +-----------------------+ +-----------------------+
6597 | 4 words to save | | 4 words to save |
6598 | arguments passed | | arguments passed |
6599 | in registers, even | | in registers, even |
6600 | if not passed. | | if not passed. |
6601 SP->+-----------------------+ VFP->+-----------------------+
6602 (VFP = SP+fp_sp_offset) | |\
6603 | fp register save | | fp_reg_size
6605 SP+gp_sp_offset->+-----------------------+
6607 | | gp register save | | gp_reg_size
6608 gp_reg_rounded | | |/
6609 | +-----------------------+
6610 \| alignment padding |
6611 +-----------------------+
6613 | local variables | | var_size
6615 +-----------------------+
6617 | alloca allocations |
6619 +-----------------------+
6621 cprestore_size | | GP save for V.4 abi |
6623 +-----------------------+
6625 | arguments on stack | |
6627 +-----------------------+ |
6628 | 4 words to save | | args_size
6629 | arguments passed | |
6630 | in registers, even | |
6631 | if not passed. | |
6632 low | (TARGET_OLDABI only) |/
6633 memory SP->+-----------------------+
/* Compute the stack-frame layout for the current function given SIZE
   bytes of local variables, filling in cfun->machine->frame (see the
   frame diagram in the comment above) and returning the total frame
   size.  NOTE(review): several initializations (gp_reg_size = 0 etc.),
   the EH loop header and the final return are elided in this view.  */
6638 compute_frame_size (HOST_WIDE_INT size)
6641 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6642 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6643 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6644 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6645 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6646 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6647 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6648 unsigned int mask; /* mask of saved gp registers */
6649 unsigned int fmask; /* mask of saved fp registers */
/* Decide which register will act as $gp before sizing the frame.  */
6651 cfun->machine->global_pointer = mips_global_pointer ();
6657 var_size = MIPS_STACK_ALIGN (size);
6658 args_size = current_function_outgoing_args_size;
6659 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6661 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6662 functions. If the function has local variables, we're committed
6663 to allocating it anyway. Otherwise reclaim it here. */
6664 if (var_size == 0 && current_function_is_leaf)
6665 cprestore_size = args_size = 0;
6667 /* The MIPS 3.0 linker does not like functions that dynamically
6668 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6669 looks like we are trying to create a second frame pointer to the
6670 function, so allocate some stack space to make it happy. */
6672 if (args_size == 0 && current_function_calls_alloca)
6673 args_size = 4 * UNITS_PER_WORD;
6675 total_size = var_size + args_size + cprestore_size;
6677 /* Calculate space needed for gp registers. */
6678 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6679 if (mips_save_reg_p (regno))
6681 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6682 mask |= 1 << (regno - GP_REG_FIRST);
6685 /* We need to restore these for the handler. */
6686 if (current_function_calls_eh_return)
6691 regno = EH_RETURN_DATA_REGNO (i);
6692 if (regno == INVALID_REGNUM)
6694 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6695 mask |= 1 << (regno - GP_REG_FIRST);
6699 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
6700 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
6701 save all later registers too. */
6702 if (GENERATE_MIPS16E_SAVE_RESTORE)
6704 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
6705 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
6706 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
6707 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
6710 /* This loop must iterate over the same space as its companion in
6711 mips_for_each_saved_reg. */
6712 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
6713 regno >= FP_REG_FIRST;
6714 regno -= MAX_FPRS_PER_FMT)
6716 if (mips_save_reg_p (regno))
6718 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
6719 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
6723 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6724 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6726 /* Add in the space required for saving incoming register arguments. */
6727 total_size += current_function_pretend_args_size;
6728 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6730 /* Save other computed information. */
6731 cfun->machine->frame.total_size = total_size;
6732 cfun->machine->frame.var_size = var_size;
6733 cfun->machine->frame.args_size = args_size;
6734 cfun->machine->frame.cprestore_size = cprestore_size;
6735 cfun->machine->frame.gp_reg_size = gp_reg_size;
6736 cfun->machine->frame.fp_reg_size = fp_reg_size;
6737 cfun->machine->frame.mask = mask;
6738 cfun->machine->frame.fmask = fmask;
6739 cfun->machine->frame.initialized = reload_completed;
6740 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6741 cfun->machine->frame.num_fp = (fp_reg_size
6742 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
/* GP save offsets (guard condition elided in this view).  */
6746 HOST_WIDE_INT offset;
6748 /* MIPS16e SAVE and RESTORE instructions require the GP save area
6749 to be aligned at the high end with any padding at the low end,
6750 so do it that way all the time. */
6751 offset = (total_size
6752 - MIPS_STACK_ALIGN (fp_reg_size)
6753 - GET_MODE_SIZE (gpr_mode));
6754 cfun->machine->frame.gp_sp_offset = offset;
6755 cfun->machine->frame.gp_save_offset = offset - total_size;
/* No GP registers saved: zero offsets.  */
6759 cfun->machine->frame.gp_sp_offset = 0;
6760 cfun->machine->frame.gp_save_offset = 0;
/* FP save offsets, computed the same way.  */
6765 HOST_WIDE_INT offset;
6767 offset = (args_size + cprestore_size + var_size
6768 + gp_reg_rounded + fp_reg_size
6769 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
6770 cfun->machine->frame.fp_sp_offset = offset;
6771 cfun->machine->frame.fp_save_offset = offset - total_size;
6775 cfun->machine->frame.fp_sp_offset = 0;
6776 cfun->machine->frame.fp_save_offset = 0;
6779 /* Ok, we're done. */
6783 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6784 pointer or argument pointer. TO is either the stack pointer or
6785 hard frame pointer. */
/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is the frame pointer
   or argument pointer; TO is the stack pointer or hard frame
   pointer.  Recomputes the frame first so the offsets are current.  */
6788 mips_initial_elimination_offset (int from, int to)
6790 HOST_WIDE_INT offset;
6792 compute_frame_size (get_frame_size ());
6794 /* Set OFFSET to the offset from the stack pointer. */
6797 case FRAME_POINTER_REGNUM:
6801 case ARG_POINTER_REGNUM:
6802 offset = (cfun->machine->frame.total_size
6803 - current_function_pretend_args_size);
/* In MIPS16 mode the hard frame pointer sits above the outgoing
   argument area, so subtract args_size when eliminating to it.  */
6810 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6811 offset -= cfun->machine->frame.args_size;
6816 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6817 back to a previous frame. */
/* Implement RETURN_ADDR_RTX.  Only COUNT == 0 (the current frame) is
   supported: the return address is the incoming value of $31.
   NOTE(review): the COUNT != 0 early-exit path is elided here.  */
6819 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6824 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6827 /* Use FN to save or restore register REGNO. MODE is the register's
6828 mode and OFFSET is the offset of its save slot from the current
/* Use callback FN to save or restore register REGNO.  MODE is the
   register's mode and OFFSET the offset of its save slot from the
   current stack pointer.  */
6832 mips_save_restore_reg (enum machine_mode mode, int regno,
6833 HOST_WIDE_INT offset, mips_save_restore_fn fn)
6837 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
6839 fn (gen_rtx_REG (mode, regno), mem);
6843 /* Call FN for each register that is saved by the current function.
6844 SP_OFFSET is the offset of the current stack pointer from the start
/* Call FN for each register saved by the current function (per the
   frame.mask/frame.fmask computed in compute_frame_size).  SP_OFFSET
   is the offset of the current stack pointer from the start of the
   frame, and is subtracted from each slot offset.  */
6848 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6850 enum machine_mode fpr_mode;
6851 HOST_WIDE_INT offset;
6854 /* Save registers starting from high to low. The debuggers prefer at least
6855 the return register be stored at func+4, and also it allows us not to
6856 need a nop in the epilogue if at least one register is reloaded in
6857 addition to return address. */
6858 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6859 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6860 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6862 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6863 offset -= GET_MODE_SIZE (gpr_mode);
6866 /* This loop must iterate over the same space as its companion in
6867 compute_frame_size. */
6868 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
6869 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6870 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
6871 regno >= FP_REG_FIRST;
6872 regno -= MAX_FPRS_PER_FMT)
6873 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6875 mips_save_restore_reg (fpr_mode, regno, offset, fn);
6876 offset -= GET_MODE_SIZE (fpr_mode);
6880 /* If we're generating n32 or n64 abicalls, and the current function
6881    does not use $28 as its global pointer, emit a cplocal directive.
6882    Use pic_offset_table_rtx as the argument to the directive.  */
6885 mips_output_cplocal (void)
/* Only needed when the assembler expands macros itself (no explicit
   relocs) and a non-default GP register was chosen.  */
6887   if (!TARGET_EXPLICIT_RELOCS
6888       && cfun->machine->global_pointer > 0
6889       && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6890     output_asm_insn (".cplocal %+", 0);
6893 /* Return the style of GP load sequence that is being used for the
6894    current function.  */
6896 enum mips_loadgp_style
6897 mips_current_loadgp_style (void)
/* NOTE(review): the LOADGP_NONE return and (presumably) a LOADGP_RTP
   case are elided from this chunk — verify against the full file.  */
6899   if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
6905   if (TARGET_ABSOLUTE_ABICALLS)
6906     return LOADGP_ABSOLUTE;
6908   return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
6911 /* The __gnu_local_gp symbol.  */
/* Lazily-created, GC-rooted SYMBOL_REF shared by all LOADGP_ABSOLUTE
   sequences in this translation unit.  */
6913 static GTY(()) rtx mips_gnu_local_gp;
6915 /* If we're generating n32 or n64 abicalls, emit instructions
6916    to set up the global pointer.  */
6919 mips_emit_loadgp (void)
6921   rtx addr, offset, incoming_address, base, index;
6923   switch (mips_current_loadgp_style ())
6925     case LOADGP_ABSOLUTE:
6926       if (mips_gnu_local_gp == NULL)
6928 	  mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
6929 	  SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
6931       emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
/* NOTE(review): the case label for the new-ABI style is elided here;
   the following lines compute $gp from the incoming function address
   in $25 (PIC_FUNCTION_ADDR_REGNUM).  */
6935       addr = XEXP (DECL_RTL (current_function_decl), 0);
6936       offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6937       incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6938       emit_insn (gen_loadgp_newabi (offset, incoming_address));
/* Without explicit relocs the loadgp must not be scheduled apart from
   its uses; the blockage enforces that.  */
6939       if (!TARGET_EXPLICIT_RELOCS)
6940 	emit_insn (gen_loadgp_blockage ());
/* VxWorks RTP style: $gp comes from GOTT_BASE/GOTT_INDEX.  */
6944       base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
6945       index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
6946       emit_insn (gen_loadgp_rtp (base, index));
6947       if (!TARGET_EXPLICIT_RELOCS)
6948 	emit_insn (gen_loadgp_blockage ());
6956 /* Set up the stack and frame (if desired) for the function.  */
/* Implements TARGET_ASM_FUNCTION_PROLOGUE: emits the textual .ent/.frame/
   .mask/.fmask directives and, for SVR4 PIC, the .cpload sequence.  The
   actual stack adjustment insns come from mips_expand_prologue.  */
6959 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6962   HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6964 #ifdef SDB_DEBUGGING_INFO
6965   if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6966     SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6969   /* In mips16 mode, we may need to generate a 32 bit to handle
6970      floating point arguments.  The linker will arrange for any 32-bit
6971      functions to call this stub, which will then jump to the 16-bit
6973   if (mips16_hard_float
6974       && current_function_args_info.fp_code != 0)
6975     build_mips16_function_stub (file);
6977   if (!FUNCTION_NAME_ALREADY_DECLARED)
6979       /* Get the function name the same way that toplev.c does before calling
6980 	 assemble_start_function.  This is needed so that the name used here
6981 	 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
6982       fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6984       if (!flag_inhibit_size_directive)
6986 	  fputs ("\t.ent\t", file);
6987 	  assemble_name (file, fnname);
/* Emit the label itself (the newline after .ent is elided here).  */
6991       assemble_name (file, fnname);
6992       fputs (":\n", file);
6995   /* Stop mips_file_end from treating this function as external.  */
6996   if (TARGET_IRIX && mips_abi == ABI_32)
6997     TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6999   if (!flag_inhibit_size_directive)
7001       /* .frame FRAMEREG, FRAMESIZE, RETREG */
7003 	       "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7004 	       "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7005 	       ", args= " HOST_WIDE_INT_PRINT_DEC
7006 	       ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7007 	       (reg_names[(frame_pointer_needed)
7008 			  ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
/* mips16 points $fp past the outgoing argument area, so the reported
   frame size excludes args_size in that case.  */
7009 	       ((frame_pointer_needed && TARGET_MIPS16)
7010 		? tsize - cfun->machine->frame.args_size
7012 	       reg_names[GP_REG_FIRST + 31],
7013 	       cfun->machine->frame.var_size,
7014 	       cfun->machine->frame.num_gp,
7015 	       cfun->machine->frame.num_fp,
7016 	       cfun->machine->frame.args_size,
7017 	       cfun->machine->frame.cprestore_size);
7019       /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7020       fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7021 	       cfun->machine->frame.mask,
7022 	       cfun->machine->frame.gp_save_offset);
7023       fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7024 	       cfun->machine->frame.fmask,
7025 	       cfun->machine->frame.fp_save_offset);
7028      OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7029      HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs.  */
7032   if (mips_current_loadgp_style () == LOADGP_OLDABI)
7034       /* Handle the initialization of $gp for SVR4 PIC.  */
7035       if (!cfun->machine->all_noreorder_p)
7036 	output_asm_insn ("%(.cpload\t%^%)", 0);
/* In the all-noreorder case, keep .set noreorder/nomacro open after
   .cpload (%< emits ".set nomacro"); closed again in the epilogue.  */
7038 	output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7040   else if (cfun->machine->all_noreorder_p)
7041     output_asm_insn ("%(%<", 0);
7043   /* Tell the assembler which register we're using as the global
7044      pointer.  This is needed for thunks, since they can use either
7045      explicit relocs or assembler macros.  */
7046   mips_output_cplocal ();
7049 /* Make the last instruction frame related and note that it performs
7050    the operation described by FRAME_PATTERN.  */
7053 mips_set_frame_expr (rtx frame_pattern)
7057   insn = get_last_insn ();
7058   RTX_FRAME_RELATED_P (insn) = 1;
/* Attach the pattern as a REG_FRAME_RELATED_EXPR note so dwarf2out
   describes FRAME_PATTERN instead of the insn's own pattern.  */
7059   REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7065 /* Return a frame-related rtx that stores REG at MEM.
7066    REG must be a single register.  */
7069 mips_frame_set (rtx mem, rtx reg)
7073   /* If we're saving the return address register and the dwarf return
7074      address column differs from the hard register number, adjust the
7075      note reg to refer to the former.  */
7076   if (REGNO (reg) == GP_REG_FIRST + 31
7077       && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7078     reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
/* Mark the SET itself frame-related so it is valid inside a
   REG_FRAME_RELATED_EXPR PARALLEL.  */
7080   set = gen_rtx_SET (VOIDmode, mem, reg);
7081   RTX_FRAME_RELATED_P (set) = 1;
7087 /* Save register REG to MEM.  Make the instruction frame-related.  */
7090 mips_save_reg (rtx reg, rtx mem)
/* A 64-bit FPR store without TARGET_FLOAT64 must be done as two
   32-bit word moves; describe both halves in the unwind info.  */
7092   if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7096       if (mips_split_64bit_move_p (mem, reg))
7097 	mips_split_64bit_move (mem, reg);
7099 	emit_move_insn (mem, reg);
7101       x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7102       x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7103       mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
/* NOTE(review): the mips16 condition heading this branch is elided in
   this chunk; the visible tests exclude $31 and mips16 registers.  */
7108 	  && REGNO (reg) != GP_REG_FIRST + 31
7109 	  && !M16_REG_P (REGNO (reg)))
7111 	  /* Save a non-mips16 register by moving it through a temporary.
7112 	     We don't need to do this for $31 since there's a special
7113 	     instruction for it.  */
7114 	  emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7115 	  emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7118 	emit_move_insn (mem, reg);
7120       mips_set_frame_expr (mips_frame_set (mem, reg));
7124 /* Return a move between register REGNO and memory location SP + OFFSET.
7125    Make the move a load if RESTORE_P, otherwise make it a frame-related
/* (store — comment tail elided in this chunk).  */
7129 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
/* MIPS16e SAVE/RESTORE slots are always word-sized, hence SImode.  */
7134   mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7135   reg = gen_rtx_REG (SImode, regno);
7137 	  ? gen_rtx_SET (VOIDmode, reg, mem)
7138 	  : mips_frame_set (mem, reg));
7141 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7142    The instruction must:
7144      - Allocate or deallocate SIZE bytes in total; SIZE is known
7147      - Save or restore as many registers in *MASK_PTR as possible.
7148        The instruction saves the first registers at the top of the
7149        allocated area, with the other registers below it.
7151      - Save NARGS argument registers above the allocated area.
7153    (NARGS is always zero if RESTORE_P.)
7155    The SAVE and RESTORE instructions cannot save and restore all general
7156    registers, so there may be some registers left over for the caller to
7157    handle.  Destructively modify *MASK_PTR so that it contains the registers
7158    that still need to be saved or restored.  The caller can save these
7159    registers in the memory immediately below *OFFSET_PTR, which is a
7160    byte offset from the bottom of the allocated stack area.  */
7163 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7164 			    HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7168   HOST_WIDE_INT offset, top_offset;
7169   unsigned int i, regno;
/* SAVE/RESTORE only handles GPRs; the frame must have no FPR saves.  */
7172   gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7174   /* Calculate the number of elements in the PARALLEL.  We need one element
7175      for the stack adjustment, one for each argument register save, and one
7176      for each additional register move.  */
7178   for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7179     if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7182   /* Create the final PARALLEL.  */
7183   pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7186   /* Add the stack pointer adjustment.  */
7187   set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7188 		     plus_constant (stack_pointer_rtx,
7189 				    restore_p ? size : -size));
7190   RTX_FRAME_RELATED_P (set) = 1;
7191   XVECEXP (pattern, 0, n++) = set;
7193   /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
7194   top_offset = restore_p ? size : 0;
7196   /* Save the arguments.  */
7197   for (i = 0; i < nargs; i++)
7199       offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7200       set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7201       XVECEXP (pattern, 0, n++) = set;
7204   /* Then fill in the other register moves.  */
7205   offset = top_offset;
7206   for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7208       regno = mips16e_save_restore_regs[i];
7209       if (BITSET_P (*mask_ptr, regno))
7211 	  offset -= UNITS_PER_WORD;
7212 	  set = mips16e_save_restore_reg (restore_p, offset, regno);
7213 	  XVECEXP (pattern, 0, n++) = set;
/* Handled by the SAVE/RESTORE instruction itself; drop it from the
   caller's leftover mask.  */
7214 	  *mask_ptr &= ~(1 << regno);
7218   /* Tell the caller what offset it should use for the remaining registers.
     OFFSET - TOP_OFFSET is the (negative) displacement of the lowest slot
     used by the instruction relative to the old stack pointer; adding SIZE
     rebases it to the bottom of the allocated area.  The previous code
     added SIZE twice, which double-counted the allocation and made the
     caller place leftover registers at the wrong offset.  */
7219   *offset_ptr = size + (offset - top_offset);
7221   gcc_assert (n == XVECLEN (pattern, 0));
7226 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7227    pointer.  Return true if PATTERN matches the kind of instruction
7228    generated by mips16e_build_save_restore.  If INFO is nonnull,
7229    initialize it when returning true.  */
7232 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7233 				struct mips16e_save_restore_info *info)
7235   unsigned int i, nargs, mask;
7236   HOST_WIDE_INT top_offset, save_offset, offset, extra;
7237   rtx set, reg, mem, base;
7240   if (!GENERATE_MIPS16E_SAVE_RESTORE)
7243   /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
/* ADJUST > 0 means a RESTORE (deallocation); the old sp is then ADJUST
   above the current one.  */
7244   top_offset = adjust > 0 ? adjust : 0;
7246   /* Interpret all other members of the PARALLEL.  */
7247   save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
/* Element 0 is the sp adjustment itself; start scanning at 1.  */
7251   for (n = 1; n < XVECLEN (pattern, 0); n++)
7253       /* Check that we have a SET.  */
7254       set = XVECEXP (pattern, 0, n);
7255       if (GET_CODE (set) != SET)
7258       /* Check that the SET is a load (if restoring) or a store
7260       mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7264       /* Check that the address is the sum of the stack pointer and a
7265 	 possibly-zero constant offset.  */
7266       mips_split_plus (XEXP (mem, 0), &base, &offset);
7267       if (base != stack_pointer_rtx)
7270       /* Check that SET's other operand is a register.  */
7271       reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7275       /* Check for argument saves.  */
7276       if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7277 	  && REGNO (reg) == GP_ARG_FIRST + nargs)
/* Otherwise this must be the next register in SAVE/RESTORE order.  */
7279       else if (offset == save_offset)
7281 	  while (mips16e_save_restore_regs[i++] != REGNO (reg))
7282 	    if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7285 	  mask |= 1 << REGNO (reg);
7286 	  save_offset -= GET_MODE_SIZE (gpr_mode);
7292   /* Check that the restrictions on register ranges are met.  */
7294   mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7295 			  ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7296   mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7297 			  ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7301   /* Pass back information, if requested.  */
7304       info->nargs = nargs;
7306       info->size = (adjust > 0 ? adjust : -adjust);
7312 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7313    for the register range [MIN_REG, MAX_REG].  Return a pointer to
7314    the null terminator.  */
7317 mips16e_add_register_range (char *s, unsigned int min_reg,
7318 			    unsigned int max_reg)
/* Emit ",$a-$b" for a real range, or just ",$a" when the range is a
   single register.  */
7320   if (min_reg != max_reg)
7321     s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7323     s += sprintf (s, ",%s", reg_names[min_reg]);
7327 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7328    PATTERN and ADJUST are as for mips16e_save_restore_pattern_p.  */
7331 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
/* Static buffer: the returned string is only valid until the next call.  */
7333   static char buffer[300];
7335   struct mips16e_save_restore_info info;
7336   unsigned int i, end;
7339   /* Parse the pattern.  */
7340   if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7343   /* Add the mnemonic.  */
7344   s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7347   /* Save the arguments.  */
/* NOTE(review): the nargs > 1 guard for this range form is elided in
   this chunk.  */
7349     s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7350 		  reg_names[GP_ARG_FIRST + info.nargs - 1]);
7351   else if (info.nargs == 1)
7352     s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7354   /* Emit the amount of stack space to allocate or deallocate.  */
7355   s += sprintf (s, "%d", (int) info.size);
7357   /* Save or restore $16.  */
7358   if (BITSET_P (info.mask, 16))
7359     s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7361   /* Save or restore $17.  */
7362   if (BITSET_P (info.mask, 17))
7363     s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7365   /* Save or restore registers in the range $s2...$s8, which
7366      mips16e_s2_s8_regs lists in decreasing order.  Note that this
7367      is a software register range; the hardware registers are not
7368      numbered consecutively.  */
7369   end = ARRAY_SIZE (mips16e_s2_s8_regs);
7370   i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
/* List is decreasing, so the range is [last element .. element i].  */
7372     s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7373 				    mips16e_s2_s8_regs[i]);
7375   /* Save or restore registers in the range $a0...$a3.  */
7376   end = ARRAY_SIZE (mips16e_a0_a3_regs);
7377   i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7379     s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7380 				    mips16e_a0_a3_regs[end - 1]);
7382   /* Save or restore $31.  */
7383   if (BITSET_P (info.mask, 31))
7384     s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7389 /* Return a simplified form of X using the register values in REG_VALUES.
7390    REG_VALUES[R] is the last value assigned to hard register R, or null
7391    if R has not been modified.
7393    This function is rather limited, but is good enough for our purposes.  */
7396 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7400   x = avoid_constant_pool_reference (x);
/* NOTE(review): the UNARY_P guard for this branch is elided in this
   chunk.  Recurse into the operand and re-simplify.  */
7404       x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7405       return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7406 				 x0, GET_MODE (XEXP (x, 0)));
7409   if (ARITHMETIC_P (x))
7411       x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7412       x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7413       return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
/* Substitute a known register value only if it is stable (no volatile
   or call-clobbered content).  */
7417       && reg_values[REGNO (x)]
7418       && !rtx_unstable_p (reg_values[REGNO (x)]))
7419     return reg_values[REGNO (x)];
7424 /* Return true if (set DEST SRC) stores an argument register into its
7425    caller-allocated save slot.  If the register is not included in
7426    [GP_ARG_FIRST, GP_ARG_LAST + *NARGS_PTR), destructively modify
7427    *NARGS_PTR such that this condition holds.  REG_VALUES is as for
7428    mips16e_collect_propagate_value.  */
7431 mips16e_collect_argument_save (rtx dest, rtx src, rtx *reg_values,
7432 			       unsigned int *nargs_ptr)
7434   unsigned int argno, regno;
7435   HOST_WIDE_INT offset, required_offset;
7438   /* Check that this is a word-mode store.  */
7439   if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7442   /* Check that the register being saved is an unmodified argument
/* (register — comment tail elided).  A non-null REG_VALUES entry means
   the register has been written since entry, so its value is no longer
   the incoming argument.  */
7444   regno = REGNO (src);
7445   if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7447   argno = regno - GP_ARG_FIRST;
7449   /* Check whether the address is an appropriate stack pointer or
7450      frame pointer access.  The frame pointer is offset from the
7451      stack pointer by the size of the outgoing arguments.  */
7452   addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7453   mips_split_plus (addr, &base, &offset);
7454   required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7455   if (base == hard_frame_pointer_rtx)
7456     required_offset -= cfun->machine->frame.args_size;
7457   else if (base != stack_pointer_rtx)
7459   if (offset != required_offset)
7462   /* Make sure that *NARGS_PTR is big enough.  */
7463   if (*nargs_ptr <= argno)
7464     *nargs_ptr = argno + 1;
7469 /* A subroutine of mips_expand_prologue, called only when generating
7470    MIPS16e SAVE instructions.  Search the start of the function for any
7471    instructions that save argument registers into their caller-allocated
7472    save slots.  Delete such instructions and return a value N such that
7473    saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7474    instructions redundant.  */
7477 mips16e_collect_argument_saves (void)
7479   rtx reg_values[FIRST_PSEUDO_REGISTER];
7480   rtx insn, next, set, dest, src;
/* Walk the outermost insn sequence even though we are inside the
   prologue sequence being emitted.  */
7483   push_topmost_sequence ();
7485   memset (reg_values, 0, sizeof (reg_values));
7486   for (insn = get_insns (); insn; insn = next)
7488       next = NEXT_INSN (insn);
/* NOTE(review): the NOTE_P/INSN_P filtering and the delete_insn call
   for matched saves are elided in this chunk.  */
7495       set = PATTERN (insn);
7496       if (GET_CODE (set) != SET)
7499       dest = SET_DEST (set);
7500       src = SET_SRC (set);
7501       if (mips16e_collect_argument_save (dest, src, reg_values, &nargs))
/* Track the most recent word-mode value written to each register so
   later address computations can be propagated.  */
7503       else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7504 	reg_values[REGNO (dest)]
7505 	  = mips16e_collect_propagate_value (src, reg_values);
7509   pop_topmost_sequence ();
7514 /* Expand the prologue into a bunch of separate insns.  */
7517 mips_expand_prologue (void)
7523   if (cfun->machine->global_pointer > 0)
7524     SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7526   size = compute_frame_size (get_frame_size ());
7528   /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP
7529      bytes beforehand; this is enough to cover the register save area
7530      without going out of range.  */
7531   if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7533       HOST_WIDE_INT step1;
7535       step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
7537       if (GENERATE_MIPS16E_SAVE_RESTORE)
7539 	  HOST_WIDE_INT offset;
7540 	  unsigned int mask, regno;
7542 	  /* Try to merge argument stores into the save instruction.  */
7543 	  nargs = mips16e_collect_argument_saves ();
7545 	  /* Build the save instruction.  */
7546 	  mask = cfun->machine->frame.mask;
7547 	  insn = mips16e_build_save_restore (false, &mask, &offset,
7549 	  RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7552 	  /* Check if we need to save other registers.  */
/* MASK was destructively reduced by mips16e_build_save_restore to the
   registers the SAVE instruction could not handle.  */
7553 	  for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7554 	    if (BITSET_P (mask, regno - GP_REG_FIRST))
7556 		offset -= GET_MODE_SIZE (gpr_mode);
7557 		mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
/* Non-MIPS16e path: allocate STEP1 bytes, then store each saved
   register relative to the new sp.  */
7562 	  insn = gen_add3_insn (stack_pointer_rtx,
7565 	  RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7567 	  mips_for_each_saved_reg (size, mips_save_reg);
7571   /* Allocate the rest of the frame.  */
7574       if (SMALL_OPERAND (-size))
7575 	RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
7577 						       GEN_INT (-size)))) = 1;
/* Large frames: materialize SIZE in a temporary first.  */
7580 	  emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
7583 	      /* There are no instructions to add or subtract registers
7584 		 from the stack pointer, so use the frame pointer as a
7585 		 temporary.  We should always be using a frame pointer
7586 		 in this case anyway.  */
7587 	      gcc_assert (frame_pointer_needed);
7588 	      emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7589 	      emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
7590 					hard_frame_pointer_rtx,
7591 					MIPS_PROLOGUE_TEMP (Pmode)));
7592 	      emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
7595 	    emit_insn (gen_sub3_insn (stack_pointer_rtx,
7597 				      MIPS_PROLOGUE_TEMP (Pmode)));
7599 	  /* Describe the combined effect of the previous instructions.  */
/* Tell dwarf2out the net effect is sp := sp - SIZE, regardless of the
   multi-insn sequence used above.  */
7601 	    (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7602 			  plus_constant (stack_pointer_rtx, -size)));
7606   /* Set up the frame pointer, if we're using one.  In mips16 code,
7607      we point the frame pointer ahead of the outgoing argument area.
7608      This should allow more variables & incoming arguments to be
7609      accessed with unextended instructions.  */
7610   if (frame_pointer_needed)
7612       if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
7614 	  rtx offset = GEN_INT (cfun->machine->frame.args_size);
7615 	  if (SMALL_OPERAND (cfun->machine->frame.args_size))
7617 	      (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7622 	      emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
7623 	      emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7624 	      emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7625 					hard_frame_pointer_rtx,
7626 					MIPS_PROLOGUE_TEMP (Pmode)));
7628 		(gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
7629 			      plus_constant (stack_pointer_rtx,
7630 					     cfun->machine->frame.args_size)));
7634 	RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
7635 					     stack_pointer_rtx)) = 1;
7638   mips_emit_loadgp ();
7640   /* If generating o32/o64 abicalls, save $gp on the stack.  */
7641   if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
7642     emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
7644   /* If we are profiling, make sure no instructions are scheduled before
7645      the call to mcount.  */
7647   if (current_function_profile)
7648     emit_insn (gen_blockage ());
7651 /* Do any necessary cleanup after a function to restore stack, frame,
/* (and regs — comment tail elided).  Mask selecting the $31 bit in
   frame.mask.  */
7654 #define RA_MASK BITMASK_HIGH	/* 1 << 31 */
7657 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7658 			       HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7660   /* Reinstate the normal $gp.  */
7661   SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
7662   mips_output_cplocal ();
7664   if (cfun->machine->all_noreorder_p)
7666       /* Avoid using %>%) since it adds excess whitespace.  */
7667       output_asm_insn (".set\tmacro", 0);
7668       output_asm_insn (".set\treorder", 0);
/* Close the noreorder/nomacro region opened by the prologue.  */
7669       set_noreorder = set_nomacro = 0;
7672   if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
7676       /* Get the function name the same way that toplev.c does before calling
7677 	 assemble_start_function.  This is needed so that the name used here
7678 	 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
7679       fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7680       fputs ("\t.end\t", file);
7681       assemble_name (file, fnname);
7686 /* Emit instructions to restore register REG from slot MEM.  */
7689 mips_restore_reg (rtx reg, rtx mem)
7691   /* There's no mips16 instruction to load $31 directly.  Load into
7692      $7 instead and adjust the return insn appropriately.  */
7693   if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
7694     reg = gen_rtx_REG (GET_MODE (reg), 7);
7696   if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
7698       /* Can't restore directly; move through a temporary.  */
7699       emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
7700       emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
7703     emit_move_insn (reg, mem);
7707 /* Expand the epilogue into a bunch of separate insns.  SIBCALL_P is true
7708    if this epilogue precedes a sibling call, false if it is for a normal
7709    "epilogue" pattern.  */
7712 mips_expand_epilogue (int sibcall_p)
7714   HOST_WIDE_INT step1, step2;
7717   if (!sibcall_p && mips_can_use_return_insn ())
7719       emit_jump_insn (gen_return ());
7723   /* In mips16 mode, if the return value should go into a floating-point
7724      register, we need to call a helper routine to copy it over.  */
7725   if (mips16_cfun_returns_in_fpr_p ())
7734       enum machine_mode return_mode;
7736       return_type = DECL_RESULT (current_function_decl);
7737       return_mode = DECL_MODE (return_type);
/* Helper name is __mips16_ret_<mode-suffix>; it moves GP_RETURN into
   the FP return register.  */
7739       name = ACONCAT (("__mips16_ret_",
7740 		       mips16_call_stub_mode_suffix (return_mode),
7742       id = get_identifier (name);
7743       func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
7744       retval = gen_rtx_REG (return_mode, GP_RETURN);
7745       call = gen_call_value_internal (retval, func, const0_rtx);
7746       insn = emit_call_insn (call);
7747       use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
7750   /* Split the frame into two.  STEP1 is the amount of stack we should
7751      deallocate before restoring the registers.  STEP2 is the amount we
7752      should deallocate afterwards.
7754      Start off by assuming that no registers need to be restored.  */
7755   step1 = cfun->machine->frame.total_size;
7758   /* Work out which register holds the frame address.  Account for the
7759      frame pointer offset used by mips16 code.  */
7760   if (!frame_pointer_needed)
7761     base = stack_pointer_rtx;
7764       base = hard_frame_pointer_rtx;
/* mips16 $fp points past the outgoing args; compensate here.  */
7766 	step1 -= cfun->machine->frame.args_size;
7769   /* If we need to restore registers, deallocate as much stack as
7770      possible in the second step without going out of range.  */
7771   if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7773       step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
7777   /* Set TARGET to BASE + STEP1.  */
7783       /* Get an rtx for STEP1 that we can add to BASE.  */
7784       adjust = GEN_INT (step1);
7785       if (!SMALL_OPERAND (step1))
7787 	  emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
7788 	  adjust = MIPS_EPILOGUE_TEMP (Pmode);
7791       /* Normal mode code can copy the result straight into $sp.  */
7793 	target = stack_pointer_rtx;
7795       emit_insn (gen_add3_insn (target, base, adjust));
7798   /* Copy TARGET into the stack pointer.  */
7799   if (target != stack_pointer_rtx)
7800     emit_move_insn (stack_pointer_rtx, target);
7802   /* If we're using addressing macros, $gp is implicitly used by all
7803      SYMBOL_REFs.  We must emit a blockage insn before restoring $gp
/* (— comment tail elided in this chunk).  */
7805   if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
7806     emit_insn (gen_blockage ());
7808   if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
7810       unsigned int regno, mask;
7811       HOST_WIDE_INT offset;
7814       /* Generate the restore instruction.  */
7815       mask = cfun->machine->frame.mask;
7816       restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
7818       /* Restore any other registers manually.  */
/* MASK now holds only the registers RESTORE could not handle.  */
7819       for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7820 	if (BITSET_P (mask, regno - GP_REG_FIRST))
7822 	    offset -= GET_MODE_SIZE (gpr_mode);
7823 	    mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
7826       /* Restore the remaining registers and deallocate the final bit
7828       emit_insn (restore);
7832       /* Restore the registers.  */
7833       mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
7836       /* Deallocate the final bit of the frame.  */
7838 	emit_insn (gen_add3_insn (stack_pointer_rtx,
7843   /* Add in the __builtin_eh_return stack adjustment.  We need to
7844      use a temporary in mips16 code.  */
7845   if (current_function_calls_eh_return)
7849 	  emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
7850 	  emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
7851 				    MIPS_EPILOGUE_TEMP (Pmode),
7852 				    EH_RETURN_STACKADJ_RTX));
7853 	  emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
7856 	emit_insn (gen_add3_insn (stack_pointer_rtx,
7858 				  EH_RETURN_STACKADJ_RTX));
7863       /* When generating MIPS16 code, the normal mips_for_each_saved_reg
7864 	 path will restore the return address into $7 rather than $31.  */
7866 	  && !GENERATE_MIPS16E_SAVE_RESTORE
7867 	  && (cfun->machine->frame.mask & RA_MASK) != 0)
7868 	emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7869 							  GP_REG_FIRST + 7)));
7871 	emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7872 							  GP_REG_FIRST + 31)));
7876 /* Return nonzero if this function is known to have a null epilogue.
7877    This allows the optimizer to omit jumps to jumps if no stack
/* (is created — comment tail elided).  */
7881 mips_can_use_return_insn (void)
/* Frame size is only final after reload.  */
7883   if (! reload_completed)
/* $31 live or profiling implies work in the epilogue.  */
7886   if (df_regs_ever_live_p (31) || current_function_profile)
7889   /* In mips16 mode, a function that returns a floating point value
7890      needs to arrange to copy the return value into the floating point
/* (registers — comment tail elided).  */
7892   if (mips16_cfun_returns_in_fpr_p ())
7895   if (cfun->machine->frame.initialized)
7896     return cfun->machine->frame.total_size == 0;
7898   return compute_frame_size (get_frame_size ()) == 0;
7901 /* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
7902    in order to avoid duplicating too much logic from elsewhere.  */
7905 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7906 		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7909   rtx this, temp1, temp2, insn, fnaddr;
7911   /* Pretend to be a post-reload pass while generating rtl.  */
7913   reload_completed = 1;
7915   /* Mark the end of the (empty) prologue.  */
7916   emit_note (NOTE_INSN_PROLOGUE_END);
7918   /* Pick a global pointer.  Use a call-clobbered register if
7919      TARGET_CALL_SAVED_GP, so that we can use a sibcall.  */
7922   cfun->machine->global_pointer =
7923     TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
7925   SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7929   /* Set up the global pointer for n32 or n64 abicalls.  If
7930      LOADGP_ABSOLUTE then the thunk does not use the gp and there is
7931      no need to load it.*/
7932   if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
7933       || !targetm.binds_local_p (function))
7934     mips_emit_loadgp ();
7936   /* We need two temporary registers in some cases.  */
7937   temp1 = gen_rtx_REG (Pmode, 2);
7938   temp2 = gen_rtx_REG (Pmode, 3);
7940   /* Find out which register contains the "this" pointer.  */
/* A hidden aggregate-return pointer occupies the first argument slot,
   pushing "this" to the second.  */
7941   if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7942     this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
7944     this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
7946   /* Add DELTA to THIS.  */
7949       rtx offset = GEN_INT (delta);
7950       if (!SMALL_OPERAND (delta))
7952 	  emit_move_insn (temp1, offset);
7955       emit_insn (gen_add3_insn (this, this, offset));
7958   /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
7959   if (vcall_offset != 0)
7963       /* Set TEMP1 to *THIS.  */
7964       emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
7966       /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
7967       addr = mips_add_offset (temp2, temp1, vcall_offset);
7969       /* Load the offset and add it to THIS.  */
7970       emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
7971       emit_insn (gen_add3_insn (this, this, temp1));
7974   /* Jump to the target function.  Use a sibcall if direct jumps are
7975      allowed, otherwise load the address into a register first.  */
7976   fnaddr = XEXP (DECL_RTL (function), 0);
7977   if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr))
7979       /* This is messy.  gas treats "la $25,foo" as part of a call
7980 	 sequence and may allow a global "foo" to be lazily bound.
7981 	 The general move patterns therefore reject this combination.
7983 	 In this context, lazy binding would actually be OK
7984 	 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
7985 	 TARGET_CALL_SAVED_GP; see mips_load_call_address.
7986 	 We must therefore load the address via a temporary
7987 	 register if mips_dangerous_for_la25_p.
7989 	 If we jump to the temporary register rather than $25, the assembler
7990 	 can use the move insn to fill the jump's delay slot.  */
7991       if (TARGET_USE_PIC_FN_ADDR_REG
7992 	  && !mips_dangerous_for_la25_p (fnaddr))
7993 	temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7994       mips_load_call_address (temp1, fnaddr, true);
7996       if (TARGET_USE_PIC_FN_ADDR_REG
7997 	  && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
7998 	emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
7999       emit_jump_insn (gen_indirect_jump (temp1));
/* Direct case: emit a sibcall to FUNCTION.  */
8003       insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8004       SIBLING_CALL_P (insn) = 1;
8007   /* Run just enough of rest_of_compilation.  This sequence was
8008      "borrowed" from alpha.c.  */
8009   insn = get_insns ();
8010   insn_locators_alloc ();
8011   split_all_insns_noflow ();
8013   mips16_lay_out_constants ();
8014   shorten_branches (insn);
8015   final_start_function (insn, file, 1);
8016   final (insn, file, 1);
8017   final_end_function ();
8019   /* Clean up the vars set above.  Note that final_end_function resets
8020      the global pointer for us.  */
8021   reload_completed = 0;
8025 /* Returns nonzero if X contains a SYMBOL_REF. */
8028 symbolic_expression_p (rtx x)
8030 if (GET_CODE (x) == SYMBOL_REF)
/* NOTE(review): the taken branch (original lines 8031-8032) is elided in
   this excerpt; presumably it returns true -- confirm in the full file.  */
8033 if (GET_CODE (x) == CONST)
/* Look through a CONST wrapper and test the wrapped expression.  */
8034 return symbolic_expression_p (XEXP (x, 0));
/* NOTE(review): the guard for the recursion below (original line 8036)
   is elided; it presumably tests for a unary operator -- confirm.  */
8037 return symbolic_expression_p (XEXP (x, 0));
/* A binary arithmetic expression is symbolic if either operand is.  */
8039 if (ARITHMETIC_P (x))
8040 return (symbolic_expression_p (XEXP (x, 0))
8041 || symbolic_expression_p (XEXP (x, 1)));
8046 /* Choose the section to use for the constant rtx expression X that has
8050 mips_select_rtx_section (enum machine_mode mode, rtx x,
8051 unsigned HOST_WIDE_INT align)
/* NOTE(review): the condition guarding this first branch (presumably a
   TARGET_MIPS16 test, per the comment) is elided in this excerpt.  */
8055 /* In mips16 mode, the constant table always goes in the same section
8056 as the function, so that constants can be loaded using PC relative
8058 return function_section (current_function_decl);
8060 else if (TARGET_EMBEDDED_DATA)
8062 /* For embedded applications, always put constants in read-only data,
8063 in order to reduce RAM usage. */
8064 return mergeable_constant_section (mode, align, 0);
8068 /* For hosted applications, always put constants in small data if
8069 possible, as this gives the best performance. */
8070 /* ??? Consider using mergeable small data sections. */
/* Small constants go in .sdata so they can be reached via $gp.  */
8072 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
8073 && mips_section_threshold > 0)
8074 return get_named_section (NULL, ".sdata", 0);
/* PIC symbolic constants need dynamic relocation, so they cannot live
   in a truly read-only section; .data.rel.ro is relocated then
   remapped read-only by the dynamic linker.  */
8075 else if (flag_pic && symbolic_expression_p (x))
8076 return get_named_section (NULL, ".data.rel.ro", 3);
8078 return mergeable_constant_section (mode, align, 0);
8082 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8084 The complication here is that, with the combination TARGET_ABICALLS
8085 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8086 therefore not be included in the read-only part of a DSO. Handle such
8087 cases by selecting a normal data section instead of a read-only one.
8088 The logic apes that in default_function_rodata_section. */
8091 mips_function_rodata_section (tree decl)
8093 if (!TARGET_ABICALLS || TARGET_GPWORD)
8094 return default_function_rodata_section (decl);
8096 if (decl && DECL_SECTION_NAME (decl))
8098 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8099 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8101 char *rname = ASTRDUP (name);
/* NOTE(review): original line 8102 is elided here; presumably it
   rewrites the 't' in ".gnu.linkonce.t." to 'd' before creating the
   writable linkonce section -- confirm in the full file.  */
8103 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8105 else if (flag_function_sections && flag_data_sections
8106 && strncmp (name, ".text.", 6) == 0)
8108 char *rname = ASTRDUP (name);
/* Turn ".text.FOO" into ".data.FOO" by overwriting "text" in place.  */
8109 memcpy (rname + 1, "data", 4);
8110 return get_section (rname, SECTION_WRITE, decl);
/* Fallback: plain writable data section.  */
8113 return data_section;
8116 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8117 locally-defined objects go in a small data section. It also controls
8118 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8119 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8122 mips_in_small_data_p (tree decl)
/* NOTE(review): this excerpt elides several interior lines (return
   statements and closing braces); the early-out branches below
   presumably return false -- confirm in the full file.  */
8126 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8129 /* We don't yet generate small-data references for -mabicalls or
8130 VxWorks RTP code. See the related -G handling in override_options. */
8131 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
/* Variables with an explicit section attribute are honored only if
   the section is a recognized small-data one.  */
8134 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8138 /* Reject anything that isn't in a known small-data section. */
8139 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8140 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8143 /* If a symbol is defined externally, the assembler will use the
8144 usual -G rules when deciding how to implement macros. */
8145 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
8148 else if (TARGET_EMBEDDED_DATA)
8150 /* Don't put constants into the small data section: we want them
8151 to be in ROM rather than RAM. */
8152 if (TREE_CODE (decl) != VAR_DECL)
8155 if (TREE_READONLY (decl)
8156 && !TREE_SIDE_EFFECTS (decl)
8157 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
/* Final size test: only objects whose size is known and within the
   -G threshold qualify for small data.  */
8161 size = int_size_in_bytes (TREE_TYPE (decl));
8162 return (size > 0 && size <= mips_section_threshold);
8165 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8166 anchors for small data: the GP register acts as an anchor in that
8167 case. We also don't want to use them for PC-relative accesses,
8168 where the PC acts as an anchor. */
8171 mips_use_anchors_for_symbol_p (rtx symbol)
8173 switch (mips_classify_symbol (symbol))
/* NOTE(review): the case bodies and the default case are elided in this
   excerpt; per the comment above, these two cases presumably return
   false and all other symbol types fall back to the default anchor
   policy -- confirm in the full file.  */
8175 case SYMBOL_CONSTANT_POOL:
8176 case SYMBOL_SMALL_DATA:
8184 /* See whether VALTYPE is a record whose fields should be returned in
8185 floating-point registers. If so, return the number of fields and
8186 list them in FIELDS (which should have two elements). Return 0
8189 For n32 & n64, a structure with one or two fields is returned in
8190 floating-point registers as long as every field has a floating-point
8194 mips_fpr_return_fields (tree valtype, tree *fields)
8202 if (TREE_CODE (valtype) != RECORD_TYPE)
/* Walk every member of the record; only FIELD_DECLs count.  */
8206 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8208 if (TREE_CODE (field) != FIELD_DECL)
/* Any non-floating-point field disqualifies the whole record.  */
8211 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
/* NOTE(review): interior lines are elided here (likely a two-field
   limit check and the early returns) -- confirm in the full file.  */
8217 fields[i++] = field;
8223 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8224 a value in the most significant part of $2/$3 if:
8226 - the target is big-endian;
8228 - the value has a structure or union type (we generalize this to
8229 cover aggregates from other languages too); and
8231 - the structure is not returned in floating-point registers. */
8234 mips_return_in_msb (tree valtype)
/* All four conditions listed above must hold; mips_fpr_return_fields
   returning 0 means the aggregate is NOT returned in FPRs.  */
8238 return (TARGET_NEWABI
8239 && TARGET_BIG_ENDIAN
8240 && AGGREGATE_TYPE_P (valtype)
8241 && mips_fpr_return_fields (valtype, fields) == 0);
8245 /* Return a composite value in a pair of floating-point registers.
8246 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8247 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8250 For n32 & n64, $f0 always holds the first value and $f2 the second.
8251 Otherwise the values are packed together as closely as possible. */
8254 mips_return_fpr_pair (enum machine_mode mode,
8255 enum machine_mode mode1, HOST_WIDE_INT offset1,
8256 enum machine_mode mode2, HOST_WIDE_INT offset2)
/* Register stride between the two halves: 2 for the new ABIs
   ($f0/$f2), otherwise one full FP format's worth of registers.  */
8260 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
/* Build a PARALLEL describing both pieces of the return value.
   NOTE(review): the EXPR_LIST carrying GEN_INT (offset1) for the first
   element is elided in this excerpt -- confirm in the full file.  */
8261 return gen_rtx_PARALLEL
8264 gen_rtx_EXPR_LIST (VOIDmode,
8265 gen_rtx_REG (mode1, FP_RETURN),
8267 gen_rtx_EXPR_LIST (VOIDmode,
8268 gen_rtx_REG (mode2, FP_RETURN + inc),
8269 GEN_INT (offset2))));
8274 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
8275 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
8276 VALTYPE is null and MODE is the mode of the return value. */
8279 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
8280 enum machine_mode mode)
/* NOTE(review): the guard around this first section (presumably
   "if (valtype)") is elided in this excerpt -- confirm.  */
8287 mode = TYPE_MODE (valtype);
8288 unsignedp = TYPE_UNSIGNED (valtype);
8290 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
8291 true, we must promote the mode just as PROMOTE_MODE does. */
8292 mode = promote_mode (valtype, mode, &unsignedp, 1);
8294 /* Handle structures whose fields are returned in $f0/$f2. */
8295 switch (mips_fpr_return_fields (valtype, fields))
/* One FP field: the whole value comes back in $f0.  */
8298 return gen_rtx_REG (mode, FP_RETURN)
/* Two FP fields: one in $f0, one in $f2 (see mips_return_fpr_pair).  */;
8301 return mips_return_fpr_pair (mode,
8302 TYPE_MODE (TREE_TYPE (fields[0])),
8303 int_byte_position (fields[0]),
8304 TYPE_MODE (TREE_TYPE (fields[1])),
8305 int_byte_position (fields[1]));
8308 /* If a value is passed in the most significant part of a register, see
8309 whether we have to round the mode up to a whole number of words. */
8310 if (mips_return_in_msb (valtype))
8312 HOST_WIDE_INT size = int_size_in_bytes (valtype);
8313 if (size % UNITS_PER_WORD != 0)
8315 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
8316 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
8320 /* For EABI, the class of return register depends entirely on MODE.
8321 For example, "struct { some_type x; }" and "union { some_type x; }"
8322 are returned in the same way as a bare "some_type" would be.
8323 Other ABIs only use FPRs for scalar, complex or vector types. */
8324 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
8325 return gen_rtx_REG (mode, GP_RETURN);
8330 /* Handle long doubles for n32 & n64. */
/* NOTE(review): the mode test guarding this branch (original line
   8331) is elided; presumably it checks for TFmode -- confirm.  */
8332 return mips_return_fpr_pair (mode,
8334 DImode, GET_MODE_SIZE (mode) / 2);
8336 if (mips_return_mode_in_fpr_p (mode))
/* Complex floats split across an FPR pair; the imaginary part lives
   at byte offset GET_MODE_SIZE (mode) / 2.  */
8338 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8339 return mips_return_fpr_pair (mode,
8340 GET_MODE_INNER (mode), 0,
8341 GET_MODE_INNER (mode),
8342 GET_MODE_SIZE (mode) / 2);
8344 return gen_rtx_REG (mode, FP_RETURN);
/* Default: return in the general-purpose return register $2.  */
8348 return gen_rtx_REG (mode, GP_RETURN);
8351 /* Return nonzero when an argument must be passed by reference. */
8354 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8355 enum machine_mode mode, tree type,
8356 bool named ATTRIBUTE_UNUSED)
8358 if (mips_abi == ABI_EABI)
8362 /* ??? How should SCmode be handled? */
/* 8-byte scalars are passed in registers under EABI, so they never
   need to go by reference.  NOTE(review): the return value of this
   branch is elided in this excerpt -- presumably false.  */
8363 if (mode == DImode || mode == DFmode)
/* Anything wider than one word, or of unknown size, goes by
   reference under EABI.  */
8366 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
8367 return size == -1 || size > UNITS_PER_WORD;
8371 /* If we have a variable-sized parameter, we have no choice. */
8372 return targetm.calls.must_pass_in_stack (mode, type);
/* Implement TARGET_CALLEE_COPIES: under EABI the callee is responsible
   for copying named by-reference arguments.  */
8377 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8378 enum machine_mode mode ATTRIBUTE_UNUSED,
8379 tree type ATTRIBUTE_UNUSED, bool named)
8381 return mips_abi == ABI_EABI && named;
8384 /* Return true if registers of class CLASS cannot change from mode FROM
8388 mips_cannot_change_mode_class (enum machine_mode from,
8389 enum machine_mode to, enum reg_class class)
/* The mode change crosses the single-word/multi-word boundary.  */
8391 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
8392 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
8394 if (TARGET_BIG_ENDIAN)
8396 /* When a multi-word value is stored in paired floating-point
8397 registers, the first register always holds the low word.
8398 We therefore can't allow FPRs to change between single-word
8399 and multi-word modes. */
8400 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
8405 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
8406 in LO and HI, the high word always comes first. We therefore
8407 can't allow values stored in HI to change between single-word
8408 and multi-word modes.
8409 This rule applies to both the original HI/LO pair and the new
8410 DSP accumulators. */
8411 if (reg_classes_intersect_p (ACC_REGS, class))
8416 /* gcc assumes that each word of a multiword register can be accessed
8417 individually using SUBREGs. This is not true for floating-point
8418 registers if they are bigger than a word. */
8419 if (UNITS_PER_FPREG > UNITS_PER_WORD
8420 && GET_MODE_SIZE (from) > UNITS_PER_WORD
8421 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
8422 && reg_classes_intersect_p (FP_REGS, class))
8425 /* Loading a 32-bit value into a 64-bit floating-point register
8426 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8427 We can't allow 64-bit float registers to change from SImode to
/* NOTE(review): the first conjunct(s) of this condition (original
   lines 8430-8431) are elided in this excerpt -- presumably a
   TARGET_64BIT/FLOAT64 and "from == SImode" test; confirm.  */
8432 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
8433 && reg_classes_intersect_p (FP_REGS, class))
8439 /* Return true if X should not be moved directly into register $25.
8440 We need this because many versions of GAS will treat "la $25,foo" as
8441 part of a call sequence and so allow a global "foo" to be lazily bound. */
8444 mips_dangerous_for_la25_p (rtx x)
/* NOTE(review): one conjunct of this condition (original line 8447)
   is elided in this excerpt -- presumably a TARGET_USE_GOT test;
   confirm in the full file.  */
8446 return (!TARGET_EXPLICIT_RELOCS
8448 && GET_CODE (x) == SYMBOL_REF
8449 && mips_global_symbol_p (x));
8452 /* Implement PREFERRED_RELOAD_CLASS. */
8455 mips_preferred_reload_class (rtx x, enum reg_class class)
/* Symbols that GAS would mishandle in $25 must be loaded through
   LEA_REGS instead.  NOTE(review): the return values of these
   branches are elided in this excerpt.  */
8457 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
/* Prefer FPRs for hard-float values when the class allows them.  */
8460 if (TARGET_HARD_FLOAT
8461 && FLOAT_MODE_P (GET_MODE (x))
8462 && reg_class_subset_p (FP_REGS, class))
8465 if (reg_class_subset_p (GR_REGS, class))
/* In mips16 mode only the eight M16_REGS are directly addressable.  */
8468 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8474 /* This function returns the register class required for a secondary
8475 register when copying between one of the registers in CLASS, and X,
8476 using MODE. If IN_P is nonzero, the copy is going from X to the
8477 register, otherwise the register is the source. A return value of
8478 NO_REGS means that no secondary register is required. */
8481 mips_secondary_reload_class (enum reg_class class,
8482 enum machine_mode mode, rtx x, int in_p)
8484 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
/* Resolve X to a hard register number when possible; REGNO is used by
   the tests below.  NOTE(review): its initialisation for the non-REG
   case is elided in this excerpt.  */
8488 if (REG_P (x)|| GET_CODE (x) == SUBREG)
8489 regno = true_regnum (x);
8491 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
8493 if (mips_dangerous_for_la25_p (x))
/* $25 must not receive lazily-bindable symbols directly; reload
   through LEA_REGS instead.  NOTE(review): return elided.  */
8496 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
8500 /* Copying from HI or LO to anywhere other than a general register
8501 requires a general register.
8502 This rule applies to both the original HI/LO pair and the new
8503 DSP accumulators. */
8504 if (reg_class_subset_p (class, ACC_REGS))
8506 if (TARGET_MIPS16 && in_p)
8508 /* We can't really copy to HI or LO at all in mips16 mode. */
8511 return gp_reg_p ? NO_REGS : gr_regs;
8513 if (ACC_REG_P (regno))
8515 if (TARGET_MIPS16 && ! in_p)
8517 /* We can't really copy to HI or LO at all in mips16 mode. */
8520 return class == gr_regs ? NO_REGS : gr_regs;
8523 /* We can only copy a value to a condition code register from a
8524 floating point register, and even then we require a scratch
8525 floating point register. We can only copy a value out of a
8526 condition code register into a general register. */
8527 if (class == ST_REGS)
/* NOTE(review): the in_p/FP_REGS branch (original lines 8529-8530)
   is elided here -- confirm against the full file.  */
8531 return gp_reg_p ? NO_REGS : gr_regs;
8533 if (ST_REG_P (regno))
8537 return class == gr_regs ? NO_REGS : gr_regs;
8540 if (class == FP_REGS)
/* NOTE(review): the MEM test guarding this branch is elided.  */
8544 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8547 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
8549 /* We can use the l.s and l.d macros to load floating-point
8550 constants. ??? For l.s, we could probably get better
8551 code by returning GR_REGS here. */
8554 else if (gp_reg_p || x == CONST0_RTX (mode))
8556 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8559 else if (FP_REG_P (regno))
8561 /* In this case we can use mov.s or mov.d. */
8566 /* Otherwise, we need to reload through an integer register. */
8571 /* In mips16 mode, going between memory and anything but M16_REGS
8572 requires an M16_REG. */
8575 if (class != M16_REGS && class != M16_NA_REGS)
8583 if (class == M16_REGS || class == M16_NA_REGS)
8592 /* Implement CLASS_MAX_NREGS.
8594 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8596 - ST_REGS are always hold CCmode values, and CCmode values are
8597 considered to be 4 bytes wide.
8599 All other register classes are covered by UNITS_PER_WORD. Note that
8600 this is true even for unions of integer and float registers when the
8601 latter are smaller than the former. The only supported combination
8602 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
8603 words but 32-bit float registers. A word-based calculation is correct
8604 in that case since -msingle-float disallows multi-FPR values. */
8607 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8608 enum machine_mode mode)
/* Each calculation below rounds the mode size up to a whole number of
   the relevant register-sized units.  */
8610 if (class == ST_REGS)
8611 return (GET_MODE_SIZE (mode) + 3) / 4;
8612 else if (class == FP_REGS)
8613 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8615 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
/* Implement TARGET_VALID_POINTER_MODE: SImode pointers are always
   valid; DImode pointers additionally require a 64-bit target.  */
8619 mips_valid_pointer_mode (enum machine_mode mode)
8621 return (mode == SImode || (TARGET_64BIT && mode == DImode));
8624 /* Target hook for vector_mode_supported_p. */
8627 mips_vector_mode_supported_p (enum machine_mode mode)
/* NOTE(review): the switch over MODE is elided in this excerpt; the
   visible case returns TARGET_PAIRED_SINGLE_FLOAT, presumably for
   V2SFmode -- confirm in the full file.  */
8632 return TARGET_PAIRED_SINGLE_FLOAT;
8643 /* If we can access small data directly (using gp-relative relocation
8644 operators) return the small data pointer, otherwise return null.
8646 For each mips16 function which refers to GP relative symbols, we
8647 use a pseudo register, initialized at the start of the function, to
8648 hold the $gp value. */
8651 mips16_gp_pseudo_reg (void)
/* Lazily create and initialize the pseudo the first time it is
   requested for the current function.  */
8653 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
8657 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
8659 /* We want to initialize this to a value which gcc will believe
8661 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
8663 push_topmost_sequence ();
8664 /* We need to emit the initialization after the FUNCTION_BEG
8665 note, so that it will be integrated. */
/* Scan forward for the FUNCTION_BEG note; NOTE(review): part of the
   loop condition (original line 8667, presumably a NOTE_P test) is
   elided in this excerpt.  */
8666 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
8668 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
/* If no such note exists, fall back to the very first insn.  */
8670 if (scan == NULL_RTX)
8671 scan = get_insns ();
8672 insn = emit_insn_after (insn, scan);
8673 pop_topmost_sequence ();
8676 return cfun->machine->mips16_gp_pseudo_rtx;
8679 /* Write out code to move floating point arguments in or out of
8680 general registers. Output the instructions to FILE. FP_CODE is
8681 the code describing which arguments are present (see the comment at
8682 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
8683 we are copying from the floating point registers. */
8686 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
8691 CUMULATIVE_ARGS cum;
8693 /* This code only works for the original 32-bit ABI and the O64 ABI. */
8694 gcc_assert (TARGET_OLDABI);
/* NOTE(review): the setup of the mnemonic string `s' (mfc1 vs. mtc1,
   chosen by FROM_FP_P, original lines ~8696-8699) is elided in this
   excerpt -- confirm in the full file.  */
8701 init_cumulative_args (&cum, NULL, NULL);
/* FP_CODE packs one two-bit field per FP argument: 1 = float,
   2 = double (see the loop tests below).  */
8703 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8705 enum machine_mode mode;
8706 struct mips_arg_info info;
/* NOTE(review): the (f & 3) == 1 / SFmode branch is elided here.  */
8710 else if ((f & 3) == 2)
8715 mips_arg_info (&cum, mode, NULL, true, &info);
8716 gparg = mips_arg_regno (&info, false);
8717 fparg = mips_arg_regno (&info, true);
/* Single-word values need one move; doubleword values need either a
   single d-prefixed move (64-bit), an mfhc1/mthc1 pair (mips32r2
   -mfp64), or two 32-bit moves whose order depends on endianness.  */
8720 fprintf (file, "\t%s\t%s,%s\n", s,
8721 reg_names[gparg], reg_names[fparg]);
8722 else if (TARGET_64BIT)
8723 fprintf (file, "\td%s\t%s,%s\n", s,
8724 reg_names[gparg], reg_names[fparg]);
8725 else if (ISA_HAS_MXHC1)
8726 /* -mips32r2 -mfp64 */
8727 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
8729 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
8731 from_fp_p ? "mfhc1" : "mthc1",
8732 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
8734 else if (TARGET_BIG_ENDIAN)
8735 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8736 reg_names[gparg], reg_names[fparg + 1], s,
8737 reg_names[gparg + 1], reg_names[fparg]);
8739 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8740 reg_names[gparg], reg_names[fparg], s,
8741 reg_names[gparg + 1], reg_names[fparg + 1]);
8743 function_arg_advance (&cum, mode, NULL, true);
8747 /* Build a mips16 function stub. This is used for functions which
8748 take arguments in the floating point registers. It is 32-bit code
8749 that moves the floating point args into the general registers, and
8750 then jumps to the 16-bit code. */
8753 build_mips16_function_stub (FILE *file)
8756 char *secname, *stubname;
8757 tree stubid, stubdecl;
/* Derive the stub's section (.mips16.fn.NAME) and symbol
   (__fn_stub_NAME) from the current function's assembler name.  */
8761 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8762 secname = (char *) alloca (strlen (fnname) + 20);
8763 sprintf (secname, ".mips16.fn.%s", fnname);
8764 stubname = (char *) alloca (strlen (fnname) + 20);
8765 sprintf (stubname, "__fn_stub_%s", fnname);
8766 stubid = get_identifier (stubname);
8767 stubdecl = build_decl (FUNCTION_DECL, stubid,
8768 build_function_type (void_type_node, NULL_TREE));
8769 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8770 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
/* Emit a human-readable comment listing the FP argument types.  */
8772 fprintf (file, "\t# Stub function for %s (", current_function_name ());
8774 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
8776 fprintf (file, "%s%s",
8777 need_comma ? ", " : "",
8778 (f & 3) == 1 ? "float" : "double");
8781 fprintf (file, ")\n");
/* The stub itself is 32-bit code, hence .set nomips16.  */
8783 fprintf (file, "\t.set\tnomips16\n");
8784 switch_to_section (function_section (stubdecl));
8785 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
8787 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
8788 within a .ent, and we cannot emit another .ent. */
8789 if (!FUNCTION_NAME_ALREADY_DECLARED)
8791 fputs ("\t.ent\t", file);
8792 assemble_name (file, stubname);
8796 assemble_name (file, stubname);
8797 fputs (":\n", file);
8799 /* We don't want the assembler to insert any nops here. */
8800 fprintf (file, "\t.set\tnoreorder\n")
/* Move the FP arguments into GPRs (from_fp_p == 1).  */;
8802 mips16_fp_args (file, current_function_args_info.fp_code, 1);
/* Jump to the real (mips16) function through $1 ($at).  */
8804 fprintf (asm_out_file, "\t.set\tnoat\n");
8805 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
8806 assemble_name (file, fnname);
8807 fprintf (file, "\n");
8808 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8809 fprintf (asm_out_file, "\t.set\tat\n");
8811 /* Unfortunately, we can't fill the jump delay slot. We can't fill
8812 with one of the mfc1 instructions, because the result is not
8813 available for one instruction, so if the very first instruction
8814 in the function refers to the register, it will see the wrong
8816 fprintf (file, "\tnop\n");
8818 fprintf (file, "\t.set\treorder\n");
8820 if (!FUNCTION_NAME_ALREADY_DECLARED)
8822 fputs ("\t.end\t", file);
8823 assemble_name (file, stubname);
8827 fprintf (file, "\t.set\tmips16\n");
/* Return to the real function's section to continue emitting it.  */
8829 switch_to_section (function_section (current_function_decl));
8832 /* We keep a list of functions for which we have already built stubs
8833 in build_mips16_call_stub. */
/* NOTE(review): the remaining struct fields are elided in this
   excerpt; build_mips16_call_stub later reads l->name and l->fpret,
   so the struct presumably also declares those -- confirm.  */
8837 struct mips16_stub *next;
/* Head of the singly-linked list of already-built call stubs.  */
8842 static struct mips16_stub *mips16_stubs;
8844 /* Emit code to return a double value from a mips16 stub. GPREG is the
8845 first GP reg to use, FPREG is the first FP reg to use. */
8848 mips16_fpret_double (int gpreg, int fpreg)
/* NOTE(review): the guard for this first branch (presumably a
   TARGET_64BIT test, given the dmfc1 below) is elided here.  */
8851 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
8852 reg_names[gpreg], reg_names[fpreg]);
8853 else if (TARGET_FLOAT64)
/* 32-bit GPRs with 64-bit FPRs: move low word with mfc1 and the high
   word with mfhc1, ordering the GPR pair by endianness.  */
8855 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8856 reg_names[gpreg + WORDS_BIG_ENDIAN],
8858 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
8859 reg_names[gpreg + !WORDS_BIG_ENDIAN],
/* Paired 32-bit FPRs: two mfc1 moves, crossed on big-endian so the
   high-order word lands in the lower-numbered GPR.  */
8864 if (TARGET_BIG_ENDIAN)
8866 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8867 reg_names[gpreg + 0],
8868 reg_names[fpreg + 1]);
8869 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8870 reg_names[gpreg + 1],
8871 reg_names[fpreg + 0]);
8875 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8876 reg_names[gpreg + 0],
8877 reg_names[fpreg + 0]);
8878 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8879 reg_names[gpreg + 1],
8880 reg_names[fpreg + 1]);
8885 /* Build a call stub for a mips16 call. A stub is needed if we are
8886 passing any floating point values which should go into the floating
8887 point registers. If we are, and the call turns out to be to a
8888 32-bit function, the stub will be used to move the values into the
8889 floating point registers before calling the 32-bit function. The
8890 linker will magically adjust the function call to either the 16-bit
8891 function or the 32-bit stub, depending upon where the function call
8892 is actually defined.
8894 Similarly, we need a stub if the return value might come back in a
8895 floating point register.
8897 RETVAL is the location of the return value, or null if this is
8898 a call rather than a call_value. FN is the address of the
8899 function and ARG_SIZE is the size of the arguments. FP_CODE
8900 is the code built by function_arg. This function returns a nonzero
8901 value if it builds the call instruction itself. */
8904 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
8908 char *secname, *stubname;
8909 struct mips16_stub *l;
8910 tree stubid, stubdecl;
8914 /* We don't need to do anything if we aren't in mips16 mode, or if
8915 we were invoked with the -msoft-float option. */
8916 if (!mips16_hard_float)
8919 /* Figure out whether the value might come back in a floating point
8922 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
8924 /* We don't need to do anything if there were no floating point
8925 arguments and the value will not be returned in a floating point
8927 if (fp_code == 0 && ! fpret)
8930 /* We don't need to do anything if this is a call to a special
8931 mips16 support function. */
8932 if (GET_CODE (fn) == SYMBOL_REF
8933 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
8936 /* This code will only work for o32 and o64 abis. The other ABI's
8937 require more sophisticated support. */
8938 gcc_assert (TARGET_OLDABI);
8940 /* If we're calling via a function pointer, then we must always call
8941 via a stub. There are magic stubs provided in libgcc.a for each
8942 of the required cases. Each of them expects the function address
8943 to arrive in register $2. */
8945 if (GET_CODE (fn) != SYMBOL_REF)
8951 /* ??? If this code is modified to support other ABI's, we need
8952 to handle PARALLEL return values here. */
/* Pick the libgcc stub name: FP-returning variants are suffixed with
   the return mode, plain variants with just fp_code.
   NOTE(review): the fpret test selecting between the two sprintf
   calls, and their trailing fp_code arguments, are elided here.  */
8955 sprintf (buf, "__mips16_call_stub_%s_%d",
8956 mips16_call_stub_mode_suffix (GET_MODE (retval)),
8959 sprintf (buf, "__mips16_call_stub_%d",
8962 id = get_identifier (buf);
8963 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
/* The libgcc stubs expect the target address in $2.  */
8965 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
8967 if (retval == NULL_RTX)
8968 insn = gen_call_internal (stub_fn, arg_size);
8970 insn = gen_call_value_internal (retval, stub_fn, arg_size);
8971 insn = emit_call_insn (insn);
8973 /* Put the register usage information on the CALL. */
8974 CALL_INSN_FUNCTION_USAGE (insn) =
8975 gen_rtx_EXPR_LIST (VOIDmode,
8976 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
8977 CALL_INSN_FUNCTION_USAGE (insn));
8979 /* If we are handling a floating point return value, we need to
8980 save $18 in the function prologue. Putting a note on the
8981 call will mean that df_regs_ever_live_p ($18) will be true if the
8982 call is not eliminated, and we can check that in the prologue
8985 CALL_INSN_FUNCTION_USAGE (insn) =
8986 gen_rtx_EXPR_LIST (VOIDmode,
8987 gen_rtx_USE (VOIDmode,
8988 gen_rtx_REG (word_mode, 18)),
8989 CALL_INSN_FUNCTION_USAGE (insn));
8991 /* Return 1 to tell the caller that we've generated the call
8996 /* We know the function we are going to call. If we have already
8997 built a stub, we don't need to do anything further. */
8999 fnname = XSTR (fn, 0);
9000 for (l = mips16_stubs; l != NULL; l = l->next)
9001 if (strcmp (l->name, fnname) == 0)
9006 /* Build a special purpose stub. When the linker sees a
9007 function call in mips16 code, it will check where the target
9008 is defined. If the target is a 32-bit call, the linker will
9009 search for the section defined here. It can tell which
9010 symbol this section is associated with by looking at the
9011 relocation information (the name is unreliable, since this
9012 might be a static function). If such a section is found, the
9013 linker will redirect the call to the start of the magic
9016 If the function does not return a floating point value, the
9017 special stub section is named
9020 If the function does return a floating point value, the stub
9022 .mips16.call.fp.FNNAME
/* Section/symbol names are built from FNNAME plus an "fp." infix
   when the return value is floating point.  NOTE(review): the
   trailing sprintf arguments (the fpret-dependent infix and the
   name) are elided in this excerpt.  */
9025 secname = (char *) alloca (strlen (fnname) + 40);
9026 sprintf (secname, ".mips16.call.%s%s",
9029 stubname = (char *) alloca (strlen (fnname) + 20);
9030 sprintf (stubname, "__call_stub_%s%s",
9033 stubid = get_identifier (stubname);
9034 stubdecl = build_decl (FUNCTION_DECL, stubid,
9035 build_function_type (void_type_node, NULL_TREE));
9036 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9037 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9039 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9041 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9045 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9047 fprintf (asm_out_file, "%s%s",
9048 need_comma ? ", " : "",
9049 (f & 3) == 1 ? "float" : "double");
9052 fprintf (asm_out_file, ")\n");
/* The stub is 32-bit code.  */
9054 fprintf (asm_out_file, "\t.set\tnomips16\n");
9055 assemble_start_function (stubdecl, stubname);
9057 if (!FUNCTION_NAME_ALREADY_DECLARED)
9059 fputs ("\t.ent\t", asm_out_file);
9060 assemble_name (asm_out_file, stubname);
9061 fputs ("\n", asm_out_file);
9063 assemble_name (asm_out_file, stubname);
9064 fputs (":\n", asm_out_file);
9067 /* We build the stub code by hand. That's the only way we can
9068 do it, since we can't generate 32-bit code during a 16-bit
9071 /* We don't want the assembler to insert any nops here. */
9072 fprintf (asm_out_file, "\t.set\tnoreorder\n")
/* Move GPR-held FP arguments into the FP registers (from_fp_p == 0).  */;
9074 mips16_fp_args (asm_out_file, fp_code, 0);
/* NOTE(review): the "if (! fpret)" split between the plain tail-jump
   below and the $18-return-path further down is elided here.  */
9078 fprintf (asm_out_file, "\t.set\tnoat\n");
9079 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9081 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9082 fprintf (asm_out_file, "\t.set\tat\n");
9083 /* Unfortunately, we can't fill the jump delay slot. We
9084 can't fill with one of the mtc1 instructions, because the
9085 result is not available for one instruction, so if the
9086 very first instruction in the function refers to the
9087 register, it will see the wrong value. */
9088 fprintf (asm_out_file, "\tnop\n");
/* FP-returning path: save the return address in $18, call the real
   function, then move the FP result into GPRs before returning.  */
9092 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9093 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9094 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9095 /* As above, we can't fill the delay slot. */
9096 fprintf (asm_out_file, "\tnop\n");
9097 if (GET_MODE (retval) == SFmode)
9098 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9099 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9100 else if (GET_MODE (retval) == SCmode)
9102 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9103 reg_names[GP_REG_FIRST + 2],
9104 reg_names[FP_REG_FIRST + 0]);
9105 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9106 reg_names[GP_REG_FIRST + 3],
9107 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9109 else if (GET_MODE (retval) == DFmode
9110 || GET_MODE (retval) == V2SFmode)
9112 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9114 else if (GET_MODE (retval) == DCmode)
/* NOTE(review): the second argument of the first call (original line
   9117, presumably FP_REG_FIRST + 0) is elided here.  */
9116 mips16_fpret_double (GP_REG_FIRST + 2,
9118 mips16_fpret_double (GP_REG_FIRST + 4,
9119 FP_REG_FIRST + MAX_FPRS_PER_FMT);
/* Remaining case (presumably CCmode/other two-word results): two
   mfc1 moves, crossed on big-endian.  */
9123 if (TARGET_BIG_ENDIAN)
9125 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9126 reg_names[GP_REG_FIRST + 2],
9127 reg_names[FP_REG_FIRST + 1]);
9128 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9129 reg_names[GP_REG_FIRST + 3],
9130 reg_names[FP_REG_FIRST + 0]);
9134 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9135 reg_names[GP_REG_FIRST + 2],
9136 reg_names[FP_REG_FIRST + 0]);
9137 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9138 reg_names[GP_REG_FIRST + 3],
9139 reg_names[FP_REG_FIRST + 1]);
/* Return to the caller via the saved return address in $18.  */
9142 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9143 /* As above, we can't fill the delay slot. */
9144 fprintf (asm_out_file, "\tnop\n");
9147 fprintf (asm_out_file, "\t.set\treorder\n");
9149 #ifdef ASM_DECLARE_FUNCTION_SIZE
9150 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9153 if (!FUNCTION_NAME_ALREADY_DECLARED)
9155 fputs ("\t.end\t", asm_out_file);
9156 assemble_name (asm_out_file, stubname);
9157 fputs ("\n", asm_out_file);
9160 fprintf (asm_out_file, "\t.set\tmips16\n");
9162 /* Record this stub. */
9163 l = (struct mips16_stub *) xmalloc (sizeof *l);
9164 l->name = xstrdup (fnname);
9166 l->next = mips16_stubs;
9170 /* If we expect a floating point return value, but we've built a
9171 stub which does not expect one, then we're in trouble. We can't
9172 use the existing stub, because it won't handle the floating point
9173 value. We can't build a new stub, because the linker won't know
9174 which stub to use for the various calls in this object file.
9175 Fortunately, this case is illegal, since it means that a function
9176 was declared in two different ways in a single compilation. */
9177 if (fpret && ! l->fpret)
9178 error ("cannot handle inconsistent calls to %qs", fnname);
9180 /* If we are calling a stub which handles a floating point return
9181 value, we need to arrange to save $18 in the prologue. We do
9182 this by marking the function call as using the register. The
9183 prologue will later see that it is used, and emit code to save
9190 if (retval == NULL_RTX)
9191 insn = gen_call_internal (fn, arg_size);
9193 insn = gen_call_value_internal (retval, fn, arg_size);
9194 insn = emit_call_insn (insn);
9196 CALL_INSN_FUNCTION_USAGE (insn) =
9197 gen_rtx_EXPR_LIST (VOIDmode,
9198 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9199 CALL_INSN_FUNCTION_USAGE (insn));
9201 /* Return 1 to tell the caller that we've generated the call
9206 /* Return 0 to let the caller generate the call insn. */
9210 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9211 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9213 struct mips16_constant {
/* Singly-linked list link; the pool is kept sorted by increasing mode size
   (see add_constant below). */
9214 struct mips16_constant *next;
/* NOTE(review): the VALUE and LABEL members described in the comment above
   are not visible here — lines appear to be missing from this excerpt;
   confirm against the full source. */
9217 enum machine_mode mode;
9220 /* Information about an incomplete mips16 constant pool. FIRST is the
9221 first constant, HIGHEST_ADDRESS is the highest address that the first
9222 byte of the pool can have, and INSN_ADDRESS is the current instruction
9225 struct mips16_constant_pool {
/* Head of the sorted list of pool entries; null while the pool is empty. */
9226 struct mips16_constant *first;
/* First out-of-range byte address for the pool, adjusted downwards as
   constants are added (see add_constant). */
9227 int highest_address;
/* NOTE(review): the INSN_ADDRESS member referenced by add_constant and
   mips16_lay_out_constants is not visible here — lines appear to be
   missing from this excerpt. */
9231 /* Add constant VALUE to POOL and return its label. MODE is the
9232 value's mode (used for CONST_INTs, etc.). */
9235 add_constant (struct mips16_constant_pool *pool,
9236 rtx value, enum machine_mode mode)
9238 struct mips16_constant **p, *c;
9239 bool first_of_size_p;
9241 /* See whether the constant is already in the pool. If so, return the
9242 existing label, otherwise leave P pointing to the place where the
9243 constant should be added.
9245 Keep the pool sorted in increasing order of mode size so that we can
9246 reduce the number of alignments needed. */
9247 first_of_size_p = true;
9248 for (p = &pool->first; *p != 0; p = &(*p)->next)
/* Reuse an existing entry when both the mode and the rtx value match. */
9250 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9252 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9254 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9255 first_of_size_p = false;
9258 /* In the worst case, the constant needed by the earliest instruction
9259 will end up at the end of the pool. The entire pool must then be
9260 accessible from that instruction.
9262 When adding the first constant, set the pool's highest address to
9263 the address of the first out-of-range byte. Adjust this address
9264 downwards each time a new constant is added. */
9265 if (pool->first == 0)
9266 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9267 is the address of the instruction with the lowest two bits clear.
9268 The base PC value for ld has the lowest three bits clear. Assume
9269 the worst case here. */
9270 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
9271 pool->highest_address -= GET_MODE_SIZE (mode);
9272 if (first_of_size_p)
9273 /* Take into account the worst possible padding due to alignment. */
9274 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
9276 /* Create a new entry. */
9277 c = (struct mips16_constant *) xmalloc (sizeof *c);
/* NOTE(review): initialization of C's value/mode/next fields and the
   "return c->label" tail are not visible in this excerpt. */
9280 c->label = gen_label_rtx ();
9287 /* Output constant VALUE after instruction INSN and return the last
9288 instruction emitted. MODE is the mode of the constant. */
9291 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
/* Dispatch on the class of MODE: integers and floats go out as single
   consttable entries; vectors are emitted element by element. */
9293 switch (GET_MODE_CLASS (mode))
9297 rtx size = GEN_INT (GET_MODE_SIZE (mode));
9298 return emit_insn_after (gen_consttable_int (value, size), insn);
9302 return emit_insn_after (gen_consttable_float (value), insn);
9304 case MODE_VECTOR_FLOAT:
9305 case MODE_VECTOR_INT:
/* Recurse on each vector element in its inner (scalar) mode, chaining
   INSN forward so entries stay in order. */
9308 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
9309 insn = dump_constants_1 (GET_MODE_INNER (mode),
9310 CONST_VECTOR_ELT (value, i), insn);
9320 /* Dump out the constants in CONSTANTS after INSN. */
9323 dump_constants (struct mips16_constant *constants, rtx insn)
9325 struct mips16_constant *c, *next;
/* Walk the pool list; entries are sorted by increasing size, so ALIGN
   only ever needs to grow. */
9329 for (c = constants; c != NULL; c = next)
9331 /* If necessary, increase the alignment of PC. */
9332 if (align < GET_MODE_SIZE (c->mode))
9334 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
9335 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
9337 align = GET_MODE_SIZE (c->mode);
/* Emit the entry's label followed by its data.  NOTE(review): the lines
   that advance NEXT and free C are not visible in this excerpt. */
9339 insn = emit_label_after (c->label, insn);
9340 insn = dump_constants_1 (c->mode, c->value, insn);
/* Terminate the pool so the flow-sensitive passes know execution cannot
   fall into the data. */
9346 emit_barrier_after (insn);
9349 /* Return the length of instruction INSN. */
9352 mips16_insn_length (rtx insn)
9356 rtx body = PATTERN (insn);
/* Jump tables: total size is entry size times the number of entries.
   ADDR_VEC keeps its entries in operand vector 0... */
9357 if (GET_CODE (body) == ADDR_VEC)
9358 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
/* ...while ADDR_DIFF_VEC keeps them in operand vector 1. */
9359 if (GET_CODE (body) == ADDR_DIFF_VEC)
9360 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
/* Everything else: use the length computed by the .md "length" attribute. */
9362 return get_attr_length (insn);
9365 /* Rewrite *X so that constant pool references refer to the constant's
9366 label instead. DATA points to the constant pool structure.
 
   A for_each_rtx callback: when *X is a SYMBOL_REF into the function's
   constant pool, replace it with a LABEL_REF to the matching (or newly
   created) mips16 pool entry. */
9369 mips16_rewrite_pool_refs (rtx *x, void *data)
9371 struct mips16_constant_pool *pool = data;
9372 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
9373 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
9374 get_pool_constant (*x),
9375 get_pool_mode (*x)));
9379 /* Build MIPS16 constant pools.
 
   Scan the whole insn chain, redirecting pool references to local pool
   entries and tracking the running byte address.  Whenever the pending
   pool would go out of range of its first user, dump it at the previous
   barrier (creating a jump-around-pool if no barrier is available). */
9382 mips16_lay_out_constants (void)
9384 struct mips16_constant_pool pool;
9388 memset (&pool, 0, sizeof (pool));
9389 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9391 /* Rewrite constant pool references in INSN. */
9393 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
/* Keep a running total of code bytes so far, used for range checks. */
9395 pool.insn_address += mips16_insn_length (insn);
9397 if (pool.first != NULL)
9399 /* If there are no natural barriers between the first user of
9400 the pool and the highest acceptable address, we'll need to
9401 create a new instruction to jump around the constant pool.
9402 In the worst case, this instruction will be 4 bytes long.
9404 If it's too late to do this transformation after INSN,
9405 do it immediately before INSN. */
9406 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
9410 label = gen_label_rtx ()
9412 jump = emit_jump_insn_before (gen_jump (label), insn);
9413 JUMP_LABEL (jump) = label;
9414 LABEL_NUSES (label) = 1;
9415 barrier = emit_barrier_after (jump);
9417 emit_label_after (label, barrier);
/* Account for the 4-byte jump we just inserted. */
9418 pool.insn_address += 4;
9421 /* See whether the constant pool is now out of range of the first
9422 user. If so, output the constants after the previous barrier.
9423 Note that any instructions between BARRIER and INSN (inclusive)
9424 will use negative offsets to refer to the pool. */
9425 if (pool.insn_address > pool.highest_address)
9427 dump_constants (pool.first, barrier);
/* Remember the most recent barrier as a candidate pool location. */
9431 else if (BARRIER_P (insn))
/* Flush any remaining constants at the very end of the function. */
9435 dump_constants (pool.first, get_last_insn ());
9438 /* A temporary variable used by for_each_rtx callbacks, etc.
   Holds the instruction currently being simulated so that callbacks
   such as mips_sim_wait_regs_2 and mips_sim_record_set, which only
   receive a generic DATA pointer, can still reach it. */
9439 static rtx mips_sim_insn;
9441 /* A structure representing the state of the processor pipeline.
9442 Used by the mips_sim_* family of functions. */
9444 /* The maximum number of instructions that can be issued in a cycle.
9445 (Caches mips_issue_rate.) */
9446 unsigned int issue_rate;
9448 /* The current simulation time. */
/* NOTE(review): the "struct mips_sim {" opener and the TIME member
   declaration are not visible here — lines appear to be missing from
   this excerpt. */
9451 /* How many more instructions can be issued in the current cycle. */
9452 unsigned int insns_left;
9454 /* LAST_SET[X].INSN is the last instruction to set register X.
9455 LAST_SET[X].TIME is the time at which that instruction was issued.
9456 INSN is null if no instruction has yet set register X. */
9460 } last_set[FIRST_PSEUDO_REGISTER];
9462 /* The pipeline's current DFA state. */
9466 /* Reset STATE to the initial simulation state: a full issue window,
   no recorded register writes, and a freshly reset DFA. */
9469 mips_sim_reset (struct mips_sim *state)
9472 state->insns_left = state->issue_rate;
/* Forget all LAST_SET entries (null insn means "never set"). */
9473 memset (&state->last_set, 0, sizeof (state->last_set));
9474 state_reset (state->dfa_state);
9477 /* Initialize STATE before its first use. DFA_STATE points to an
9478 allocated but uninitialized DFA state. */
9481 mips_sim_init (struct mips_sim *state, state_t dfa_state)
/* Cache the issue rate once so mips_sim_reset can reuse it cheaply. */
9483 state->issue_rate = mips_issue_rate ();
9484 state->dfa_state = dfa_state;
9485 mips_sim_reset (state);
9488 /* Advance STATE by one clock cycle: refill the issue window and step
   the DFA with a null insn (a cycle advance). */
9491 mips_sim_next_cycle (struct mips_sim *state)
9494 state->insns_left = state->issue_rate;
9495 state_transition (state->dfa_state, 0);
9498 /* Advance simulation state STATE until instruction INSN can read
/* NOTE(review): the rest of this comment and the function signature's
   return type are not visible — lines appear to be missing from this
   excerpt.  REG is the register whose value INSN wants to read. */
9502 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
/* Check every hard register that REG occupies in its mode. */
9506 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
9507 if (state->last_set[REGNO (reg) + i].insn != 0)
/* T is the earliest time the setter's result is available: issue time
   plus the DFA-reported latency between setter and INSN. */
9511 t = state->last_set[REGNO (reg) + i].time;
9512 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
9513 while (state->time < t)
9514 mips_sim_next_cycle (state);
9518 /* A for_each_rtx callback. If *X is a register, advance simulation state
9519 DATA until mips_sim_insn can read the register's value. */
9522 mips_sim_wait_regs_2 (rtx *x, void *data)
/* DATA is really a struct mips_sim *; mips_sim_insn carries the insn
   because for_each_rtx callbacks only get one user pointer. */
9525 mips_sim_wait_reg (data, mips_sim_insn, *x);
9529 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X.
   Used as a note_uses callback; for_each_rtx does the sub-rtx walk. */
9532 mips_sim_wait_regs_1 (rtx *x, void *data)
9534 for_each_rtx (x, mips_sim_wait_regs_2, data);
9537 /* Advance simulation state STATE until all of INSN's register
9538 dependencies are satisfied. */
9541 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
/* Stash INSN in the file-scope variable so the note_uses callbacks can
   see which instruction is waiting. */
9543 mips_sim_insn = insn;
9544 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
9547 /* Advance simulation state STATE until the units required by
9548 instruction INSN are available. */
9551 mips_sim_wait_units (struct mips_sim *state, rtx insn)
/* Probe on a scratch copy of the DFA state so a failed attempt does not
   perturb the real state. */
9555 tmp_state = alloca (state_size ());
/* Keep advancing cycles while either the issue window is exhausted or
   the DFA reports that INSN cannot issue this cycle (>= 0 means a stall). */
9556 while (state->insns_left == 0
9557 || (memcpy (tmp_state, state->dfa_state, state_size ()),
9558 state_transition (tmp_state, insn) >= 0))
9559 mips_sim_next_cycle (state);
9562 /* Advance simulation state STATE until INSN is ready to issue:
   first wait for its register inputs, then for its functional units. */
9565 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
9567 mips_sim_wait_regs (state, insn);
9568 mips_sim_wait_units (state, insn);
9571 /* mips_sim_insn has just set X. Update the LAST_SET array
9572 in simulation state DATA.
 
   note_stores callback: PAT is unused; DATA is a struct mips_sim *. */
9575 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9577 struct mips_sim *state;
/* NOTE(review): the guard that checks X is a REG and the assignment of
   STATE from DATA are not visible — lines appear to be missing from
   this excerpt. */
9582 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
9584 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
9585 state->last_set[REGNO (x) + i].time = state->time;
9589 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
9590 can issue immediately (i.e., that mips_sim_wait_insn has already
/* NOTE(review): the end of this comment and the return-type line are
   not visible in this excerpt. */
9594 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
/* Commit INSN to the real DFA state and consume one issue slot. */
9596 state_transition (state->dfa_state, insn);
9597 state->insns_left--;
/* Record the registers INSN sets, so later readers know when the values
   become available (mips_sim_record_set reads mips_sim_insn). */
9599 mips_sim_insn = insn;
9600 note_stores (PATTERN (insn), mips_sim_record_set, state);
9603 /* Simulate issuing a NOP in state STATE.  A nop consumes an issue slot
   but needs no units or operands, so only the window is adjusted. */
9606 mips_sim_issue_nop (struct mips_sim *state)
9608 if (state->insns_left == 0)
9609 mips_sim_next_cycle (state);
9610 state->insns_left--;
9613 /* Update simulation state STATE so that it's ready to accept the instruction
9614 after INSN. INSN should be part of the main rtl chain, not a member of a
/* NOTE(review): the end of this comment (presumably "...a SEQUENCE") and
   the return type are not visible in this excerpt. */
9618 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
9620 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
9622 mips_sim_issue_nop (state);
/* Dispatch on the first instruction of INSN (or of its SEQUENCE). */
9624 switch (GET_CODE (SEQ_BEGIN (insn)))
9628 /* We can't predict the processor state after a call or label. */
9629 mips_sim_reset (state);
9633 /* The delay slots of branch likely instructions are only executed
9634 when the branch is taken. Therefore, if the caller has simulated
9635 the delay slot instruction, STATE does not really reflect the state
9636 of the pipeline for the instruction after the delay slot. Also,
9637 branch likely instructions tend to incur a penalty when not taken,
9638 so there will probably be an extra delay between the branch and
9639 the instruction after the delay slot. */
9640 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
9641 mips_sim_reset (state);
9649 /* The VR4130 pipeline issues aligned pairs of instructions together,
9650 but it stalls the second instruction if it depends on the first.
9651 In order to cut down the amount of logic required, this dependence
9652 check is not based on a full instruction decode. Instead, any non-SPECIAL
9653 instruction is assumed to modify the register specified by bits 20-16
9654 (which is usually the "rt" field).
9656 In beq, beql, bne and bnel instructions, the rt field is actually an
9657 input, so we can end up with a false dependence between the branch
9658 and its delay slot. If this situation occurs in instruction INSN,
9659 try to avoid it by swapping rs and rt. */
9662 vr4130_avoid_branch_rt_conflict (rtx insn)
9666 first = SEQ_BEGIN (insn);
9667 second = SEQ_END (insn);
/* Only act on a branch-with-delay-slot pair whose first member is a
   PC-setting IF_THEN_ELSE, i.e. a conditional branch. */
9669 && NONJUMP_INSN_P (second)
9670 && GET_CODE (PATTERN (first)) == SET
9671 && GET_CODE (SET_DEST (PATTERN (first))) == PC
9672 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
9674 /* Check for the right kind of condition. */
9675 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
/* EQ/NE are symmetric, so swapping the operands preserves semantics. */
9676 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
9677 && REG_P (XEXP (cond, 0))
9678 && REG_P (XEXP (cond, 1))
9679 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
9680 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
9682 /* SECOND mentions the rt register but not the rs register. */
9683 rtx tmp = XEXP (cond, 0);
9684 XEXP (cond, 0) = XEXP (cond, 1);
9685 XEXP (cond, 1) = tmp;
9690 /* Implement -mvr4130-align. Go through each basic block and simulate the
9691 processor pipeline. If we find that a pair of instructions could execute
9692 in parallel, and the first of those instruction is not 8-byte aligned,
9693 insert a nop to make it aligned. */
9696 vr4130_align_insns (void)
9698 struct mips_sim state;
9699 rtx insn, subinsn, last, last2, next;
9704 /* LAST is the last instruction before INSN to have a nonzero length.
9705 LAST2 is the last such instruction before LAST. */
9709 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
/* The DFA state lives on the stack for the duration of the pass. */
9712 mips_sim_init (&state, alloca (state_size ()));
9713 for (insn = get_insns (); insn != 0; insn = next)
9715 unsigned int length;
9717 next = NEXT_INSN (insn);
9719 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
9720 This isn't really related to the alignment pass, but we do it on
9721 the fly to avoid a separate instruction walk. */
9722 vr4130_avoid_branch_rt_conflict (insn);
9724 if (USEFUL_INSN_P (insn))
9725 FOR_EACH_SUBINSN (subinsn, insn)
9727 mips_sim_wait_insn (&state, subinsn);
9729 /* If we want this instruction to issue in parallel with the
9730 previous one, make sure that the previous instruction is
9731 aligned. There are several reasons why this isn't worthwhile
9732 when the second instruction is a call:
9734 - Calls are less likely to be performance critical,
9735 - There's a good chance that the delay slot can execute
9736 in parallel with the call.
9737 - The return address would then be unaligned.
9739 In general, if we're going to insert a nop between instructions
9740 X and Y, it's better to insert it immediately after X. That
9741 way, if the nop makes Y aligned, it will also align any labels
/* insns_left != issue_rate means something already issued this cycle,
   i.e. SUBINSN would dual-issue with the previous instruction. */
9743 if (state.insns_left != state.issue_rate
9744 && !CALL_P (subinsn))
9746 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
9748 /* SUBINSN is the first instruction in INSN and INSN is
9749 aligned. We want to align the previous instruction
9750 instead, so insert a nop between LAST2 and LAST.
9752 Note that LAST could be either a single instruction
9753 or a branch with a delay slot. In the latter case,
9754 LAST, like INSN, is already aligned, but the delay
9755 slot must have some extra delay that stops it from
9756 issuing at the same time as the branch. We therefore
9757 insert a nop before the branch in order to align its
9759 emit_insn_after (gen_nop (), last2);
9762 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
9764 /* SUBINSN is the delay slot of INSN, but INSN is
9765 currently unaligned. Insert a nop between
9766 LAST and INSN to align it. */
9767 emit_insn_after (gen_nop (), last);
9771 mips_sim_issue_insn (&state, subinsn);
9773 mips_sim_finish_insn (&state, insn);
9775 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
9776 length = get_attr_length (insn);
9779 /* If the instruction is an asm statement or multi-instruction
9780 mips.md patern, the length is only an estimate. Insert an
9781 8 byte alignment after it so that the following instructions
9782 can be handled correctly. */
9783 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
9784 && (recog_memoized (insn) < 0 || length >= 8))
9786 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
9787 next = NEXT_INSN (next);
/* The pipeline state after an unknown-length region is unknowable;
   advance a cycle rather than pretend we can track it. */
9788 mips_sim_next_cycle (&state);
/* A 4-byte instruction flips the 8-byte alignment parity. */
9791 else if (length & 4)
9792 aligned_p = !aligned_p;
9797 /* See whether INSN is an aligned label. */
9798 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
9804 /* Subroutine of mips_reorg. If there is a hazard between INSN
9805 and a previous instruction, avoid it by inserting nops after
9808 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
9809 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
9810 before using the value of that register. *HILO_DELAY counts the
9811 number of instructions since the last hilo hazard (that is,
9812 the number of instructions since the last mflo or mfhi).
9814 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
9815 for the next instruction.
9817 LO_REG is an rtx for the LO register, used in dependence checking. */
9820 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
9821 rtx *delayed_reg, rtx lo_reg)
9829 pattern = PATTERN (insn);
9831 /* Do not put the whole function in .set noreorder if it contains
9832 an asm statement. We don't know whether there will be hazards
9833 between the asm statement and the gcc-generated code. */
9834 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
9835 cfun->machine->all_noreorder_p = false;
9837 /* Ignore zero-length instructions (barriers and the like). */
9838 ninsns = get_attr_length (insn) / 4;
9842 /* Work out how many nops are needed. Note that we only care about
9843 registers that are explicitly mentioned in the instruction's pattern.
9844 It doesn't matter that calls use the argument registers or that they
9845 clobber hi and lo. */
/* Writing LO within two instructions of the last mfhi/mflo needs padding
   up to that two-instruction window. */
9846 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
9847 nops = 2 - *hilo_delay;
9848 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
9853 /* Insert the nops between this instruction and the previous one.
9854 Each new nop takes us further from the last hilo hazard. */
9855 *hilo_delay += nops;
9857 emit_insn_after (gen_hazard_nop (), after);
9859 /* Set up the state for the next instruction. */
9860 *hilo_delay += ninsns;
/* For recognized insns, consult the .md "hazard" attribute to see what
   hazard INSN itself creates for its successor. */
9862 if (INSN_CODE (insn) >= 0)
9863 switch (get_attr_hazard (insn))
/* A delay-type hazard: the next instruction must not use this insn's
   destination register for one cycle. */
9873 set = single_set (insn);
9874 gcc_assert (set != 0);
9875 *delayed_reg = SET_DEST (set);
9881 /* Go through the instruction stream and insert nops where necessary.
9882 See if the whole function can then be put into .set noreorder &
/* NOTE(review): the end of this comment (presumably "...set nomacro")
   is not visible in this excerpt. */
9886 mips_avoid_hazards (void)
9888 rtx insn, last_insn, lo_reg, delayed_reg;
9891 /* Force all instructions to be split into their final form. */
9892 split_all_insns_noflow ();
9894 /* Recalculate instruction lengths without taking nops into account. */
9895 cfun->machine->ignore_hazard_length_p = true;
9896 shorten_branches (get_insns ());
/* Start optimistic; the checks below and mips_avoid_hazard may clear it. */
9898 cfun->machine->all_noreorder_p = true;
9900 /* Profiled functions can't be all noreorder because the profiler
9901 support uses assembler macros. */
9902 if (current_function_profile)
9903 cfun->machine->all_noreorder_p = false;
9905 /* Code compiled with -mfix-vr4120 can't be all noreorder because
9906 we rely on the assembler to work around some errata. */
9907 if (TARGET_FIX_VR4120)
9908 cfun->machine->all_noreorder_p = false;
9910 /* The same is true for -mfix-vr4130 if we might generate mflo or
9911 mfhi instructions. Note that we avoid using mflo and mfhi if
9912 the VR4130 macc and dmacc instructions are available instead;
9913 see the *mfhilo_{si,di}_macc patterns. */
9914 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
9915 cfun->machine->all_noreorder_p = false;
9920 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
9922 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
/* For a SEQUENCE (branch plus delay slot), check each member insn;
   otherwise check INSN itself. */
9925 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
9926 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
9927 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
9928 &hilo_delay, &delayed_reg, lo_reg);
9930 mips_avoid_hazard (last_insn, insn, &hilo_delay,
9931 &delayed_reg, lo_reg);
9938 /* Implement TARGET_MACHINE_DEPENDENT_REORG.
 
   NOTE(review): the function signature and the condition selecting the
   mips16 path are not visible — lines appear to be missing from this
   excerpt. */
/* mips16 code uses its own constant-pool layout pass... */
9944 mips16_lay_out_constants ();
/* ...otherwise, when using explicit relocations, schedule delayed
   branches (if enabled) and insert hazard nops ourselves. */
9945 else if (TARGET_EXPLICIT_RELOCS)
9947 if (mips_flag_delayed_branch)
9948 dbr_schedule (get_insns ());
9949 mips_avoid_hazards ();
9950 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
9951 vr4130_align_insns ();
9955 /* This function does three things:
9957 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9958 - Register the mips16 hardware floating point stubs.
9959 - Register the gofast functions if selected using --enable-gofast. */
9961 #include "config/gofast.h"
9964 mips_init_libfuncs (void)
/* VR4120 division errata: route SImode div/mod through fixed-up
   library routines instead of the hardware instructions. */
9966 if (TARGET_FIX_VR4120)
9968 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9969 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
/* mips16 cannot touch the FPU directly, so single-float arithmetic,
   comparisons and conversions all go through __mips16_* stubs. */
9972 if (mips16_hard_float)
9974 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9975 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9976 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9977 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9979 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9980 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9981 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9982 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9983 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9984 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9986 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9987 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
/* Double-float support adds the DFmode equivalents plus SF<->DF and
   DF<->SI conversions. */
9989 if (TARGET_DOUBLE_FLOAT)
9991 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9992 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9993 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9994 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9996 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9997 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9998 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9999 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10000 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10001 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10003 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10004 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10006 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10007 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
/* No-op unless GCC was configured with --enable-gofast. */
10011 gofast_maybe_init_libfuncs ();
10014 /* Return a number assessing the cost of moving a register in class
10015 FROM to class TO. The classes are expressed using the enumeration
10016 values such as `GENERAL_REGS'. A value of 2 is the default; other
10017 values are interpreted relative to that.
10019 It is not required that the cost always equal 2 when FROM is the
10020 same as TO; on some machines it is expensive to move between
10021 registers if they are not general registers.
10023 If reload sees an insn consisting of a single `set' between two
10024 hard registers, and if `REGISTER_MOVE_COST' applied to their
10025 classes returns a value of 2, reload does not check to ensure that
10026 the constraints of the insn are met. Setting a cost of other than
10027 2 will allow reload to verify that the constraints are met. You
10028 should do this if the `movM' pattern's constraints do not allow
10031 ??? We make the cost of moving from HI/LO into general
10032 registers the same as for one of moving general registers to
10033 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10034 pseudo to HI/LO. This might hurt optimizations though, it
10035 isn't clear if it is wise. And it might not work in all cases. We
10036 could solve the DImode LO reg problem by using a multiply, just
10037 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10038 problem by using divide instructions. divu puts the remainder in
10039 the HI reg, so doing a divide by -1 will move the value in the HI
10040 reg for all values except -1. We could handle that case by using a
10041 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10042 a compare/branch to test the input value to see which instruction
10043 we need to use. This gets pretty messy, but it is feasible. */
10046 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10047 enum reg_class to, enum reg_class from)
/* NOTE(review): the returned cost constants for most branches below are
   not visible — many lines appear to be missing from this excerpt, so
   only the class-dispatch structure can be documented here. */
10049 if (from == M16_REGS && GR_REG_CLASS_P (to))
10051 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
/* Moves out of the general registers, by destination class. */
10053 else if (GR_REG_CLASS_P (from))
10055 if (to == M16_REGS)
10057 else if (to == M16_NA_REGS)
10059 else if (GR_REG_CLASS_P (to))
10066 else if (to == FP_REGS)
10068 else if (reg_class_subset_p (to, ACC_REGS))
10075 else if (COP_REG_CLASS_P (to))
/* Moves out of the floating-point registers. */
10080 else if (from == FP_REGS)
10082 if (GR_REG_CLASS_P (to))
10084 else if (to == FP_REGS)
10086 else if (to == ST_REGS)
/* Moves out of the accumulator (HI/LO/DSP) registers. */
10089 else if (reg_class_subset_p (from, ACC_REGS))
10091 if (GR_REG_CLASS_P (to))
10099 else if (from == ST_REGS && GR_REG_CLASS_P (to))
10101 else if (COP_REG_CLASS_P (from))
10107 ??? What cases are these? Shouldn't we return 2 here? */
10112 /* Return the length of INSN. LENGTH is the initial length computed by
10113 attributes in the machine-description file. */
10116 mips_adjust_insn_length (rtx insn, int length)
10118 /* A unconditional jump has an unfilled delay slot if it is not part
10119 of a sequence. A conditional jump normally has a delay slot, but
10120 does not on MIPS16. */
10121 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10124 /* See how many nops might be needed to avoid hardware hazards. */
/* Skip this while mips_avoid_hazards is recomputing nop-free lengths
   (ignore_hazard_length_p) and for unrecognized insns. */
10125 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10126 switch (get_attr_hazard (insn))
10140 /* All MIPS16 instructions are a measly two bytes. */
10148 /* Return an asm sequence to start a noat block and load the address
10149 of a label into $1.
 
   The returned template uses %[ (start noat), %@ ($1/$at) and %+ (gp)
   print directives; the caller is responsible for closing with %]. */
10152 mips_output_load_label (void)
10154 if (TARGET_EXPLICIT_RELOCS)
/* GOT-page plus offset addressing, 32-bit... */
10158 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
/* ...and the 64-bit equivalent. */
10161 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
/* Implicit-reloc GOT load; %# emits a nop on ISAs with a load delay. */
10164 if (ISA_HAS_LOAD_DELAY)
10165 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10166 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Non-PIC: let the assembler's la/dla macro materialize the address. */
10170 if (Pmode == DImode)
10171 return "%[dla\t%@,%0";
10173 return "%[la\t%@,%0";
10177 /* Return the assembly code for INSN, which has the operands given by
10178 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10179 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10180 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10181 version of BRANCH_IF_TRUE. */
10184 mips_output_conditional_branch (rtx insn, rtx *operands,
10185 const char *branch_if_true,
10186 const char *branch_if_false)
10188 unsigned int length;
10189 rtx taken, not_taken;
10191 length = get_attr_length (insn);
10194 /* Just a simple conditional branch. */
10195 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10196 return branch_if_true;
10199 /* Generate a reversed branch around a direct jump. This fallback does
10200 not use branch-likely instructions. */
10201 mips_branch_likely = false;
10202 not_taken = gen_label_rtx ();
10203 taken = operands[1];
10205 /* Generate the reversed branch to NOT_TAKEN. */
10206 operands[1] = not_taken;
10207 output_asm_insn (branch_if_false, operands);
10209 /* If INSN has a delay slot, we must provide delay slots for both the
10210 branch to NOT_TAKEN and the conditional jump. We must also ensure
10211 that INSN's delay slot is executed in the appropriate cases. */
10212 if (final_sequence)
10214 /* This first delay slot will always be executed, so use INSN's
10215 delay slot if is not annulled. */
10216 if (!INSN_ANNULLED_BRANCH_P (insn))
/* Emit the original delay-slot insn here and mark it deleted so final
   does not emit it a second time. */
10218 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10219 asm_out_file, optimize, 1, NULL);
10220 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10223 output_asm_insn ("nop", 0);
10224 fprintf (asm_out_file, "\n");
10227 /* Output the unconditional branch to TAKEN. */
/* Direct jump when TAKEN is reachable... */
10229 output_asm_insn ("j\t%0%/", &taken);
/* ...otherwise load the target address into $1 and jump through it. */
10232 output_asm_insn (mips_output_load_label (), &taken);
10233 output_asm_insn ("jr\t%@%]%/", 0);
10236 /* Now deal with its delay slot; see above. */
10237 if (final_sequence)
10239 /* This delay slot will only be executed if the branch is taken.
10240 Use INSN's delay slot if is annulled. */
10241 if (INSN_ANNULLED_BRANCH_P (insn))
10243 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10244 asm_out_file, optimize, 1, NULL);
10245 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10248 output_asm_insn ("nop", 0);
10249 fprintf (asm_out_file, "\n");
10252 /* Output NOT_TAKEN. */
10253 (*targetm.asm_out.internal_label) (asm_out_file, "L",
10254 CODE_LABEL_NUMBER (not_taken));
10258 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10259 if some ordered condition is true. The condition is given by
10260 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10261 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10262 its second is always zero. */
10265 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10267 const char *branch[2];
10269 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10270 Make BRANCH[0] branch on the inverse condition. */
10271 switch (GET_CODE (operands[0]))
10273 /* These cases are equivalent to comparisons against zero. */
10275 inverted_p = !inverted_p;
10276 /* Fall through. */
10278 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10279 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10282 /* These cases are always true or always false. */
10284 inverted_p = !inverted_p;
10285 /* Fall through. */
/* beq $0,$0 is an always-taken branch; bne $0,$0 is never taken. */
10287 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10288 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
/* Remaining orderings map onto the b<cond>z family via the %C/%N
   condition-printing directives. */
10292 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10293 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
/* Hand both templates to the generic emitter, which picks direct or
   reversed form depending on branch range. */
10296 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10299 /* Used to output div or ddiv instruction DIVISION, which has the operands
10300 given by OPERANDS. Add in a divide-by-zero check if needed.
10302 When working around R4000 and R4400 errata, we need to make sure that
10303 the division is not immediately followed by a shift[1][2]. We also
10304 need to stop the division from being put into a branch delay slot[3].
10305 The easiest way to avoid both problems is to add a nop after the
10306 division. When a divide-by-zero check is needed, this nop can be
10307 used to fill the branch delay slot.
10309 [1] If a double-word or a variable shift executes immediately
10310 after starting an integer division, the shift may give an
10311 incorrect result. See quotations of errata #16 and #28 from
10312 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10313 in mips.md for details.
10315 [2] A similar bug to [1] exists for all revisions of the
10316 R4000 and the R4400 when run in an MC configuration.
10317 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10319 "19. In this following sequence:
10321 ddiv (or ddivu or div or divu)
10322 dsll32 (or dsrl32, dsra32)
10324 if an MPT stall occurs, while the divide is slipping the cpu
10325 pipeline, then the following double shift would end up with an
10328 Workaround: The compiler needs to avoid generating any
10329 sequence with divide followed by extended double shift."
10331 This erratum is also present in "MIPS R4400MC Errata, Processor
10332 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10333 & 3.0" as errata #10 and #4, respectively.
10335 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10336 (also valid for MIPS R4000MC processors):
10338 "52. R4000SC: This bug does not apply for the R4000PC.
10340 There are two flavors of this bug:
10342 1) If the instruction just after divide takes an RF exception
10343 (tlb-refill, tlb-invalid) and gets an instruction cache
10344 miss (both primary and secondary) and the line which is
10345 currently in secondary cache at this index had the first
10346 data word, where the bits 5..2 are set, then R4000 would
10347 get a wrong result for the div.
10352 ------------------- # end-of page. -tlb-refill
10357 ------------------- # end-of page. -tlb-invalid
10360 2) If the divide is in the taken branch delay slot, where the
10361 target takes RF exception and gets an I-cache miss for the
10362 exception vector or where I-cache miss occurs for the
10363 target address, under the above mentioned scenarios, the
10364 div would get wrong results.
10367 j r2 # to next page mapped or unmapped
10368 div r8,r9 # this bug would be there as long
10369 # as there is an ICache miss and
10370 nop # the "data pattern" is present
10373 beq r0, r0, NextPage # to Next page
10377 This bug is present for div, divu, ddiv, and ddivu
10380 Workaround: For item 1), OS could make sure that the next page
10381 after the divide instruction is also mapped. For item 2), the
10382 compiler could make sure that the divide instruction is not in
10383 the branch delay slot."
10385 These processors have PRId values of 0x00004220 and 0x00004300 for
10386 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
10389 mips_output_division (const char *division, rtx *operands)
/* NOTE(review): this excerpt has dropped several lines of the function
   (the return type line, braces, the declaration and initialization of
   the local S from DIVISION, presumably a TARGET_MIPS16 guard around the
   "bnez" sequence, and the final "return s;").  Only comments are added
   here; confirm the full body against upstream sources.  */
/* When working around the R4000/R4400 errata described above, output the
   division now so that the string left in S (presumably a "nop") can
   separate it from any following shift or branch delay slot.  */
10394 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10396 output_asm_insn (s, operands);
/* Optionally add a divide-by-zero check on operand %2 (the divisor).  */
10399 if (TARGET_CHECK_ZERO_DIV)
/* Branch-around form: skip a "break 7" (divide-by-zero) when the
   divisor is nonzero.  */
10403 output_asm_insn (s, operands);
10404 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
/* Trap form: "teq" raises trap code 7 when the divisor equals $0.  */
10406 else if (GENERATE_DIVIDE_TRAPS)
10408 output_asm_insn (s, operands);
10409 s = "teq\t%2,%.,7";
/* Fallback form: explicit compare-and-branch over a "break 7"; the
   check doubles as the delay-slot filler mentioned above.  */
10413 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10414 output_asm_insn (s, operands);
10415 s = "break\t7%)\n1:";
10421 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10422 with a final "000" replaced by "k". Ignore case.
10424 Note: this function is shared between GCC and GAS. */
10427 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
10429 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
10430 given++, canonical++;
10432 return ((*given == 0 && *canonical == 0)
10433 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
10437 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10438 CPU name. We've traditionally allowed a lot of variation here.
10440 Note: this function is shared between GCC and GAS. */
10443 mips_matching_cpu_name_p (const char *canonical, const char *given)
10445 /* First see if the name matches exactly, or with a final "000"
10446 turned into "k". */
10447 if (mips_strict_matching_cpu_name_p (canonical, given))
10450 /* If not, try comparing based on numerical designation alone.
10451 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10452 if (TOLOWER (*given) == 'r')
10454 if (!ISDIGIT (*given))
10457 /* Skip over some well-known prefixes in the canonical name,
10458 hoping to find a number there too. */
10459 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
10461 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
10463 else if (TOLOWER (canonical[0]) == 'r')
10466 return mips_strict_matching_cpu_name_p (canonical, given);
10470 /* Return the mips_cpu_info entry for the processor or ISA given
10471 by CPU_STRING.  Return null if the string isn't recognized.
10473 A similar function exists in GAS.  */
10475 static const struct mips_cpu_info *
10476 mips_parse_cpu (const char *cpu_string)
/* NOTE(review): the excerpt has dropped this function's braces, the
   declaration of S, the character test inside the warning loop
   (presumably ISUPPER) and the "return" statements; only comments are
   added here.  */
10478 const struct mips_cpu_info *p;
10481 /* In the past, we allowed upper-case CPU names, but it doesn't
10482 work well with the multilib machinery.  */
10483 for (s = cpu_string; *s != 0; s++)
10486 warning (0, "the cpu name must be lower case");
10490 /* 'from-abi' selects the most compatible architecture for the given
10491 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs.  For the
10492 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10493 version.  Look first at the -mgp options, if given, otherwise base
10494 the choice on MASK_64BIT in TARGET_DEFAULT.  */
10495 if (strcasecmp (cpu_string, "from-abi") == 0)
10496 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10497 : ABI_NEEDS_64BIT_REGS ? 3
10498 : (TARGET_64BIT ? 3 : 1));
10500 /* 'default' has traditionally been a no-op.  Probably not very useful.  */
10501 if (strcasecmp (cpu_string, "default") == 0)
/* NOTE(review): the return for the "default" case (presumably
   "return 0;") is missing from the excerpt.  */
/* Otherwise scan the processor table for a matching name; an
   unrecognized name falls off the end (presumably returning null).  */
10504 for (p = mips_cpu_info_table; p->name != 0; p++)
10505 if (mips_matching_cpu_name_p (p->name, cpu_string))
10512 /* Return the processor associated with the given ISA level, or null
10513 if the ISA isn't valid. */
10515 static const struct mips_cpu_info *
10516 mips_cpu_info_from_isa (int isa)
10518 const struct mips_cpu_info *p;
10520 for (p = mips_cpu_info_table; p->name != 0; p++)
10527 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10528 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10529 they only hold condition code modes, and CCmode is always considered to
10530 be 4 bytes wide. All other registers are word sized. */
10533 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10535 if (ST_REG_P (regno))
10536 return ((GET_MODE_SIZE (mode) + 3) / 4);
10537 else if (! FP_REG_P (regno))
10538 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10540 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
10543 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
10544 all BLKmode objects are returned in memory. Under the new (N32 and
10545 64-bit MIPS ABIs) small structures are returned in a register.
10546 Objects with varying size must still be returned in memory, of
10550 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
10553 return (TYPE_MODE (type) == BLKmode);
10555 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10556 || (int_size_in_bytes (type) == -1));
10560 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
10562 return !TARGET_OLDABI;
10565 /* Return true if INSN is a multiply-add or multiply-subtract
10566 instruction and PREV assigns to the accumulator operand. */
10569 mips_linked_madd_p (rtx prev, rtx insn)
10573 x = single_set (insn);
10579 if (GET_CODE (x) == PLUS
10580 && GET_CODE (XEXP (x, 0)) == MULT
10581 && reg_set_p (XEXP (x, 1), prev))
10584 if (GET_CODE (x) == MINUS
10585 && GET_CODE (XEXP (x, 1)) == MULT
10586 && reg_set_p (XEXP (x, 0), prev))
10592 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10593 that may clobber hi or lo. */
10595 static rtx mips_macc_chains_last_hilo;
10597 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10598 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10601 mips_macc_chains_record (rtx insn)
10603 if (get_attr_may_clobber_hilo (insn))
10604 mips_macc_chains_last_hilo = insn;
10607 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10608 has NREADY elements, looking for a multiply-add or multiply-subtract
10609 instruction that is cumulative with mips_macc_chains_last_hilo.
10610 If there is one, promote it ahead of anything else that might
10611 clobber hi or lo. */
10614 mips_macc_chains_reorder (rtx *ready, int nready)
10618 if (mips_macc_chains_last_hilo != 0)
10619 for (i = nready - 1; i >= 0; i--)
10620 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10622 for (j = nready - 1; j > i; j--)
10623 if (recog_memoized (ready[j]) >= 0
10624 && get_attr_may_clobber_hilo (ready[j]))
10626 mips_promote_ready (ready, i, j);
10633 /* The last instruction to be scheduled. */
10635 static rtx vr4130_last_insn;
10637 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10638 points to an rtx that is initially an instruction. Nullify the rtx
10639 if the instruction uses the value of register X. */
10642 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
10644 rtx *insn_ptr = data;
10647 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10651 /* Return true if there is true register dependence between vr4130_last_insn
10655 vr4130_true_reg_dependence_p (rtx insn)
10657 note_stores (PATTERN (vr4130_last_insn),
10658 vr4130_true_reg_dependence_p_1, &insn);
10662 /* A TUNE_MIPS4130 helper function.  Given that INSN1 is at the head of
10663 the ready queue and that INSN2 is the instruction after it, return
10664 true if it is worth promoting INSN2 ahead of INSN1.  Look for cases
10665 in which INSN1 and INSN2 can probably issue in parallel, but for
10666 which (INSN2, INSN1) should be less sensitive to instruction
10667 alignment than (INSN1, INSN2).  See 4130.md for more details.  */
10670 vr4130_swap_insns_p (rtx insn1, rtx insn2)
/* NOTE(review): the excerpt drops this function's return type, braces,
   the declaration of DEP, and every "return" statement (including the
   outcome of the dep1/dep2 comparison and the final default); only
   comments are added here -- confirm the control flow upstream.  */
10674 /* Check for the following case:
10676 1) there is some other instruction X with an anti dependence on INSN1;
10677 2) X has a higher priority than INSN2; and
10678 3) X is an arithmetic instruction (and thus has no unit restrictions).
10680 If INSN1 is the last instruction blocking X, it would better to
10681 choose (INSN1, X) over (INSN2, INSN1).  */
10682 FOR_EACH_DEP_LINK (dep, INSN_FORW_DEPS (insn1))
10683 if (DEP_LINK_KIND (dep) == REG_DEP_ANTI
10684 && INSN_PRIORITY (DEP_LINK_CON (dep)) > INSN_PRIORITY (insn2)
10685 && recog_memoized (DEP_LINK_CON (dep)) >= 0
10686 && get_attr_vr4130_class (DEP_LINK_CON (dep)) == VR4130_CLASS_ALU)
/* Both instructions must be recognizable and a previous instruction
   must have been scheduled before unit classes can be compared.  */
10689 if (vr4130_last_insn != 0
10690 && recog_memoized (insn1) >= 0
10691 && recog_memoized (insn2) >= 0)
10693 /* See whether INSN1 and INSN2 use different execution units,
10694 or if they are both ALU-type instructions.  If so, they can
10695 probably execute in parallel.  */
10696 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10697 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10698 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10700 /* If only one of the instructions has a dependence on
10701 vr4130_last_insn, prefer to schedule the other one first.  */
10702 bool dep1 = vr4130_true_reg_dependence_p (insn1);
10703 bool dep2 = vr4130_true_reg_dependence_p (insn2);
10707 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10708 is not an ALU-type instruction and if INSN1 uses the same
10709 execution unit.  (Note that if this condition holds, we already
10710 know that INSN2 uses a different execution unit.)  */
10711 if (class1 != VR4130_CLASS_ALU
10712 && recog_memoized (vr4130_last_insn) >= 0
10713 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10720 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10721 queue with at least two instructions. Swap the first two if
10722 vr4130_swap_insns_p says that it could be worthwhile. */
10725 vr4130_reorder (rtx *ready, int nready)
10727 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10728 mips_promote_ready (ready, nready - 2, nready - 1);
10731 /* Remove the instruction at index LOWER from ready queue READY and
10732 reinsert it in front of the instruction at index HIGHER. LOWER must
10736 mips_promote_ready (rtx *ready, int lower, int higher)
10741 new_head = ready[lower];
10742 for (i = lower; i < higher; i++)
10743 ready[i] = ready[i + 1];
10744 ready[i] = new_head;
10747 /* Implement TARGET_SCHED_REORDER.  */
10750 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10751 rtx *ready, int *nreadyp, int cycle)
/* NOTE(review): the excerpt drops the return type line, braces and the
   inner guards of both arms (presumably a CYCLE == 0 reset of the
   per-block state and ready-queue size checks) -- confirm upstream.  */
/* Before reload, keep multiply-accumulate chains together on targets
   that benefit from it (TUNE_MACC_CHAINS).  */
10753 if (!reload_completed && TUNE_MACC_CHAINS)
10756 mips_macc_chains_last_hilo = 0;
10758 mips_macc_chains_reorder (ready, *nreadyp);
/* After reload, apply the VR4130 pairing heuristic unless explicit
   alignment (TARGET_VR4130_ALIGN) is in effect.  */
10760 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
10763 vr4130_last_insn = 0;
10765 vr4130_reorder (ready, *nreadyp);
/* Allow the scheduler to issue up to the processor's issue rate.  */
10767 return mips_issue_rate ();
10770 /* Implement TARGET_SCHED_VARIABLE_ISSUE.  */
10773 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10774 rtx insn, int more)
/* NOTE(review): the excerpt drops the return type, braces, the case
   labels (presumably USE/CLOBBER and default), the decrement of MORE
   for real instructions, and the final "return more;".  */
10776 switch (GET_CODE (PATTERN (insn)))
10780 /* Don't count USEs and CLOBBERs against the issue rate.  */
/* For real instructions: update the MACC-chain tracking before reload,
   and always remember the last scheduled instruction for the VR4130
   heuristics.  */
10785 if (!reload_completed && TUNE_MACC_CHAINS)
10786 mips_macc_chains_record (insn);
10787 vr4130_last_insn = insn;
10793 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
10794 dependencies have no cost. */
10797 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
10798 rtx dep ATTRIBUTE_UNUSED, int cost)
10800 if (REG_NOTE_KIND (link) != 0)
10805 /* Return the number of instructions that can be issued per cycle.  */
10808 mips_issue_rate (void)
/* NOTE(review): the excerpt drops the "switch" header (presumably on
   mips_tune), the per-group "return" statements and the default case;
   only comments are added here.  */
10812 case PROCESSOR_74KC:
10813 case PROCESSOR_74KF2_1:
10814 case PROCESSOR_74KF1_1:
10815 case PROCESSOR_74KF3_2:
10816 /* The 74k is not strictly quad-issue cpu, but can be seen as one
10817 by the scheduler.  It can issue 1 ALU, 1 AGEN and 2 FPU insns,
10818 but in reality only a maximum of 3 insns can be issued as the
10819 floating point load/stores also require a slot in the AGEN pipe.  */
/* The following processors issue two instructions per cycle --
   presumably a shared "return 2;" follows; confirm upstream.  */
10822 case PROCESSOR_R4130:
10823 case PROCESSOR_R5400:
10824 case PROCESSOR_R5500:
10825 case PROCESSOR_R7000:
10826 case PROCESSOR_R9000:
10829 case PROCESSOR_SB1:
10830 case PROCESSOR_SB1A:
10831 /* This is actually 4, but we get better performance if we claim 3.
10832 This is partly because of unwanted speculative code motion with the
10833 larger number, and partly because in most common cases we can't
10834 reach the theoretical max of 4.  */
/* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD.  This should
   be as wide as the scheduling freedom in the DFA.  */

static int
mips_multipass_dfa_lookahead (void)
{
  /* Can schedule up to 4 of the 6 function units in any one cycle.  */
  return 4;
}
10855 /* Implements a store data bypass check. We need this because the cprestore
10856 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
10857 default routine to abort. We just return false for that case. */
10858 /* ??? Should try to give a better result here than assuming false. */
10861 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
10863 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
10866 return ! store_data_bypass_p (out_insn, in_insn);
10869 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
10870 return the first operand of the associated "pref" or "prefx" insn. */
10873 mips_prefetch_cookie (rtx write, rtx locality)
10875 /* store_streamed / load_streamed. */
10876 if (INTVAL (locality) <= 0)
10877 return GEN_INT (INTVAL (write) + 4);
10879 /* store / load. */
10880 if (INTVAL (locality) <= 2)
10883 /* store_retained / load_retained. */
10884 return GEN_INT (INTVAL (write) + 6);
10887 /* MIPS builtin function support.  */
/* Describes one __builtin_mips_* function: how to expand it and which
   target flags must be enabled for it to exist.  */
10889 struct builtin_description
/* NOTE(review): the excerpt drops the struct's braces and two field
   declarations (the builtin's name string after 10898, and the
   target-flags integer after 10907).  */
10891 /* The code of the main .md file instruction.  See mips_builtin_type
10892 for more information.  */
10893 enum insn_code icode;
10895 /* The floating-point comparison code to use with ICODE, if any.  */
10896 enum mips_fp_condition cond;
10898 /* The name of the builtin function.  */
10901 /* Specifies how the function should be expanded.  */
10902 enum mips_builtin_type builtin_type;
10904 /* The function's prototype.  */
10905 enum mips_function_type function_type;
10907 /* The target flags required for this function.  */
/* Helper macros that expand to builtin_description initializers.  The
   field order must match struct builtin_description above.  */
10911 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
10912 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields.  */
10913 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)		\
10914 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
10915 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
10917 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
10919 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS)			\
10920 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND,		\
10921 "__builtin_mips_" #INSN "_" #COND "_s",				\
10922 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS },	\
10923 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND,		\
10924 "__builtin_mips_" #INSN "_" #COND "_d",				\
10925 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
10927 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10928 The lower and upper forms require TARGET_FLAGS while the any and all
10929 forms require MASK_MIPS3D.  */
10930 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS)			\
10931 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10932 "__builtin_mips_any_" #INSN "_" #COND "_ps",				\
10933 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D },	\
10934 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10935 "__builtin_mips_all_" #INSN "_" #COND "_ps",				\
10936 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D },	\
10937 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10938 "__builtin_mips_lower_" #INSN "_" #COND "_ps",			\
10939 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS },	\
10940 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10941 "__builtin_mips_upper_" #INSN "_" #COND "_ps",			\
10942 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
10944 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
10945 require MASK_MIPS3D.  */
10946 #define CMP_4S_BUILTINS(INSN, COND)					\
10947 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND,		\
10948 "__builtin_mips_any_" #INSN "_" #COND "_4s",				\
10949 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
10951 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND,		\
10952 "__builtin_mips_all_" #INSN "_" #COND "_4s",				\
10953 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
10956 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
10957 instruction requires TARGET_FLAGS.  */
10958 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS)			\
10959 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10960 "__builtin_mips_movt_" #INSN "_" #COND "_ps",				\
10961 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
10963 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10964 "__builtin_mips_movf_" #INSN "_" #COND "_ps",				\
10965 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
10968 /* Define all the builtins related to c.cond.fmt condition COND.  */
10969 #define CMP_BUILTINS(COND)						\
10970 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT),			\
10971 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D),				\
10972 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D),			\
10973 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT),			\
10974 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D),				\
10975 CMP_4S_BUILTINS (c, COND),						\
10976 CMP_4S_BUILTINS (cabs, COND)
/* Paired-single and MIPS-3D builtins.  NOTE(review): the excerpt drops
   the array's opening "{" and the closing "};".  */
10978 static const struct builtin_description mips_bdesc[] =
10980 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10981 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10982 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10983 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10984 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10985 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10986 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10987 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10989 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10990 MASK_PAIRED_SINGLE_FLOAT),
10991 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10992 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10993 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10994 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10996 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10997 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10998 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10999 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11000 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11001 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11003 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11004 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11005 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11006 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11007 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11008 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
/* Expands to the full set of c.cond.fmt / cabs comparison builtins for
   every FP condition.  */
11010 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11013 /* Builtin functions for the SB-1 processor.  */
/* Map the generic builtin name onto the existing sqrtv2sf2 pattern.  */
11015 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
/* NOTE(review): the excerpt drops the array's "{" and "};".  */
11017 static const struct builtin_description sb1_bdesc[] =
11019 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11022 /* Builtin functions for DSP ASE.  */
/* Map DSP builtin names onto the generic vector add/sub/mul patterns.  */
11024 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11025 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11026 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11027 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11028 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11030 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11031 CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and TARGET_FLAGS are
11032 builtin_description fields.  */
11033 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)	\
11034 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
11035 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11037 /* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
11038 branch instruction.  TARGET_FLAGS is a builtin_description field.  */
11039 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS)				\
11040 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE,		\
11041 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
/* DSP ASE (rev 1 and rev 2) builtins available on all targets.
   NOTE(review): the excerpt drops the array's "{" and "};".  */
11043 static const struct builtin_description dsp_bdesc[] =
11045 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11046 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11047 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11048 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11049 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11050 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11051 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11052 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11053 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11054 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11055 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11056 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11057 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11058 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11059 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11060 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11061 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11062 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11063 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11064 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11065 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11066 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11067 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11068 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11069 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11070 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11071 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11072 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11073 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11074 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11075 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11076 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11077 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11078 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11079 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11080 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11081 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11082 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11083 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11084 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11085 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11086 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11087 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11088 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11089 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11090 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11091 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11092 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11093 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11094 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11095 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11096 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11097 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11098 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11099 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11100 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11101 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11102 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11103 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11104 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11105 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11106 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11107 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11108 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11109 BPOSGE_BUILTIN (32, MASK_DSP),
11111 /* The following are for the MIPS DSP ASE REV 2.  */
11112 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11113 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11114 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11115 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11116 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11117 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11118 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11119 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11120 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11121 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11122 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11123 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11124 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11125 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11126 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11127 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11128 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11129 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11130 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11131 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11132 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11133 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11134 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11135 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11136 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11137 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11138 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11139 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11140 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11141 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11142 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11143 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11144 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11145 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
/* DSP builtins that operate on the 64-bit accumulator and are therefore
   only available on 32-bit targets.  NOTE(review): the excerpt drops
   the array's "{" and "};".  */
11148 static const struct builtin_description dsp_32only_bdesc[] =
11150 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11151 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11152 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11153 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11154 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11155 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11156 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11157 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11158 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11159 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11160 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11161 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11162 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11163 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11164 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11165 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11166 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11167 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11168 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11169 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11170 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11172 /* The following are for the MIPS DSP ASE REV 2.  */
11173 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11174 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11175 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11176 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11177 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11178 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11179 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11180 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11181 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11182 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11183 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11184 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11185 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11186 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11187 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
/* NOTE(review): this excerpt is gapped — the struct header/tag line and the
   `size' field declaration referenced by the comments below are not visible
   here; confirm against the full file before editing.  */
11190 /* This helps provide a mapping from builtin function codes to bdesc
11195 /* The builtin function table that this entry describes. */
11196 const struct builtin_description *bdesc;
11198 /* The number of entries in the builtin function table. */
11201 /* The target processor that supports these builtin functions.
11202 PROCESSOR_MAX means we enable them for all processors. */
11203 enum processor_type proc;
11205 /* If the target has these flags, this builtin function table
11206 will not be supported. */
11207 int unsupported_target_flags;
/* One entry per builtin-description table.  Each row pairs a table with the
   processor that supports it and the target flags that disable it.
   NOTE(review): the dsp_32only_bdesc initializer and the closing `};' fall
   outside this excerpt.  */
11210 static const struct bdesc_map bdesc_arrays[] =
11212 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11213 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11214 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11215 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
/* NOTE(review): interior lines (declaration of `value', braces, the final
   `return value;') are missing from this excerpt; the visible logic expands
   the call argument, then forces it into a register if the operand predicate
   rejects it, and errors out if the predicate still rejects it.  */
11219 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11220 suitable for input operand OP of instruction ICODE. Return the value. */
11223 mips_prepare_builtin_arg (enum insn_code icode,
11224 unsigned int op, tree exp, unsigned int argnum)
11227 enum machine_mode mode;
11229 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11230 mode = insn_data[icode].operand[op].mode;
11231 if (!insn_data[icode].operand[op].predicate (value, mode))
11233 value = copy_to_mode_reg (mode, value);
11234 /* Check the predicate again. */
11235 if (!insn_data[icode].operand[op].predicate (value, mode))
11237 error ("invalid argument to builtin function");
/* NOTE(review): braces and the trailing `return target;' are outside this
   excerpt.  A fresh pseudo is allocated whenever TARGET is null or fails the
   output operand's predicate.  */
11245 /* Return an rtx suitable for output operand OP of instruction ICODE.
11246 If TARGET is non-null, try to use it where possible. */
11249 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11251 enum machine_mode mode;
11253 mode = insn_data[icode].operand[op].mode;
11254 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11255 target = gen_reg_rtx (mode);
/* NOTE(review): several lines are missing from this excerpt, including the
   `fndecl' declaration, the per-array `fcode -= m->size' / `offset' handling
   implied by the loop, the `switch (type)' opener, and the final
   gcc_unreachable ()/return.  The visible structure locates FCODE within the
   concatenated bdesc arrays, then dispatches on the builtin's type.  */
11260 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
11263 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11264 enum machine_mode mode ATTRIBUTE_UNUSED,
11265 int ignore ATTRIBUTE_UNUSED)
11267 enum insn_code icode;
11268 enum mips_builtin_type type;
11270 unsigned int fcode;
11271 const struct builtin_description *bdesc;
11272 const struct bdesc_map *m;
11274 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11275 fcode = DECL_FUNCTION_CODE (fndecl);
11278 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11280 if (fcode < m->size)
11283 icode = bdesc[fcode].icode;
11284 type = bdesc[fcode].builtin_type;
11294 case MIPS_BUILTIN_DIRECT:
11295 return mips_expand_builtin_direct (icode, target, exp, true);
11297 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11298 return mips_expand_builtin_direct (icode, target, exp, false);
11300 case MIPS_BUILTIN_MOVT:
11301 case MIPS_BUILTIN_MOVF:
11302 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11305 case MIPS_BUILTIN_CMP_ANY:
11306 case MIPS_BUILTIN_CMP_ALL:
11307 case MIPS_BUILTIN_CMP_UPPER:
11308 case MIPS_BUILTIN_CMP_LOWER:
11309 case MIPS_BUILTIN_CMP_SINGLE:
11310 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11313 case MIPS_BUILTIN_BPOSGE32:
11314 return mips_expand_builtin_bposge (type, target);
/* NOTE(review): this excerpt drops many interior lines — braces, early
   `return', the MIPS3D/DSP conditional scaffolding, several trailing
   `NULL_TREE);' argument lines, and the `offset' bookkeeping between bdesc
   arrays.  The visible code builds the function-type table used to register
   every MIPS builtin, then walks bdesc_arrays registering each supported
   entry with add_builtin_function.  */
11321 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
11324 mips_init_builtins (void)
11326 const struct builtin_description *d;
11327 const struct bdesc_map *m;
11328 tree types[(int) MIPS_MAX_FTYPE_MAX];
11329 tree V2SF_type_node;
11330 tree V2HI_type_node;
11331 tree V4QI_type_node;
11332 unsigned int offset;
11334 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11335 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
/* Paired-single types: V2SF vectors of two floats.  */
11338 if (TARGET_PAIRED_SINGLE_FLOAT)
11340 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
11342 types[MIPS_V2SF_FTYPE_V2SF]
11343 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
11345 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
11346 = build_function_type_list (V2SF_type_node,
11347 V2SF_type_node, V2SF_type_node, NULL_TREE);
11349 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
11350 = build_function_type_list (V2SF_type_node,
11351 V2SF_type_node, V2SF_type_node,
11352 integer_type_node, NULL_TREE);
11354 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
11355 = build_function_type_list (V2SF_type_node,
11356 V2SF_type_node, V2SF_type_node,
11357 V2SF_type_node, V2SF_type_node, NULL_TREE);
11359 types[MIPS_V2SF_FTYPE_SF_SF]
11360 = build_function_type_list (V2SF_type_node,
11361 float_type_node, float_type_node, NULL_TREE);
11363 types[MIPS_INT_FTYPE_V2SF_V2SF]
11364 = build_function_type_list (integer_type_node,
11365 V2SF_type_node, V2SF_type_node, NULL_TREE);
11367 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
11368 = build_function_type_list (integer_type_node,
11369 V2SF_type_node, V2SF_type_node,
11370 V2SF_type_node, V2SF_type_node, NULL_TREE);
11372 types[MIPS_INT_FTYPE_SF_SF]
11373 = build_function_type_list (integer_type_node,
11374 float_type_node, float_type_node, NULL_TREE);
11376 types[MIPS_INT_FTYPE_DF_DF]
11377 = build_function_type_list (integer_type_node,
11378 double_type_node, double_type_node, NULL_TREE);
11380 types[MIPS_SF_FTYPE_V2SF]
11381 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
11383 types[MIPS_SF_FTYPE_SF]
11384 = build_function_type_list (float_type_node,
11385 float_type_node, NULL_TREE);
11387 types[MIPS_SF_FTYPE_SF_SF]
11388 = build_function_type_list (float_type_node,
11389 float_type_node, float_type_node, NULL_TREE);
11391 types[MIPS_DF_FTYPE_DF]
11392 = build_function_type_list (double_type_node,
11393 double_type_node, NULL_TREE);
11395 types[MIPS_DF_FTYPE_DF_DF]
11396 = build_function_type_list (double_type_node,
11397 double_type_node, double_type_node, NULL_TREE);
/* DSP types: V2HI (two halfwords) and V4QI (four bytes).  The guarding
   `if (TARGET_DSP)' is presumably among the missing lines — confirm.  */
11402 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
11403 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
11405 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
11406 = build_function_type_list (V2HI_type_node,
11407 V2HI_type_node, V2HI_type_node,
11410 types[MIPS_SI_FTYPE_SI_SI]
11411 = build_function_type_list (intSI_type_node,
11412 intSI_type_node, intSI_type_node,
11415 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
11416 = build_function_type_list (V4QI_type_node,
11417 V4QI_type_node, V4QI_type_node,
11420 types[MIPS_SI_FTYPE_V4QI]
11421 = build_function_type_list (intSI_type_node,
11425 types[MIPS_V2HI_FTYPE_V2HI]
11426 = build_function_type_list (V2HI_type_node,
11430 types[MIPS_SI_FTYPE_SI]
11431 = build_function_type_list (intSI_type_node,
11435 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
11436 = build_function_type_list (V4QI_type_node,
11437 V2HI_type_node, V2HI_type_node,
11440 types[MIPS_V2HI_FTYPE_SI_SI]
11441 = build_function_type_list (V2HI_type_node,
11442 intSI_type_node, intSI_type_node,
11445 types[MIPS_SI_FTYPE_V2HI]
11446 = build_function_type_list (intSI_type_node,
11450 types[MIPS_V2HI_FTYPE_V4QI]
11451 = build_function_type_list (V2HI_type_node,
11455 types[MIPS_V4QI_FTYPE_V4QI_SI]
11456 = build_function_type_list (V4QI_type_node,
11457 V4QI_type_node, intSI_type_node,
11460 types[MIPS_V2HI_FTYPE_V2HI_SI]
11461 = build_function_type_list (V2HI_type_node,
11462 V2HI_type_node, intSI_type_node,
11465 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
11466 = build_function_type_list (V2HI_type_node,
11467 V4QI_type_node, V2HI_type_node,
11470 types[MIPS_SI_FTYPE_V2HI_V2HI]
11471 = build_function_type_list (intSI_type_node,
11472 V2HI_type_node, V2HI_type_node,
11475 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
11476 = build_function_type_list (intDI_type_node,
11477 intDI_type_node, V4QI_type_node, V4QI_type_node,
11480 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
11481 = build_function_type_list (intDI_type_node,
11482 intDI_type_node, V2HI_type_node, V2HI_type_node,
11485 types[MIPS_DI_FTYPE_DI_SI_SI]
11486 = build_function_type_list (intDI_type_node,
11487 intDI_type_node, intSI_type_node, intSI_type_node,
11490 types[MIPS_V4QI_FTYPE_SI]
11491 = build_function_type_list (V4QI_type_node,
11495 types[MIPS_V2HI_FTYPE_SI]
11496 = build_function_type_list (V2HI_type_node,
11500 types[MIPS_VOID_FTYPE_V4QI_V4QI]
11501 = build_function_type_list (void_type_node,
11502 V4QI_type_node, V4QI_type_node,
11505 types[MIPS_SI_FTYPE_V4QI_V4QI]
11506 = build_function_type_list (intSI_type_node,
11507 V4QI_type_node, V4QI_type_node,
11510 types[MIPS_VOID_FTYPE_V2HI_V2HI]
11511 = build_function_type_list (void_type_node,
11512 V2HI_type_node, V2HI_type_node,
11515 types[MIPS_SI_FTYPE_DI_SI]
11516 = build_function_type_list (intSI_type_node,
11517 intDI_type_node, intSI_type_node,
11520 types[MIPS_DI_FTYPE_DI_SI]
11521 = build_function_type_list (intDI_type_node,
11522 intDI_type_node, intSI_type_node,
11525 types[MIPS_VOID_FTYPE_SI_SI]
11526 = build_function_type_list (void_type_node,
11527 intSI_type_node, intSI_type_node,
11530 types[MIPS_SI_FTYPE_PTR_SI]
11531 = build_function_type_list (intSI_type_node,
11532 ptr_type_node, intSI_type_node,
11535 types[MIPS_SI_FTYPE_VOID]
11536 = build_function_type (intSI_type_node, void_list_node);
11540 types[MIPS_V4QI_FTYPE_V4QI]
11541 = build_function_type_list (V4QI_type_node,
11545 types[MIPS_SI_FTYPE_SI_SI_SI]
11546 = build_function_type_list (intSI_type_node,
11547 intSI_type_node, intSI_type_node,
11548 intSI_type_node, NULL_TREE);
11550 types[MIPS_DI_FTYPE_DI_USI_USI]
11551 = build_function_type_list (intDI_type_node,
11553 unsigned_intSI_type_node,
11554 unsigned_intSI_type_node, NULL_TREE);
11556 types[MIPS_DI_FTYPE_SI_SI]
11557 = build_function_type_list (intDI_type_node,
11558 intSI_type_node, intSI_type_node,
11561 types[MIPS_DI_FTYPE_USI_USI]
11562 = build_function_type_list (intDI_type_node,
11563 unsigned_intSI_type_node,
11564 unsigned_intSI_type_node, NULL_TREE);
11566 types[MIPS_V2HI_FTYPE_SI_SI_SI]
11567 = build_function_type_list (V2HI_type_node,
11568 intSI_type_node, intSI_type_node,
11569 intSI_type_node, NULL_TREE);
11574 /* Iterate through all of the bdesc arrays, initializing all of the
11575 builtin functions. */
/* `offset' gives each array's entries distinct function codes; its
   initialization/increment is among the missing lines.  */
11578 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11580 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
11581 && (m->unsupported_target_flags & target_flags) == 0)
11582 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
11583 if ((d->target_flags & target_flags) == d->target_flags)
11584 add_builtin_function (d->name, types[d->function_type],
11585 d - m->bdesc + offset,
11586 BUILT_IN_MD, NULL, NULL);
/* NOTE(review): the `has_target' parameter, the declarations/initialization
   of `i' and `j', the switch opener on the operand count, and the final
   return are missing from this excerpt.  The visible logic prepares the
   target and argument operands, then emits the pattern with the matching
   number of operands.  */
11591 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
11592 .md pattern and CALL is the function expr with arguments. TARGET,
11593 if nonnull, suggests a good place to put the result.
11594 HAS_TARGET indicates the function must return something. */
11597 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11600 rtx ops[MAX_RECOG_OPERANDS];
11606 /* We save target to ops[0]. */
11607 ops[0] = mips_prepare_builtin_target (icode, 0, target);
11611 /* We need to test if the arglist is not zero. Some instructions have extra
11612 clobber registers. */
11613 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
11614 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11619 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11623 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11627 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11631 gcc_unreachable ();
/* NOTE(review): braces and the trailing `return target;' are missing from
   this excerpt.  First a c.cond.ps-style comparison is emitted into
   CMP_RESULT; then a conditional paired-single move selects between the
   third and fourth call arguments.  MOVT and MOVF differ only in which
   argument becomes the "true" operand (the operand order is swapped).  */
11636 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
11637 function (TYPE says which). EXP is the tree for the function
11638 function, ICODE is the instruction that should be used to compare
11639 the first two arguments, and COND is the condition it should test.
11640 TARGET, if nonnull, suggests a good place to put the result. */
11643 mips_expand_builtin_movtf (enum mips_builtin_type type,
11644 enum insn_code icode, enum mips_fp_condition cond,
11645 rtx target, tree exp)
11647 rtx cmp_result, op0, op1;
11649 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11650 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
11651 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
11652 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
11654 icode = CODE_FOR_mips_cond_move_tf_ps;
11655 target = mips_prepare_builtin_target (icode, 0, target);
11656 if (type == MIPS_BUILTIN_MOVT)
11658 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
11659 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
11663 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
11664 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
11666 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
/* NOTE(review): braces, a barrier emission, and the final `return target;'
   fall outside this excerpt.  Emits: move VALUE_IF_FALSE; branch on
   CONDITION; overwrite with VALUE_IF_TRUE only on the true path.  */
11670 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
11671 into TARGET otherwise. Return TARGET. */
11674 mips_builtin_branch_and_move (rtx condition, rtx target,
11675 rtx value_if_true, rtx value_if_false)
11677 rtx true_label, done_label;
11679 true_label = gen_label_rtx ();
11680 done_label = gen_label_rtx ();
11682 /* First assume that CONDITION is false. */
11683 emit_move_insn (target, value_if_false);
11685 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
11686 emit_jump_insn (gen_condjump (condition, true_label));
11687 emit_jump_insn (gen_jump (done_label));
11690 /* Fix TARGET if CONDITION is true. */
11691 emit_label (true_label);
11692 emit_move_insn (target, value_if_true);
11694 emit_label (done_label);
/* NOTE(review): the declarations/initialization of `i' and `j', the switch
   case labels (the operand-count values for the 4- and 6-operand forms),
   and the closing braces are missing from this excerpt.  The function emits
   the comparison, then converts the (possibly multi-register) CC result into
   an SImode boolean via mips_builtin_branch_and_move.  */
11698 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
11699 of the comparison instruction and COND is the condition it should test.
11700 EXP is the function call and arguments and TARGET, if nonnull,
11701 suggests a good place to put the boolean result. */
11704 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
11705 enum insn_code icode, enum mips_fp_condition cond,
11706 rtx target, tree exp)
11708 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
11712 if (target == 0 || GET_MODE (target) != SImode)
11713 target = gen_reg_rtx (SImode);
11715 /* Prepare the operands to the comparison. */
11716 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11717 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
11718 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11720 switch (insn_data[icode].n_operands)
11723 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
11727 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
11728 ops[3], ops[4], GEN_INT (cond)));
11732 gcc_unreachable ();
11735 /* If the comparison sets more than one register, we define the result
11736 to be 0 if all registers are false and -1 if all registers are true.
11737 The value of the complete result is indeterminate otherwise. */
11738 switch (builtin_type)
11740 case MIPS_BUILTIN_CMP_ALL:
11741 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
11742 return mips_builtin_branch_and_move (condition, target,
11743 const0_rtx, const1_rtx);
11745 case MIPS_BUILTIN_CMP_UPPER:
11746 case MIPS_BUILTIN_CMP_LOWER:
11747 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
11748 condition = gen_single_cc (cmp_result, offset);
11749 return mips_builtin_branch_and_move (condition, target,
11750 const1_rtx, const0_rtx);
11753 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
11754 return mips_builtin_branch_and_move (condition, target,
11755 const1_rtx, const0_rtx);
/* NOTE(review): the `cmp_value' declaration, its assignment (32 for
   BPOSGE32 per the conditional), and surrounding braces are missing from
   this excerpt.  Compares the DSP `pos' field (CCDSP_PO_REGNUM) against the
   threshold and produces a 0/1 SImode result.  */
11759 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
11760 suggests a good place to put the boolean result. */
11763 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
11765 rtx condition, cmp_result;
11768 if (target == 0 || GET_MODE (target) != SImode)
11769 target = gen_reg_rtx (SImode);
11771 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
11773 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
11778 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value))
11779 return mips_builtin_branch_and_move (condition, target,
11780 const1_rtx, const0_rtx);
/* NOTE(review): braces are missing from this excerpt.  After the default
   handling, function symbols are flagged SYMBOL_FLAG_LONG_CALL when
   -mlong-calls is active (unless the decl is `near') or the decl is `far'.  */
11783 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
11784 FIRST is true if this is the first time handling this decl. */
11787 mips_encode_section_info (tree decl, rtx rtl, int first)
11789 default_encode_section_info (decl, rtl, first);
11791 if (TREE_CODE (decl) == FUNCTION_DECL)
11793 rtx symbol = XEXP (rtl, 0);
11795 if ((TARGET_LONG_CALLS && !mips_near_type_p (TREE_TYPE (decl)))
11796 || mips_far_type_p (TREE_TYPE (decl)))
11797 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
/* NOTE(review): function braces are missing from this excerpt.  Marks $25
   (PIC_FUNCTION_ADDR_REGNUM) live on entry for GOT-based, non-absolute
   abicalls code models.  */
11801 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
11802 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
11805 mips_extra_live_on_entry (bitmap regs)
11807 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
11808 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
/* NOTE(review): braces and the fall-through `return UNKNOWN;' (or
   equivalent) are missing from this excerpt.  Implements
   TARGET_MODE_REP_EXTENDED: on 64-bit targets, SImode values held in DImode
   registers are kept sign-extended.  */
11811 /* SImode values are represented as sign-extended to DImode. */
11814 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
11816 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
11817 return SIGN_EXTEND;
11822 #include "gt-mips.h"