1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.
   XINT (X, 1) is the unspec number; address wrappers use the range
   [UNSPEC_ADDRESS_FIRST, UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES),
   with the offset encoding the mips_symbol_type.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
67 /* Extract the symbol or label from UNSPEC wrapper X. */
68 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  The unspec number
   encodes the type as an offset from UNSPEC_ADDRESS_FIRST; X must
   satisfy UNSPEC_ADDRESS_P.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
75 /* The maximum distance between the top of the stack frame and the
76 value $sp has when we save and restore registers.
78 The value for normal-mode code must be a SMALL_OPERAND and must
79 preserve the maximum stack alignment. We therefore use a value
80 of 0x7ff0 in this case.
82 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
83 up to 0x7f8 bytes and can usually save or restore all the registers
84 that we need to save or restore. (Note that we can only use these
85 instructions for o32, for which the stack alignment is 8 bytes.)
87 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
88 RESTORE are not available. We can then use unextended instructions
89 to save and restore registers, and to allocate and deallocate the top
/* See the block comment above for how each of these limits was chosen.  */
#define MIPS_MAX_FIRST_STACK_STEP					\
  (!TARGET_MIPS16 ? 0x7ff0 /* largest aligned SMALL_OPERAND */		\
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 /* SAVE/RESTORE limit */	\
   : TARGET_64BIT ? 0x100 : 0x400) /* unextended MIPS16 range */
96 /* True if INSN is a mips.md pattern or asm statement. */
97 #define USEFUL_INSN_P(INSN) \
99 && GET_CODE (PATTERN (INSN)) != USE \
100 && GET_CODE (PATTERN (INSN)) != CLOBBER \
101 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
102 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
104 /* If INSN is a delayed branch sequence, return the first instruction
105 in the sequence, otherwise return INSN itself. */
106 #define SEQ_BEGIN(INSN) \
107 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
108 ? XVECEXP (PATTERN (INSN), 0, 0) \
111 /* Likewise for the last instruction in a delayed branch sequence. */
112 #define SEQ_END(INSN) \
113 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
114 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  Note that the
   loop condition re-evaluates SEQ_END (INSN) on every iteration, so the
   body should not modify INSN's sequence.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.  Use an unsigned constant for the
   shift: "1 << 31" would left-shift a signed int into its sign bit,
   which is undefined behavior in C, and bit 31 is a valid register
   number in the save masks this macro is used with.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1UL << (BIT))) != 0)
127 /* Classifies an address.
130 A natural register + offset address. The register satisfies
131 mips_valid_base_register_p and the offset is a const_arith_operand.
134 A LO_SUM rtx. The first operand is a valid base register and
135 the second operand is a symbolic address.
138 A signed 16-bit constant address.
141 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
142 enum mips_address_type {
149 /* Classifies the prototype of a builtin function. */
150 enum mips_function_type
152 MIPS_V2SF_FTYPE_V2SF,
153 MIPS_V2SF_FTYPE_V2SF_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
156 MIPS_V2SF_FTYPE_SF_SF,
157 MIPS_INT_FTYPE_V2SF_V2SF,
158 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
159 MIPS_INT_FTYPE_SF_SF,
160 MIPS_INT_FTYPE_DF_DF,
167 /* For MIPS DSP ASE */
169 MIPS_DI_FTYPE_DI_SI_SI,
170 MIPS_DI_FTYPE_DI_V2HI_V2HI,
171 MIPS_DI_FTYPE_DI_V4QI_V4QI,
173 MIPS_SI_FTYPE_PTR_SI,
177 MIPS_SI_FTYPE_V2HI_V2HI,
179 MIPS_SI_FTYPE_V4QI_V4QI,
182 MIPS_V2HI_FTYPE_SI_SI,
183 MIPS_V2HI_FTYPE_V2HI,
184 MIPS_V2HI_FTYPE_V2HI_SI,
185 MIPS_V2HI_FTYPE_V2HI_V2HI,
186 MIPS_V2HI_FTYPE_V4QI,
187 MIPS_V2HI_FTYPE_V4QI_V2HI,
189 MIPS_V4QI_FTYPE_V2HI_V2HI,
190 MIPS_V4QI_FTYPE_V4QI_SI,
191 MIPS_V4QI_FTYPE_V4QI_V4QI,
192 MIPS_VOID_FTYPE_SI_SI,
193 MIPS_VOID_FTYPE_V2HI_V2HI,
194 MIPS_VOID_FTYPE_V4QI_V4QI,
196 /* For MIPS DSP REV 2 ASE. */
197 MIPS_V4QI_FTYPE_V4QI,
198 MIPS_SI_FTYPE_SI_SI_SI,
199 MIPS_DI_FTYPE_DI_USI_USI,
201 MIPS_DI_FTYPE_USI_USI,
202 MIPS_V2HI_FTYPE_SI_SI_SI,
208 /* Specifies how a builtin function should be converted into rtl. */
209 enum mips_builtin_type
211 /* The builtin corresponds directly to an .md pattern. The return
212 value is mapped to operand 0 and the arguments are mapped to
213 operands 1 and above. */
216 /* The builtin corresponds directly to an .md pattern. There is no return
217 value and the arguments are mapped to operands 0 and above. */
218 MIPS_BUILTIN_DIRECT_NO_TARGET,
220 /* The builtin corresponds to a comparison instruction followed by
221 a mips_cond_move_tf_ps pattern. The first two arguments are the
222 values to compare and the second two arguments are the vector
223 operands for the movt.ps or movf.ps instruction (in assembly order). */
227 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
228 of this instruction is the result of the comparison, which has mode
229 CCV2 or CCV4. The function arguments are mapped to operands 1 and
230 above. The function's return value is an SImode boolean that is
231 true under the following conditions:
233 MIPS_BUILTIN_CMP_ANY: one of the registers is true
234 MIPS_BUILTIN_CMP_ALL: all of the registers are true
235 MIPS_BUILTIN_CMP_LOWER: the first register is true
236 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
237 MIPS_BUILTIN_CMP_ANY,
238 MIPS_BUILTIN_CMP_ALL,
239 MIPS_BUILTIN_CMP_UPPER,
240 MIPS_BUILTIN_CMP_LOWER,
242 /* As above, but the instruction only sets a single $fcc register. */
243 MIPS_BUILTIN_CMP_SINGLE,
245 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
246 MIPS_BUILTIN_BPOSGE32
249 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
250 #define MIPS_FP_CONDITIONS(MACRO) \
268 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
269 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
270 enum mips_fp_condition {
271 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
274 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
275 #define STRINGIFY(X) #X
276 static const char *const mips_fp_conditions[] = {
277 MIPS_FP_CONDITIONS (STRINGIFY)
/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  See mips_save_reg and
   mips_restore_reg for the two implementations.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);
284 struct mips16_constant;
285 struct mips_arg_info;
286 struct mips_address_info;
287 struct mips_integer_op;
290 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
291 static bool mips_classify_address (struct mips_address_info *, rtx,
292 enum machine_mode, int);
293 static bool mips_cannot_force_const_mem (rtx);
294 static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
295 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
296 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
297 static rtx mips_force_temporary (rtx, rtx);
298 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
299 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
300 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
301 static unsigned int mips_build_lower (struct mips_integer_op *,
302 unsigned HOST_WIDE_INT);
303 static unsigned int mips_build_integer (struct mips_integer_op *,
304 unsigned HOST_WIDE_INT);
305 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
306 static int m16_check_op (rtx, int, int, int);
307 static bool mips_rtx_costs (rtx, int, int, int *);
308 static int mips_address_cost (rtx);
309 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
310 static void mips_load_call_address (rtx, rtx, int);
311 static bool mips_function_ok_for_sibcall (tree, tree);
312 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
313 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
314 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
315 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
316 tree, int, struct mips_arg_info *);
317 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
318 static void mips_set_architecture (const struct mips_cpu_info *);
319 static void mips_set_tune (const struct mips_cpu_info *);
320 static bool mips_handle_option (size_t, const char *, int);
321 static struct machine_function *mips_init_machine_status (void);
322 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
324 static void mips_file_start (void);
325 static int mips_small_data_pattern_1 (rtx *, void *);
326 static int mips_rewrite_small_data_1 (rtx *, void *);
327 static bool mips_function_has_gp_insn (void);
328 static unsigned int mips_global_pointer (void);
329 static bool mips_save_reg_p (unsigned int);
330 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
331 mips_save_restore_fn);
332 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
333 static void mips_output_cplocal (void);
334 static void mips_emit_loadgp (void);
335 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
336 static void mips_set_frame_expr (rtx);
337 static rtx mips_frame_set (rtx, rtx);
338 static void mips_save_reg (rtx, rtx);
339 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
340 static void mips_restore_reg (rtx, rtx);
341 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
342 HOST_WIDE_INT, tree);
343 static int symbolic_expression_p (rtx);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (const_tree);
348 static bool mips_use_anchors_for_symbol_p (const_rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_vector_mode_supported_p (enum machine_mode);
412 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
413 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
414 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
415 static void mips_init_builtins (void);
416 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
417 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
418 enum insn_code, enum mips_fp_condition,
420 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
421 enum insn_code, enum mips_fp_condition,
423 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
424 static void mips_encode_section_info (tree, rtx, int);
425 static void mips_extra_live_on_entry (bitmap);
426 static int mips_comp_type_attributes (const_tree, const_tree);
427 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
428 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
429 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
431 /* Structure to be filled in by compute_frame_size with register
432 save masks, and offsets for the current function. */
434 struct mips_frame_info GTY(())
436 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
437 HOST_WIDE_INT var_size; /* # bytes that variables take up */
438 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
439 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
440 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
441 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
442 unsigned int mask; /* mask of saved gp registers */
443 unsigned int fmask; /* mask of saved fp registers */
444 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
445 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
446 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
447 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
448 bool initialized; /* true if frame size already calculated */
449 int num_gp; /* number of gp registers saved */
450 int num_fp; /* number of fp registers saved */
453 struct machine_function GTY(()) {
454 /* Pseudo-reg holding the value of $28 in a mips16 function which
455 refers to GP relative global variables. */
456 rtx mips16_gp_pseudo_rtx;
458 /* The number of extra stack bytes taken up by register varargs.
459 This area is allocated by the callee at the very top of the frame. */
462 /* Current frame information, calculated by compute_frame_size. */
463 struct mips_frame_info frame;
465 /* The register to use as the global pointer within this function. */
466 unsigned int global_pointer;
468 /* True if mips_adjust_insn_length should ignore an instruction's
470 bool ignore_hazard_length_p;
472 /* True if the whole function is suitable for .set noreorder and
474 bool all_noreorder_p;
476 /* True if the function is known to have an instruction that needs $gp. */
479 /* True if we have emitted an instruction to initialize
480 mips16_gp_pseudo_rtx. */
481 bool initialized_mips16_gp_pseudo_p;
484 /* Information about a single argument. */
487 /* True if the argument is passed in a floating-point register, or
488 would have been if we hadn't run out of registers. */
491 /* The number of words passed in registers, rounded up. */
492 unsigned int reg_words;
494 /* For EABI, the offset of the first register from GP_ARG_FIRST or
495 FP_ARG_FIRST. For other ABIs, the offset of the first register from
496 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
497 comment for details).
499 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
501 unsigned int reg_offset;
503 /* The number of words that must be passed on the stack, rounded up. */
504 unsigned int stack_words;
506 /* The offset from the start of the stack overflow area of the argument's
507 first stack word. Only meaningful when STACK_WORDS is nonzero. */
508 unsigned int stack_offset;
512 /* Information about an address described by mips_address_type.
518 REG is the base register and OFFSET is the constant offset.
521 REG is the register that contains the high part of the address,
522 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
523 is the type of OFFSET's symbol.
526 SYMBOL_TYPE is the type of symbol being referenced. */
528 struct mips_address_info
530 enum mips_address_type type;
533 enum mips_symbol_type symbol_type;
537 /* One stage in a constant building sequence. These sequences have
541 A = A CODE[1] VALUE[1]
542 A = A CODE[2] VALUE[2]
545 where A is an accumulator, each CODE[i] is a binary rtl operation
546 and each VALUE[i] is a constant integer. */
547 struct mips_integer_op {
549 unsigned HOST_WIDE_INT value;
/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end.  See mips_build_integer, which fills arrays
   of struct mips_integer_op bounded by this constant.  */
#define MIPS_MAX_INTEGER_OPS 7
559 /* Information about a MIPS16e SAVE or RESTORE instruction. */
560 struct mips16e_save_restore_info {
561 /* The number of argument registers saved by a SAVE instruction.
562 0 for RESTORE instructions. */
565 /* Bit X is set if the instruction saves or restores GPR X. */
568 /* The total number of bytes to allocate. */
572 /* Global variables for machine-dependent things. */
574 /* Threshold for data being put into the small data/bss area, instead
575 of the normal data area. */
576 int mips_section_threshold = -1;
578 /* Count the number of .file directives, so that .loc is up to date. */
579 int num_source_filenames = 0;
581 /* Count the number of sdb related labels are generated (to find block
582 start and end boundaries). */
583 int sdb_label_count = 0;
585 /* Next label # for each statement for Silicon Graphics IRIS systems. */
588 /* Name of the file containing the current function. */
589 const char *current_function_file = "";
591 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
597 /* The next branch instruction is a branch likely, not branch normal. */
598 int mips_branch_likely;
600 /* The operands passed to the last cmpMM expander. */
603 /* The target cpu for code generation. */
604 enum processor_type mips_arch;
605 const struct mips_cpu_info *mips_arch_info;
607 /* The target cpu for optimization and scheduling. */
608 enum processor_type mips_tune;
609 const struct mips_cpu_info *mips_tune_info;
611 /* Which instruction set architecture to use. */
614 /* Which ABI to use. */
615 int mips_abi = MIPS_ABI_DEFAULT;
617 /* Cost information to use. */
618 const struct mips_rtx_cost_data *mips_cost;
620 /* The -mtext-loads setting. */
621 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
623 /* The architecture selected by -mipsN. */
624 static const struct mips_cpu_info *mips_isa_info;
626 /* If TRUE, we split addresses into their high and low parts in the RTL. */
627 int mips_split_addresses;
629 /* Mode used for saving/restoring general purpose registers. */
630 static enum machine_mode gpr_mode;
632 /* Array giving truth value on whether or not a given hard register
633 can support a given mode. */
634 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
636 /* List of all MIPS punctuation characters used by print_operand. */
637 char mips_print_operand_punct[256];
639 /* Map GCC register number to debugger register number. */
640 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
641 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
643 /* A copy of the original flag_delayed_branch: see override_options. */
644 static int mips_flag_delayed_branch;
646 static GTY (()) int mips_output_filename_first_time = 1;
648 /* mips_split_p[X] is true if symbols of type X can be split by
649 mips_split_symbol(). */
650 bool mips_split_p[NUM_SYMBOL_TYPES];
652 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
653 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
654 if they are matched by a special .md file pattern. */
655 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
657 /* Likewise for HIGHs. */
658 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
660 /* Map hard register number to register class */
661 const enum reg_class mips_regno_to_class[] =
663 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
664 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
665 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
666 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
667 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
668 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
669 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
670 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
671 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
672 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
673 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
674 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
675 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
676 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
677 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
679 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
680 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
681 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
682 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
683 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
684 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
685 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
686 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
687 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
688 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
689 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
690 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
691 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
692 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
693 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
694 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
695 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
696 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
697 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
698 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
699 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
700 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
701 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
702 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
703 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
704 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
705 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
706 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
707 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
708 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
709 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
712 /* Table of machine dependent attributes. */
713 const struct attribute_spec mips_attribute_table[] =
715 { "long_call", 0, 0, false, true, true, NULL },
716 { "far", 0, 0, false, true, true, NULL },
717 { "near", 0, 0, false, true, true, NULL },
718 { NULL, 0, 0, false, false, false, NULL }
721 /* A table describing all the processors gcc knows about. Names are
722 matched in the order listed. The first mention of an ISA level is
723 taken as the canonical name for that ISA.
725 To ease comparison, please keep this table in the same order as
726 gas's mips_cpu_info_table[]. Please also make sure that
727 MIPS_ISA_LEVEL_SPEC handles all -march options correctly. */
728 const struct mips_cpu_info mips_cpu_info_table[] = {
729 /* Entries for generic ISAs */
730 { "mips1", PROCESSOR_R3000, 1 },
731 { "mips2", PROCESSOR_R6000, 2 },
732 { "mips3", PROCESSOR_R4000, 3 },
733 { "mips4", PROCESSOR_R8000, 4 },
734 { "mips32", PROCESSOR_4KC, 32 },
735 { "mips32r2", PROCESSOR_M4K, 33 },
736 { "mips64", PROCESSOR_5KC, 64 },
739 { "r3000", PROCESSOR_R3000, 1 },
740 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
741 { "r3900", PROCESSOR_R3900, 1 },
744 { "r6000", PROCESSOR_R6000, 2 },
747 { "r4000", PROCESSOR_R4000, 3 },
748 { "vr4100", PROCESSOR_R4100, 3 },
749 { "vr4111", PROCESSOR_R4111, 3 },
750 { "vr4120", PROCESSOR_R4120, 3 },
751 { "vr4130", PROCESSOR_R4130, 3 },
752 { "vr4300", PROCESSOR_R4300, 3 },
753 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
754 { "r4600", PROCESSOR_R4600, 3 },
755 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
756 { "r4650", PROCESSOR_R4650, 3 },
759 { "r8000", PROCESSOR_R8000, 4 },
760 { "vr5000", PROCESSOR_R5000, 4 },
761 { "vr5400", PROCESSOR_R5400, 4 },
762 { "vr5500", PROCESSOR_R5500, 4 },
763 { "rm7000", PROCESSOR_R7000, 4 },
764 { "rm9000", PROCESSOR_R9000, 4 },
767 { "4kc", PROCESSOR_4KC, 32 },
768 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
769 { "4kp", PROCESSOR_4KP, 32 },
770 { "4ksc", PROCESSOR_4KC, 32 },
772 /* MIPS32 Release 2 */
773 { "m4k", PROCESSOR_M4K, 33 },
774 { "4kec", PROCESSOR_4KC, 33 },
775 { "4kem", PROCESSOR_4KC, 33 },
776 { "4kep", PROCESSOR_4KP, 33 },
777 { "4ksd", PROCESSOR_4KC, 33 },
779 { "24kc", PROCESSOR_24KC, 33 },
780 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
781 { "24kf", PROCESSOR_24KF2_1, 33 },
782 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
783 { "24kfx", PROCESSOR_24KF1_1, 33 },
784 { "24kx", PROCESSOR_24KF1_1, 33 },
786 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
787 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
788 { "24kef", PROCESSOR_24KF2_1, 33 },
789 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
790 { "24kefx", PROCESSOR_24KF1_1, 33 },
791 { "24kex", PROCESSOR_24KF1_1, 33 },
793 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
794 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
795 { "34kf", PROCESSOR_24KF2_1, 33 },
796 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
797 { "34kfx", PROCESSOR_24KF1_1, 33 },
798 { "34kx", PROCESSOR_24KF1_1, 33 },
800 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
801 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
802 { "74kf", PROCESSOR_74KF2_1, 33 },
803 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
804 { "74kfx", PROCESSOR_74KF1_1, 33 },
805 { "74kx", PROCESSOR_74KF1_1, 33 },
806 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
809 { "5kc", PROCESSOR_5KC, 64 },
810 { "5kf", PROCESSOR_5KF, 64 },
811 { "20kc", PROCESSOR_20KC, 64 },
812 { "sb1", PROCESSOR_SB1, 64 },
813 { "sb1a", PROCESSOR_SB1A, 64 },
814 { "sr71000", PROCESSOR_SR71000, 64 },
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  All fields except branch_cost and
   memory_latency are expressed in COSTS_N_INSNS units.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                                       2, /* branch_cost */  \
                                       4  /* memory_latency */
/* Need to replace these with the costs of calling the appropriate
   libgcc routine.  */
836 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
837 COSTS_N_INSNS (256), /* fp_mult_sf */ \
838 COSTS_N_INSNS (256), /* fp_mult_df */ \
839 COSTS_N_INSNS (256), /* fp_div_sf */ \
840 COSTS_N_INSNS (256) /* fp_div_df */
842 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
844 COSTS_N_INSNS (1), /* fp_add */
845 COSTS_N_INSNS (1), /* fp_mult_sf */
846 COSTS_N_INSNS (1), /* fp_mult_df */
847 COSTS_N_INSNS (1), /* fp_div_sf */
848 COSTS_N_INSNS (1), /* fp_div_df */
849 COSTS_N_INSNS (1), /* int_mult_si */
850 COSTS_N_INSNS (1), /* int_mult_di */
851 COSTS_N_INSNS (1), /* int_div_si */
852 COSTS_N_INSNS (1), /* int_div_di */
854 4 /* memory_latency */
857 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
860 COSTS_N_INSNS (2), /* fp_add */
861 COSTS_N_INSNS (4), /* fp_mult_sf */
862 COSTS_N_INSNS (5), /* fp_mult_df */
863 COSTS_N_INSNS (12), /* fp_div_sf */
864 COSTS_N_INSNS (19), /* fp_div_df */
865 COSTS_N_INSNS (12), /* int_mult_si */
866 COSTS_N_INSNS (12), /* int_mult_di */
867 COSTS_N_INSNS (35), /* int_div_si */
868 COSTS_N_INSNS (35), /* int_div_di */
870 4 /* memory_latency */
875 COSTS_N_INSNS (6), /* int_mult_si */
876 COSTS_N_INSNS (6), /* int_mult_di */
877 COSTS_N_INSNS (36), /* int_div_si */
878 COSTS_N_INSNS (36), /* int_div_di */
880 4 /* memory_latency */
884 COSTS_N_INSNS (36), /* int_mult_si */
885 COSTS_N_INSNS (36), /* int_mult_di */
886 COSTS_N_INSNS (37), /* int_div_si */
887 COSTS_N_INSNS (37), /* int_div_di */
889 4 /* memory_latency */
893 COSTS_N_INSNS (4), /* int_mult_si */
894 COSTS_N_INSNS (11), /* int_mult_di */
895 COSTS_N_INSNS (36), /* int_div_si */
896 COSTS_N_INSNS (68), /* int_div_di */
898 4 /* memory_latency */
901 COSTS_N_INSNS (4), /* fp_add */
902 COSTS_N_INSNS (4), /* fp_mult_sf */
903 COSTS_N_INSNS (5), /* fp_mult_df */
904 COSTS_N_INSNS (17), /* fp_div_sf */
905 COSTS_N_INSNS (32), /* fp_div_df */
906 COSTS_N_INSNS (4), /* int_mult_si */
907 COSTS_N_INSNS (11), /* int_mult_di */
908 COSTS_N_INSNS (36), /* int_div_si */
909 COSTS_N_INSNS (68), /* int_div_di */
911 4 /* memory_latency */
914 COSTS_N_INSNS (4), /* fp_add */
915 COSTS_N_INSNS (4), /* fp_mult_sf */
916 COSTS_N_INSNS (5), /* fp_mult_df */
917 COSTS_N_INSNS (17), /* fp_div_sf */
918 COSTS_N_INSNS (32), /* fp_div_df */
919 COSTS_N_INSNS (4), /* int_mult_si */
920 COSTS_N_INSNS (7), /* int_mult_di */
921 COSTS_N_INSNS (42), /* int_div_si */
922 COSTS_N_INSNS (72), /* int_div_di */
924 4 /* memory_latency */
928 COSTS_N_INSNS (5), /* int_mult_si */
929 COSTS_N_INSNS (5), /* int_mult_di */
930 COSTS_N_INSNS (41), /* int_div_si */
931 COSTS_N_INSNS (41), /* int_div_di */
933 4 /* memory_latency */
936 COSTS_N_INSNS (8), /* fp_add */
937 COSTS_N_INSNS (8), /* fp_mult_sf */
938 COSTS_N_INSNS (10), /* fp_mult_df */
939 COSTS_N_INSNS (34), /* fp_div_sf */
940 COSTS_N_INSNS (64), /* fp_div_df */
941 COSTS_N_INSNS (5), /* int_mult_si */
942 COSTS_N_INSNS (5), /* int_mult_di */
943 COSTS_N_INSNS (41), /* int_div_si */
944 COSTS_N_INSNS (41), /* int_div_di */
946 4 /* memory_latency */
949 COSTS_N_INSNS (4), /* fp_add */
950 COSTS_N_INSNS (4), /* fp_mult_sf */
951 COSTS_N_INSNS (5), /* fp_mult_df */
952 COSTS_N_INSNS (17), /* fp_div_sf */
953 COSTS_N_INSNS (32), /* fp_div_df */
954 COSTS_N_INSNS (5), /* int_mult_si */
955 COSTS_N_INSNS (5), /* int_mult_di */
956 COSTS_N_INSNS (41), /* int_div_si */
957 COSTS_N_INSNS (41), /* int_div_di */
959 4 /* memory_latency */
963 COSTS_N_INSNS (5), /* int_mult_si */
964 COSTS_N_INSNS (5), /* int_mult_di */
965 COSTS_N_INSNS (41), /* int_div_si */
966 COSTS_N_INSNS (41), /* int_div_di */
968 4 /* memory_latency */
971 COSTS_N_INSNS (8), /* fp_add */
972 COSTS_N_INSNS (8), /* fp_mult_sf */
973 COSTS_N_INSNS (10), /* fp_mult_df */
974 COSTS_N_INSNS (34), /* fp_div_sf */
975 COSTS_N_INSNS (64), /* fp_div_df */
976 COSTS_N_INSNS (5), /* int_mult_si */
977 COSTS_N_INSNS (5), /* int_mult_di */
978 COSTS_N_INSNS (41), /* int_div_si */
979 COSTS_N_INSNS (41), /* int_div_di */
981 4 /* memory_latency */
984 COSTS_N_INSNS (4), /* fp_add */
985 COSTS_N_INSNS (4), /* fp_mult_sf */
986 COSTS_N_INSNS (5), /* fp_mult_df */
987 COSTS_N_INSNS (17), /* fp_div_sf */
988 COSTS_N_INSNS (32), /* fp_div_df */
989 COSTS_N_INSNS (5), /* int_mult_si */
990 COSTS_N_INSNS (5), /* int_mult_di */
991 COSTS_N_INSNS (41), /* int_div_si */
992 COSTS_N_INSNS (41), /* int_div_di */
994 4 /* memory_latency */
997 COSTS_N_INSNS (6), /* fp_add */
998 COSTS_N_INSNS (6), /* fp_mult_sf */
999 COSTS_N_INSNS (7), /* fp_mult_df */
1000 COSTS_N_INSNS (25), /* fp_div_sf */
1001 COSTS_N_INSNS (48), /* fp_div_df */
1002 COSTS_N_INSNS (5), /* int_mult_si */
1003 COSTS_N_INSNS (5), /* int_mult_di */
1004 COSTS_N_INSNS (41), /* int_div_si */
1005 COSTS_N_INSNS (41), /* int_div_di */
1006 1, /* branch_cost */
1007 4 /* memory_latency */
1013 COSTS_N_INSNS (2), /* fp_add */
1014 COSTS_N_INSNS (4), /* fp_mult_sf */
1015 COSTS_N_INSNS (5), /* fp_mult_df */
1016 COSTS_N_INSNS (12), /* fp_div_sf */
1017 COSTS_N_INSNS (19), /* fp_div_df */
1018 COSTS_N_INSNS (2), /* int_mult_si */
1019 COSTS_N_INSNS (2), /* int_mult_di */
1020 COSTS_N_INSNS (35), /* int_div_si */
1021 COSTS_N_INSNS (35), /* int_div_di */
1022 1, /* branch_cost */
1023 4 /* memory_latency */
1026 COSTS_N_INSNS (3), /* fp_add */
1027 COSTS_N_INSNS (5), /* fp_mult_sf */
1028 COSTS_N_INSNS (6), /* fp_mult_df */
1029 COSTS_N_INSNS (15), /* fp_div_sf */
1030 COSTS_N_INSNS (16), /* fp_div_df */
1031 COSTS_N_INSNS (17), /* int_mult_si */
1032 COSTS_N_INSNS (17), /* int_mult_di */
1033 COSTS_N_INSNS (38), /* int_div_si */
1034 COSTS_N_INSNS (38), /* int_div_di */
1035 2, /* branch_cost */
1036 6 /* memory_latency */
1039 COSTS_N_INSNS (6), /* fp_add */
1040 COSTS_N_INSNS (7), /* fp_mult_sf */
1041 COSTS_N_INSNS (8), /* fp_mult_df */
1042 COSTS_N_INSNS (23), /* fp_div_sf */
1043 COSTS_N_INSNS (36), /* fp_div_df */
1044 COSTS_N_INSNS (10), /* int_mult_si */
1045 COSTS_N_INSNS (10), /* int_mult_di */
1046 COSTS_N_INSNS (69), /* int_div_si */
1047 COSTS_N_INSNS (69), /* int_div_di */
1048 2, /* branch_cost */
1049 6 /* memory_latency */
1061 /* The only costs that appear to be updated here are
1062 integer multiplication. */
1064 COSTS_N_INSNS (4), /* int_mult_si */
1065 COSTS_N_INSNS (6), /* int_mult_di */
1066 COSTS_N_INSNS (69), /* int_div_si */
1067 COSTS_N_INSNS (69), /* int_div_di */
1068 1, /* branch_cost */
1069 4 /* memory_latency */
1081 COSTS_N_INSNS (6), /* fp_add */
1082 COSTS_N_INSNS (4), /* fp_mult_sf */
1083 COSTS_N_INSNS (5), /* fp_mult_df */
1084 COSTS_N_INSNS (23), /* fp_div_sf */
1085 COSTS_N_INSNS (36), /* fp_div_df */
1086 COSTS_N_INSNS (5), /* int_mult_si */
1087 COSTS_N_INSNS (5), /* int_mult_di */
1088 COSTS_N_INSNS (36), /* int_div_si */
1089 COSTS_N_INSNS (36), /* int_div_di */
1090 1, /* branch_cost */
1091 4 /* memory_latency */
1094 COSTS_N_INSNS (6), /* fp_add */
1095 COSTS_N_INSNS (5), /* fp_mult_sf */
1096 COSTS_N_INSNS (6), /* fp_mult_df */
1097 COSTS_N_INSNS (30), /* fp_div_sf */
1098 COSTS_N_INSNS (59), /* fp_div_df */
1099 COSTS_N_INSNS (3), /* int_mult_si */
1100 COSTS_N_INSNS (4), /* int_mult_di */
1101 COSTS_N_INSNS (42), /* int_div_si */
1102 COSTS_N_INSNS (74), /* int_div_di */
1103 1, /* branch_cost */
1104 4 /* memory_latency */
1107 COSTS_N_INSNS (6), /* fp_add */
1108 COSTS_N_INSNS (5), /* fp_mult_sf */
1109 COSTS_N_INSNS (6), /* fp_mult_df */
1110 COSTS_N_INSNS (30), /* fp_div_sf */
1111 COSTS_N_INSNS (59), /* fp_div_df */
1112 COSTS_N_INSNS (5), /* int_mult_si */
1113 COSTS_N_INSNS (9), /* int_mult_di */
1114 COSTS_N_INSNS (42), /* int_div_si */
1115 COSTS_N_INSNS (74), /* int_div_di */
1116 1, /* branch_cost */
1117 4 /* memory_latency */
1120 /* The only costs that are changed here are
1121 integer multiplication. */
1122 COSTS_N_INSNS (6), /* fp_add */
1123 COSTS_N_INSNS (7), /* fp_mult_sf */
1124 COSTS_N_INSNS (8), /* fp_mult_df */
1125 COSTS_N_INSNS (23), /* fp_div_sf */
1126 COSTS_N_INSNS (36), /* fp_div_df */
1127 COSTS_N_INSNS (5), /* int_mult_si */
1128 COSTS_N_INSNS (9), /* int_mult_di */
1129 COSTS_N_INSNS (69), /* int_div_si */
1130 COSTS_N_INSNS (69), /* int_div_di */
1131 1, /* branch_cost */
1132 4 /* memory_latency */
1138 /* The only costs that are changed here are
1139 integer multiplication. */
1140 COSTS_N_INSNS (6), /* fp_add */
1141 COSTS_N_INSNS (7), /* fp_mult_sf */
1142 COSTS_N_INSNS (8), /* fp_mult_df */
1143 COSTS_N_INSNS (23), /* fp_div_sf */
1144 COSTS_N_INSNS (36), /* fp_div_df */
1145 COSTS_N_INSNS (3), /* int_mult_si */
1146 COSTS_N_INSNS (8), /* int_mult_di */
1147 COSTS_N_INSNS (69), /* int_div_si */
1148 COSTS_N_INSNS (69), /* int_div_di */
1149 1, /* branch_cost */
1150 4 /* memory_latency */
1153 /* These costs are the same as the SB-1A below. */
1154 COSTS_N_INSNS (4), /* fp_add */
1155 COSTS_N_INSNS (4), /* fp_mult_sf */
1156 COSTS_N_INSNS (4), /* fp_mult_df */
1157 COSTS_N_INSNS (24), /* fp_div_sf */
1158 COSTS_N_INSNS (32), /* fp_div_df */
1159 COSTS_N_INSNS (3), /* int_mult_si */
1160 COSTS_N_INSNS (4), /* int_mult_di */
1161 COSTS_N_INSNS (36), /* int_div_si */
1162 COSTS_N_INSNS (68), /* int_div_di */
1163 1, /* branch_cost */
1164 4 /* memory_latency */
1167 /* These costs are the same as the SB-1 above. */
1168 COSTS_N_INSNS (4), /* fp_add */
1169 COSTS_N_INSNS (4), /* fp_mult_sf */
1170 COSTS_N_INSNS (4), /* fp_mult_df */
1171 COSTS_N_INSNS (24), /* fp_div_sf */
1172 COSTS_N_INSNS (32), /* fp_div_df */
1173 COSTS_N_INSNS (3), /* int_mult_si */
1174 COSTS_N_INSNS (4), /* int_mult_di */
1175 COSTS_N_INSNS (36), /* int_div_si */
1176 COSTS_N_INSNS (68), /* int_div_di */
1177 1, /* branch_cost */
1178 4 /* memory_latency */
1185 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1186 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1187 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1188 static const unsigned char mips16e_s2_s8_regs[] = {
1189 30, 23, 22, 21, 20, 19, 18
1191 static const unsigned char mips16e_a0_a3_regs[] = {
1195 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1196 ordered from the uppermost in memory to the lowest in memory. */
1197 static const unsigned char mips16e_save_restore_regs[] = {
1198 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1201 /* Nonzero if -march should decide the default value of
1202 MASK_SOFT_FLOAT_ABI. */
1203 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1204 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1207 /* Initialize the GCC target structure. */
1208 #undef TARGET_ASM_ALIGNED_HI_OP
1209 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1210 #undef TARGET_ASM_ALIGNED_SI_OP
1211 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1212 #undef TARGET_ASM_ALIGNED_DI_OP
1213 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1215 #undef TARGET_ASM_FUNCTION_PROLOGUE
1216 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1217 #undef TARGET_ASM_FUNCTION_EPILOGUE
1218 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1219 #undef TARGET_ASM_SELECT_RTX_SECTION
1220 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1221 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1222 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1224 #undef TARGET_SCHED_INIT
1225 #define TARGET_SCHED_INIT mips_sched_init
1226 #undef TARGET_SCHED_REORDER
1227 #define TARGET_SCHED_REORDER mips_sched_reorder
1228 #undef TARGET_SCHED_REORDER2
1229 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1230 #undef TARGET_SCHED_VARIABLE_ISSUE
1231 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1232 #undef TARGET_SCHED_ADJUST_COST
1233 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1234 #undef TARGET_SCHED_ISSUE_RATE
1235 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1236 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1237 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1238 mips_multipass_dfa_lookahead
1240 #undef TARGET_DEFAULT_TARGET_FLAGS
1241 #define TARGET_DEFAULT_TARGET_FLAGS \
1243 | TARGET_CPU_DEFAULT \
1244 | TARGET_ENDIAN_DEFAULT \
1245 | TARGET_FP_EXCEPTIONS_DEFAULT \
1246 | MASK_CHECK_ZERO_DIV \
1248 #undef TARGET_HANDLE_OPTION
1249 #define TARGET_HANDLE_OPTION mips_handle_option
1251 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1252 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1254 #undef TARGET_VALID_POINTER_MODE
1255 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1256 #undef TARGET_RTX_COSTS
1257 #define TARGET_RTX_COSTS mips_rtx_costs
1258 #undef TARGET_ADDRESS_COST
1259 #define TARGET_ADDRESS_COST mips_address_cost
1261 #undef TARGET_IN_SMALL_DATA_P
1262 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1264 #undef TARGET_MACHINE_DEPENDENT_REORG
1265 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1267 #undef TARGET_ASM_FILE_START
1268 #define TARGET_ASM_FILE_START mips_file_start
1269 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1270 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1272 #undef TARGET_INIT_LIBFUNCS
1273 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1275 #undef TARGET_BUILD_BUILTIN_VA_LIST
1276 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1277 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1278 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1280 #undef TARGET_PROMOTE_FUNCTION_ARGS
1281 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1282 #undef TARGET_PROMOTE_FUNCTION_RETURN
1283 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1284 #undef TARGET_PROMOTE_PROTOTYPES
1285 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1287 #undef TARGET_RETURN_IN_MEMORY
1288 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1289 #undef TARGET_RETURN_IN_MSB
1290 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1292 #undef TARGET_ASM_OUTPUT_MI_THUNK
1293 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1294 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1295 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1297 #undef TARGET_SETUP_INCOMING_VARARGS
1298 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1299 #undef TARGET_STRICT_ARGUMENT_NAMING
1300 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1301 #undef TARGET_MUST_PASS_IN_STACK
1302 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1303 #undef TARGET_PASS_BY_REFERENCE
1304 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1305 #undef TARGET_CALLEE_COPIES
1306 #define TARGET_CALLEE_COPIES mips_callee_copies
1307 #undef TARGET_ARG_PARTIAL_BYTES
1308 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1310 #undef TARGET_MODE_REP_EXTENDED
1311 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1313 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1314 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1316 #undef TARGET_INIT_BUILTINS
1317 #define TARGET_INIT_BUILTINS mips_init_builtins
1318 #undef TARGET_EXPAND_BUILTIN
1319 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1321 #undef TARGET_HAVE_TLS
1322 #define TARGET_HAVE_TLS HAVE_AS_TLS
1324 #undef TARGET_CANNOT_FORCE_CONST_MEM
1325 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1327 #undef TARGET_ENCODE_SECTION_INFO
1328 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1330 #undef TARGET_ATTRIBUTE_TABLE
1331 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1333 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1334 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1336 #undef TARGET_MIN_ANCHOR_OFFSET
1337 #define TARGET_MIN_ANCHOR_OFFSET -32768
1338 #undef TARGET_MAX_ANCHOR_OFFSET
1339 #define TARGET_MAX_ANCHOR_OFFSET 32767
1340 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1341 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1342 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1343 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1345 #undef TARGET_COMP_TYPE_ATTRIBUTES
1346 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1348 #ifdef HAVE_AS_DTPRELWORD
1349 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1350 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1353 struct gcc_target targetm = TARGET_INITIALIZER;
1356 /* Predicates to test for presence of "near" and "far"/"long_call"
1357 attributes on the given TYPE. */
1360 mips_near_type_p (const_tree type)
1362 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1366 mips_far_type_p (const_tree type)
1368 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1369 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1373 /* Return 0 if the attributes for two types are incompatible, 1 if they
1374 are compatible, and 2 if they are nearly compatible (which causes a
1375 warning to be generated). */
1378 mips_comp_type_attributes (const_tree type1, const_tree type2)
1380 /* Check for mismatch of non-default calling convention. */
1381 if (TREE_CODE (type1) != FUNCTION_TYPE)
1384 /* Disallow mixed near/far attributes. */
1385 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1387 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1393 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1394 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1397 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1399 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1401 *base_ptr = XEXP (x, 0);
1402 *offset_ptr = INTVAL (XEXP (x, 1));
1411 /* Return true if SYMBOL_REF X is associated with a global symbol
1412 (in the STB_GLOBAL sense). */
1415 mips_global_symbol_p (const_rtx x)
1417 const_tree const decl = SYMBOL_REF_DECL (x);
1420 return !SYMBOL_REF_LOCAL_P (x);
1422 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1423 or weak symbols. Relocations in the object file will be against
1424 the target symbol, so it's that symbol's binding that matters here. */
1425 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1428 /* Return true if SYMBOL_REF X binds locally. */
1431 mips_symbol_binds_local_p (const_rtx x)
1433 return (SYMBOL_REF_DECL (x)
1434 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1435 : SYMBOL_REF_LOCAL_P (x));
1438 /* Return the method that should be used to access SYMBOL_REF or
1439 LABEL_REF X in context CONTEXT. */
1441 static enum mips_symbol_type
1442 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1445 return SYMBOL_GOT_DISP;
1447 if (GET_CODE (x) == LABEL_REF)
1449 /* LABEL_REFs are used for jump tables as well as text labels.
1450 Only return SYMBOL_PC_RELATIVE if we know the label is in
1451 the text section. */
1452 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1453 return SYMBOL_PC_RELATIVE;
1454 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1455 return SYMBOL_GOT_PAGE_OFST;
1456 return SYMBOL_ABSOLUTE;
1459 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1461 if (SYMBOL_REF_TLS_MODEL (x))
1464 if (CONSTANT_POOL_ADDRESS_P (x))
1466 if (TARGET_MIPS16_TEXT_LOADS)
1467 return SYMBOL_PC_RELATIVE;
1469 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1470 return SYMBOL_PC_RELATIVE;
1472 if (!TARGET_EMBEDDED_DATA
1473 && GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1474 return SYMBOL_GP_RELATIVE;
1477 /* Do not use small-data accesses for weak symbols; they may end up
1479 if (SYMBOL_REF_SMALL_P (x)
1480 && !SYMBOL_REF_WEAK (x))
1481 return SYMBOL_GP_RELATIVE;
1483 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1486 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1488 /* There are three cases to consider:
1490 - o32 PIC (either with or without explicit relocs)
1491 - n32/n64 PIC without explicit relocs
1492 - n32/n64 PIC with explicit relocs
1494 In the first case, both local and global accesses will use an
1495 R_MIPS_GOT16 relocation. We must correctly predict which of
1496 the two semantics (local or global) the assembler and linker
1497 will apply. The choice depends on the symbol's binding rather
1498 than its visibility.
1500 In the second case, the assembler will not use R_MIPS_GOT16
1501 relocations, but it chooses between local and global accesses
1502 in the same way as for o32 PIC.
1504 In the third case we have more freedom since both forms of
1505 access will work for any kind of symbol. However, there seems
1506 little point in doing things differently. */
1507 if (mips_global_symbol_p (x))
1508 return SYMBOL_GOT_DISP;
1510 return SYMBOL_GOT_PAGE_OFST;
1513 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1514 return SYMBOL_FORCE_TO_MEM;
1515 return SYMBOL_ABSOLUTE;
1518 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1519 is the alignment (in bytes) of SYMBOL_REF X. */
1522 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1524 /* If for some reason we can't get the alignment for the
1525 symbol, initializing this to one means we will only accept
1527 HOST_WIDE_INT align = 1;
1530 /* Get the alignment of the symbol we're referring to. */
1531 t = SYMBOL_REF_DECL (x);
1533 align = DECL_ALIGN_UNIT (t);
1535 return offset >= 0 && offset < align;
1538 /* Return true if X is a symbolic constant that can be used in context
1539 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1542 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1543 enum mips_symbol_type *symbol_type)
1547 split_const (x, &x, &offset);
1548 if (UNSPEC_ADDRESS_P (x))
1550 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1551 x = UNSPEC_ADDRESS (x);
1553 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1555 *symbol_type = mips_classify_symbol (x, context);
1556 if (*symbol_type == SYMBOL_TLS)
1562 if (offset == const0_rtx)
1565 /* Check whether a nonzero offset is valid for the underlying
1567 switch (*symbol_type)
1569 case SYMBOL_ABSOLUTE:
1570 case SYMBOL_FORCE_TO_MEM:
1571 case SYMBOL_32_HIGH:
1572 case SYMBOL_64_HIGH:
1575 /* If the target has 64-bit pointers and the object file only
1576 supports 32-bit symbols, the values of those symbols will be
1577 sign-extended. In this case we can't allow an arbitrary offset
1578 in case the 32-bit value X + OFFSET has a different sign from X. */
1579 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1580 return offset_within_block_p (x, INTVAL (offset));
1582 /* In other cases the relocations can handle any offset. */
1585 case SYMBOL_PC_RELATIVE:
1586 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1587 In this case, we no longer have access to the underlying constant,
1588 but the original symbol-based access was known to be valid. */
1589 if (GET_CODE (x) == LABEL_REF)
1594 case SYMBOL_GP_RELATIVE:
1595 /* Make sure that the offset refers to something within the
1596 same object block. This should guarantee that the final
1597 PC- or GP-relative offset is within the 16-bit limit. */
1598 return offset_within_block_p (x, INTVAL (offset));
1600 case SYMBOL_GOT_PAGE_OFST:
1601 case SYMBOL_GOTOFF_PAGE:
1602 /* If the symbol is global, the GOT entry will contain the symbol's
1603 address, and we will apply a 16-bit offset after loading it.
1604 If the symbol is local, the linker should provide enough local
1605 GOT entries for a 16-bit offset, but larger offsets may lead
1607 return SMALL_INT (offset);
1611 /* There is no carry between the HI and LO REL relocations, so the
1612 offset is only valid if we know it won't lead to such a carry. */
1613 return mips_offset_within_alignment_p (x, INTVAL (offset));
1615 case SYMBOL_GOT_DISP:
1616 case SYMBOL_GOTOFF_DISP:
1617 case SYMBOL_GOTOFF_CALL:
1618 case SYMBOL_GOTOFF_LOADGP:
1621 case SYMBOL_GOTTPREL:
1630 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1633 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1635 if (!HARD_REGISTER_NUM_P (regno))
1639 regno = reg_renumber[regno];
1642 /* These fake registers will be eliminated to either the stack or
1643 hard frame pointer, both of which are usually valid base registers.
1644 Reload deals with the cases where the eliminated form isn't valid. */
1645 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1648 /* In mips16 mode, the stack pointer can only address word and doubleword
1649 values, nothing smaller. There are two problems here:
1651 (a) Instantiating virtual registers can introduce new uses of the
1652 stack pointer. If these virtual registers are valid addresses,
1653 the stack pointer should be too.
1655 (b) Most uses of the stack pointer are not made explicit until
1656 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1657 We don't know until that stage whether we'll be eliminating to the
1658 stack pointer (which needs the restriction) or the hard frame
1659 pointer (which doesn't).
1661 All in all, it seems more consistent to only enforce this restriction
1662 during and after reload. */
1663 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1664 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1666 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1670 /* Return true if X is a valid base register for the given mode.
1671 Allow only hard registers if STRICT. */
1674 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1676 if (!strict && GET_CODE (x) == SUBREG)
1680 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1684 /* Return true if X is a valid address for machine mode MODE. If it is,
1685 fill in INFO appropriately. STRICT is true if we should only accept
1686 hard base registers. */
1689 mips_classify_address (struct mips_address_info *info, rtx x,
1690 enum machine_mode mode, int strict)
1692 switch (GET_CODE (x))
1696 info->type = ADDRESS_REG;
1698 info->offset = const0_rtx;
1699 return mips_valid_base_register_p (info->reg, mode, strict);
1702 info->type = ADDRESS_REG;
1703 info->reg = XEXP (x, 0);
1704 info->offset = XEXP (x, 1);
1705 return (mips_valid_base_register_p (info->reg, mode, strict)
1706 && const_arith_operand (info->offset, VOIDmode));
1709 info->type = ADDRESS_LO_SUM;
1710 info->reg = XEXP (x, 0);
1711 info->offset = XEXP (x, 1);
1712 return (mips_valid_base_register_p (info->reg, mode, strict)
1713 && mips_symbolic_constant_p (info->offset, SYMBOL_CONTEXT_MEM,
1715 && mips_symbol_insns (info->symbol_type, mode) > 0
1716 && mips_lo_relocs[info->symbol_type] != 0);
1719 /* Small-integer addresses don't occur very often, but they
1720 are legitimate if $0 is a valid base register. */
1721 info->type = ADDRESS_CONST_INT;
1722 return !TARGET_MIPS16 && SMALL_INT (x);
1727 info->type = ADDRESS_SYMBOLIC;
1728 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1730 && mips_symbol_insns (info->symbol_type, mode) > 0
1731 && !mips_split_p[info->symbol_type]);
1738 /* Return true if X is a thread-local symbol. */
1741 mips_tls_operand_p (rtx x)
1743 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1746 /* A for_each_rtx callback for mips_cannot_force_const_mem: return true
     if *X is a thread-local symbol (and so cannot go in a constant pool). */
1749 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1751 return mips_tls_operand_p (*x);
1754 /* Return true if X can not be forced into a constant pool. */
1757 mips_cannot_force_const_mem (rtx x)
1763 /* As an optimization, reject constants that mips_legitimize_move
1766 Suppose we have a multi-instruction sequence that loads constant C
1767 into register R. If R does not get allocated a hard register, and
1768 R is used in an operand that allows both registers and memory
1769 references, reload will consider forcing C into memory and using
1770 one of the instruction's memory alternatives. Returning false
1771 here will force it to use an input reload instead. */
1772 if (GET_CODE (x) == CONST_INT)
1775 split_const (x, &base, &offset);
1776 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1780 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1786 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1787 constants when we're using a per-function constant pool. */
1790 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1791 const_rtx x ATTRIBUTE_UNUSED)
1793 return !TARGET_MIPS16_PCREL_LOADS;
1796 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1797 single instruction. We rely on the fact that, in the worst case,
1798 all instructions involved in a MIPS16 address calculation are usually
1802 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1806 case SYMBOL_ABSOLUTE:
1807 /* When using 64-bit symbols, we need 5 preparatory instructions,
1810 lui $at,%highest(symbol)
1811 daddiu $at,$at,%higher(symbol)
1813 daddiu $at,$at,%hi(symbol)
1816 The final address is then $at + %lo(symbol). With 32-bit
1817 symbols we just need a preparatory lui for normal mode and
1818 a preparatory "li; sll" for MIPS16. */
1819 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1821 case SYMBOL_GP_RELATIVE:
1822 /* Treat GP-relative accesses as taking a single instruction on
1823 MIPS16 too; the copy of $gp can often be shared. */
1826 case SYMBOL_PC_RELATIVE:
1827 /* PC-relative constants can only be used with addiupc,
1829 if (mode == MAX_MACHINE_MODE
1830 || GET_MODE_SIZE (mode) == 4
1831 || GET_MODE_SIZE (mode) == 8)
1834 /* The constant must be loaded using addiupc first. */
1837 case SYMBOL_FORCE_TO_MEM:
1838 /* The constant must be loaded from the constant pool. */
1841 case SYMBOL_GOT_DISP:
1842 /* The constant will have to be loaded from the GOT before it
1843 is used in an address. */
1844 if (mode != MAX_MACHINE_MODE)
1849 case SYMBOL_GOT_PAGE_OFST:
1850 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1851 the local/global classification is accurate. See override_options
1854 The worst cases are:
1856 (1) For local symbols when generating o32 or o64 code. The assembler
1862 ...and the final address will be $at + %lo(symbol).
1864 (2) For global symbols when -mxgot. The assembler will use:
1866 lui $at,%got_hi(symbol)
1869 ...and the final address will be $at + %got_lo(symbol). */
1872 case SYMBOL_GOTOFF_PAGE:
1873 case SYMBOL_GOTOFF_DISP:
1874 case SYMBOL_GOTOFF_CALL:
1875 case SYMBOL_GOTOFF_LOADGP:
1876 case SYMBOL_32_HIGH:
1877 case SYMBOL_64_HIGH:
1883 case SYMBOL_GOTTPREL:
1886 /* A 16-bit constant formed by a single relocation, or a 32-bit
1887 constant formed from a high 16-bit relocation and a low 16-bit
1888 relocation. Use mips_split_p to determine which. */
1889 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1892 /* We don't treat a bare TLS symbol as a constant. */
1898 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1899 to load symbols of type TYPE into a register. Return 0 if the given
1900 type of symbol cannot be used as an immediate operand.
1902 Otherwise, return the number of instructions needed to load or store
1903 values of mode MODE to or from addresses of type TYPE. Return 0 if
1904 the given type of symbol is not valid in addresses.
1906 In both cases, treat extended MIPS16 instructions as two instructions. */
1909 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1911 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1914 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1917 mips_stack_address_p (rtx x, enum machine_mode mode)
1919 struct mips_address_info addr;
1921 return (mips_classify_address (&addr, x, mode, false)
1922 && addr.type == ADDRESS_REG
1923 && addr.reg == stack_pointer_rtx);
1926 /* Return true if a value at OFFSET bytes from BASE can be accessed
1927 using an unextended mips16 instruction. MODE is the mode of the
1930 Usually the offset in an unextended instruction is a 5-bit field.
1931 The offset is unsigned and shifted left once for HIs, twice
1932 for SIs, and so on. An exception is SImode accesses off the
1933 stack pointer, which have an 8-bit immediate field. */
1936 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1939 && GET_CODE (offset) == CONST_INT
1940 && INTVAL (offset) >= 0
1941 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1943 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1944 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1945 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1951 /* Return the number of instructions needed to load or store a value
1952 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
1953 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
1954 otherwise assume that a single load or store is enough.
1956 For mips16 code, count extended instructions as two instructions. */
1959 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
1961 struct mips_address_info addr;
1964 /* BLKmode is used for single unaligned loads and stores and should
1965 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
1966 meaningless, so we have to single it out as a special case one way
1968 if (mode != BLKmode && might_split_p)
1969 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1973 if (mips_classify_address (&addr, x, mode, false))
1978 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1982 case ADDRESS_LO_SUM:
1983 return (TARGET_MIPS16 ? factor * 2 : factor);
1985 case ADDRESS_CONST_INT:
1988 case ADDRESS_SYMBOLIC:
1989 return factor * mips_symbol_insns (addr.symbol_type, mode);
1995 /* Likewise for constant X. */
/* NOTE(review): elided fragment.  Returns the number of instructions needed
   to load constant X into a register, or 0 if X cannot be loaded directly
   (visible from the "? 1 : 0" and fall-through cases below).  */
1998 mips_const_insns (rtx x)
2000 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2001 enum mips_symbol_type symbol_type;
2004 switch (GET_CODE (x))
2007 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2009 || !mips_split_p[symbol_type])
2012 /* This is simply an lui for normal mode. It is an extended
2013 "li" followed by an extended "sll" for MIPS16. */
2014 return TARGET_MIPS16 ? 4 : 1;
2018 /* Unsigned 8-bit constants can be loaded using an unextended
2019 LI instruction. Unsigned 16-bit constants can be loaded
2020 using an extended LI. Negative constants must be loaded
2021 using LI and then negated. */
2022 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2023 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2024 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2025 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2028 return mips_build_integer (codes, INTVAL (x));
/* CONST_DOUBLE/vector zero is free to load except on MIPS16.  */
2032 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2038 /* See if we can refer to X directly. */
2039 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2040 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2042 /* Otherwise try splitting the constant into a base and offset.
2043 16-bit offsets can be added using an extra addiu. Larger offsets
2044 must be calculated separately and then added to the base. */
2045 split_const (x, &x, &offset);
2048 int n = mips_const_insns (x);
2051 if (SMALL_INT (offset))
2054 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2061 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2070 /* Return the number of instructions needed to implement INSN,
2071 given that it loads from or stores to MEM. Count extended
2072 mips16 instructions as two instructions. */
2075 mips_load_store_insns (rtx mem, rtx insn)
2077 enum machine_mode mode;
2081 gcc_assert (MEM_P (mem));
2082 mode = GET_MODE (mem);
2084 /* Try to prove that INSN does not need to be split. */
2085 might_split_p = true;
/* A 64-bit access only needs two instructions when the move must be
   split; mips_split_64bit_move_p decides that.  */
2086 if (GET_MODE_BITSIZE (mode) == 64)
2088 set = single_set (insn);
2089 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2090 might_split_p = false;
2093 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2097 /* Return the number of instructions needed for an integer division. */
/* NOTE(review): elided fragment — the counts added for each condition are
   not visible here; only the conditions are.  */
2100 mips_idiv_insns (void)
/* Extra instructions for the divide-by-zero check, either via trap
   instructions or an explicit branch sequence.  */
2105 if (TARGET_CHECK_ZERO_DIV)
2107 if (GENERATE_DIVIDE_TRAPS)
/* R4000/R4400 errata workarounds add further instructions.  */
2113 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2118 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2119 returns a nonzero value if X is a legitimate address for a memory
2120 operand of the indicated MODE. STRICT is nonzero if this function
2121 is called during reload. */
2124 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2126 struct mips_address_info addr;
/* ADDR is only used as scratch output; the classification result is
   all the caller needs.  */
2128 return mips_classify_address (&addr, x, mode, strict);
2131 /* Emit a move from SRC to DEST. Assume that the move expanders can
2132 handle all moves if !can_create_pseudo_p (). The distinction is
2133 important because, unlike emit_move_insn, the move expanders know
2134 how to force Pmode objects into the constant pool even when the
2135 constant pool address is not itself legitimate. */
2138 mips_emit_move (rtx dest, rtx src)
/* emit_move_insn_1 bypasses legitimization, which is required once
   no new pseudos may be created (e.g. during/after reload).  */
2140 return (can_create_pseudo_p ()
2141 ? emit_move_insn (dest, src)
2142 : emit_move_insn_1 (dest, src));
2145 /* Copy VALUE to a register and return that register. If new psuedos
2146 are allowed, copy it into a new register, otherwise use DEST. */
2149 mips_force_temporary (rtx dest, rtx value)
2151 if (can_create_pseudo_p ())
2152 return force_reg (Pmode, value);
/* copy_rtx keeps DEST shareable; the move goes into DEST itself.  */
2155 mips_emit_move (copy_rtx (dest), value);
2161 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2162 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2163 constant in that context and can be split into a high part and a LO_SUM.
2164 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2165 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2167 TEMP is as for mips_force_temporary and is used to load the high
2168 part into a register. */
2171 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2173 enum mips_symbol_context context;
2174 enum mips_symbol_type symbol_type;
2177 context = (mode == MAX_MACHINE_MODE
2178 ? SYMBOL_CONTEXT_LEA
2179 : SYMBOL_CONTEXT_MEM);
/* Bail out unless ADDR is a splittable symbolic constant valid in
   this context.  */
2180 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2181 || mips_symbol_insns (symbol_type, mode) == 0
2182 || !mips_split_p[symbol_type])
/* GP-relative symbols use $gp as the high part rather than a HIGH rtx.  */
2187 if (symbol_type == SYMBOL_GP_RELATIVE)
2189 if (!can_create_pseudo_p ())
2191 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2195 high = mips16_gp_pseudo_reg ();
2199 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2200 high = mips_force_temporary (temp, high);
2202 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2208 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2209 and add CONST_INT OFFSET to the result. */
2212 mips_unspec_address_offset (rtx base, rtx offset,
2213 enum mips_symbol_type symbol_type)
/* The symbol type is encoded in the UNSPEC number itself.  */
2215 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2216 UNSPEC_ADDRESS_FIRST + symbol_type);
2217 if (offset != const0_rtx)
2218 base = gen_rtx_PLUS (Pmode, base, offset);
/* The whole expression is wrapped in CONST so it stays a constant.  */
2219 return gen_rtx_CONST (Pmode, base);
2222 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2223 type SYMBOL_TYPE. */
2226 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
/* Separate any constant offset so only the bare symbol is wrapped.  */
2230 split_const (address, &base, &offset);
2231 return mips_unspec_address_offset (base, offset, symbol_type);
2235 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2236 high part to BASE and return the result. Just return BASE otherwise.
2237 TEMP is available as a temporary register if needed.
2239 The returned expression can be used as the first operand to a LO_SUM. */
2242 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2243 enum mips_symbol_type symbol_type)
2245 if (mips_split_p[symbol_type])
/* Load HIGH(addr) into a register, then add BASE to it.  */
2247 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2248 addr = mips_force_temporary (temp, addr);
2249 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2255 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2256 mips_force_temporary; it is only needed when OFFSET is not a
2260 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
/* Large offsets cannot be encoded in one addressing-mode field.  */
2262 if (!SMALL_OPERAND (offset))
2267 /* Load the full offset into a register so that we can use
2268 an unextended instruction for the address itself. */
2269 high = GEN_INT (offset);
2274 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2275 high = GEN_INT (CONST_HIGH_PART (offset));
2276 offset = CONST_LOW_PART (offset);
2278 high = mips_force_temporary (temp, high);
2279 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
/* Any remaining (small) offset stays as an immediate in the address.  */
2281 return plus_constant (reg, offset);
2284 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2285 referencing, and TYPE is the symbol type to use (either global
2286 dynamic or local dynamic). V0 is an RTX for the return value
2287 location. The entire insn sequence is returned. */
/* Lazily-created SYMBOL_REF for __tls_get_addr, GC-rooted via GTY.  */
2289 static GTY(()) rtx mips_tls_symbol;
2292 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2294 rtx insn, loc, tga, a0;
/* The TLS argument is passed in the first GP argument register ($4/a0).  */
2296 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST)
2298 if (!mips_tls_symbol)
2299 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2301 loc = mips_unspec_address (sym, type);
/* a0 = $gp + %lo-style relocation of the TLS symbol.  */
2305 emit_insn (gen_rtx_SET (Pmode, a0,
2306 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2307 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2308 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
/* Mark the call const/pure and record its register usage so the
   optimizers treat it correctly.  */
2309 CONST_OR_PURE_CALL_P (insn) = 1;
2310 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2311 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2312 insn = get_insns ();
2319 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2320 return value will be a valid address and move_operand (either a REG
2324 mips_legitimize_tls_address (rtx loc)
2326 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2327 enum tls_model model;
/* TLS is not implemented for MIPS16; return a dummy register after
   issuing the sorry() diagnostic.  */
2331 sorry ("MIPS16 TLS");
2332 return gen_reg_rtx (Pmode);
2335 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2336 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2338 model = SYMBOL_REF_TLS_MODEL (loc);
2339 /* Only TARGET_ABICALLS code can have more than one module; other
2340 code must be be static and should not use a GOT. All TLS models
2341 reduce to local exec in this situation. */
2342 if (!TARGET_ABICALLS)
2343 model = TLS_MODEL_LOCAL_EXEC;
2347 case TLS_MODEL_GLOBAL_DYNAMIC:
/* GD: one __tls_get_addr call per symbol; result equated to LOC.  */
2348 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2349 dest = gen_reg_rtx (Pmode);
2350 emit_libcall_block (insn, dest, v0, loc);
2353 case TLS_MODEL_LOCAL_DYNAMIC:
/* LD: one call fetches the module base; per-symbol DTPREL offsets
   are added afterwards.  */
2354 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2355 tmp1 = gen_reg_rtx (Pmode);
2357 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2358 share the LDM result with other LD model accesses. */
2359 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2361 emit_libcall_block (insn, tmp1, v0, eqv);
2363 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2364 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2365 mips_unspec_address (loc, SYMBOL_DTPREL));
2368 case TLS_MODEL_INITIAL_EXEC:
/* IE: load the TP offset from the GOT and add it to the thread
   pointer (fetched with rdhwr via the tls_get_tp pattern).  */
2369 tmp1 = gen_reg_rtx (Pmode);
2370 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2371 if (Pmode == DImode)
2373 emit_insn (gen_tls_get_tp_di (v1));
2374 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2378 emit_insn (gen_tls_get_tp_si (v1));
2379 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2381 dest = gen_reg_rtx (Pmode);
2382 emit_insn (gen_add3_insn (dest, tmp1, v1));
2385 case TLS_MODEL_LOCAL_EXEC:
/* LE: the TP offset is a link-time constant (TPREL high/low pair).  */
2386 if (Pmode == DImode)
2387 emit_insn (gen_tls_get_tp_di (v1));
2389 emit_insn (gen_tls_get_tp_si (v1));
2391 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2392 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2393 mips_unspec_address (loc, SYMBOL_TPREL));
2403 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2404 be legitimized in a way that the generic machinery might not expect,
2405 put the new address in *XLOC and return true. MODE is the mode of
2406 the memory being accessed. */
2409 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
/* TLS addresses get their model-specific expansion first.  */
2411 if (mips_tls_operand_p (*xloc))
2413 *xloc = mips_legitimize_tls_address (*xloc);
2417 /* See if the address can split into a high part and a LO_SUM. */
2418 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2421 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2423 /* Handle REG + CONSTANT using mips_add_offset. */
2426 reg = XEXP (*xloc, 0);
/* Force a non-base-register operand into a register first.  */
2427 if (!mips_valid_base_register_p (reg, mode, 0))
2428 reg = copy_to_mode_reg (Pmode, reg);
2429 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2437 /* Subroutine of mips_build_integer (with the same interface).
2438 Assume that the final action in the sequence should be a left shift. */
2441 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2443 unsigned int i, shift;
2445 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2446 since signed numbers are easier to load than unsigned ones. */
2448 while ((value & 1) == 0)
/* value /= 2 is an arithmetic right shift for non-negative and,
   per the comment above, the intended behavior for negatives too.  */
2449 value /= 2, shift++;
/* Load the reduced value, then append the final ASHIFT step.  */
2451 i = mips_build_integer (codes, value);
2452 codes[i].code = ASHIFT;
2453 codes[i].value = shift;
2458 /* As for mips_build_shift, but assume that the final action will be
2459 an IOR or PLUS operation. */
2462 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2464 unsigned HOST_WIDE_INT high;
/* HIGH is VALUE with its low 16 bits cleared.  */
2467 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2468 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2470 /* The constant is too complex to load with a simple lui/ori pair
2471 so our goal is to clear as many trailing zeros as possible.
2472 In this case, we know bit 16 is set and that the low 16 bits
2473 form a negative number. If we subtract that number from VALUE,
2474 we will clear at least the lowest 17 bits, maybe more. */
2475 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2476 codes[i].code = PLUS;
2477 codes[i].value = CONST_LOW_PART (value);
/* Otherwise load the high part and OR in the low 16 bits.  */
2481 i = mips_build_integer (codes, high);
2482 codes[i].code = IOR;
2483 codes[i].value = value & 0xffff;
2489 /* Fill CODES with a sequence of rtl operations to load VALUE.
2490 Return the number of operations needed. */
2493 mips_build_integer (struct mips_integer_op *codes,
2494 unsigned HOST_WIDE_INT value)
/* Single-instruction cases: addiu, ori or lui can load VALUE directly.  */
2496 if (SMALL_OPERAND (value)
2497 || SMALL_OPERAND_UNSIGNED (value)
2498 || LUI_OPERAND (value))
2500 /* The value can be loaded with a single instruction. */
2501 codes[0].code = UNKNOWN;
2502 codes[0].value = value;
2505 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2507 /* Either the constant is a simple LUI/ORI combination or its
2508 lowest bit is set. We don't want to shift in this case. */
2509 return mips_build_lower (codes, value);
2511 else if ((value & 0xffff) == 0)
2513 /* The constant will need at least three actions. The lowest
2514 16 bits are clear, so the final action will be a shift. */
2515 return mips_build_shift (codes, value);
2519 /* The final action could be a shift, add or inclusive OR.
2520 Rather than use a complex condition to select the best
2521 approach, try both mips_build_shift and mips_build_lower
2522 and pick the one that gives the shortest sequence.
2523 Note that this case is only used once per constant. */
2524 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2525 unsigned int cost, alt_cost;
2527 cost = mips_build_shift (codes, value);
2528 alt_cost = mips_build_lower (alt_codes, value);
2529 if (alt_cost < cost)
/* The shorter alternative wins; copy it over CODES.  */
2531 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2539 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2542 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2544 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2545 enum machine_mode mode;
2546 unsigned int i, cost;
2549 mode = GET_MODE (dest);
2550 cost = mips_build_integer (codes, value);
2552 /* Apply each binary operation to X. Invariant: X is a legitimate
2553 source operand for a SET pattern. */
2554 x = GEN_INT (codes[0].value);
2555 for (i = 1; i < cost; i++)
/* Without pseudos, materialize intermediate results in TEMP.  */
2557 if (!can_create_pseudo_p ())
2559 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2563 x = force_reg (mode, x);
2564 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
/* Final store of the fully-built value into DEST.  */
2567 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2571 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2572 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2576 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2580 /* Split moves of big integers into smaller pieces. */
2581 if (splittable_const_int_operand (src, mode))
/* DEST doubles as the temporary here.  */
2583 mips_move_integer (dest, dest, INTVAL (src));
2587 /* Split moves of symbolic constants into high/low pairs. */
2588 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2590 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2594 if (mips_tls_operand_p (src))
2596 mips_emit_move (dest, mips_legitimize_tls_address (src));
2600 /* If we have (const (plus symbol offset)), and that expression cannot
2601 be forced into memory, load the symbol first and add in the offset.
2602 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2603 forced into memory, as it usually produces better code. */
2604 split_const (src, &base, &offset);
2605 if (offset != const0_rtx
2606 && (targetm.cannot_force_const_mem (src)
2607 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2609 base = mips_force_temporary (dest, base);
2610 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
/* Last resort: place SRC in the constant pool and load it.  */
2614 src = force_const_mem (mode, src);
2616 /* When using explicit relocs, constant pool references are sometimes
2617 not legitimate addresses. */
2618 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2619 mips_emit_move (dest, src);
2623 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2624 sequence that is valid. */
2627 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
/* At least one operand of a move must be a register (or zero).  */
2629 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2631 mips_emit_move (dest, force_reg (mode, src));
2635 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2636 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2637 && REG_P (src) && MD_REG_P (REGNO (src))
2638 && REG_P (dest) && GP_REG_P (REGNO (dest)))
/* The mfhilo patterns take both HI and LO as operands; compute the
   register that is *not* the source.  */
2640 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2641 if (GET_MODE_SIZE (mode) <= 4)
2642 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2643 gen_rtx_REG (SImode, REGNO (src)),
2644 gen_rtx_REG (SImode, other_regno)));
2646 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2647 gen_rtx_REG (DImode, REGNO (src)),
2648 gen_rtx_REG (DImode, other_regno)));
2652 /* We need to deal with constants that would be legitimate
2653 immediate_operands but not legitimate move_operands. */
2654 if (CONSTANT_P (src) && !move_operand (src, mode))
2656 mips_legitimize_const_move (mode, dest, src);
/* Record the original constant so optimizers still see its value.  */
2657 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2663 /* We need a lot of little routines to check constant values on the
2664 mips16. These are used to figure out how long the instruction will
2665 be. It would be much better to do this using constraints, but
2666 there aren't nearly enough letters available. */
/* Return nonzero if OP is a CONST_INT in [LOW, HIGH] whose value has
   none of the MASK bits set (MASK enforces alignment for scaled
   offsets).  */
2669 m16_check_op (rtx op, int low, int high, int mask)
2671 return (GET_CODE (op) == CONST_INT
2672 && INTVAL (op) >= low
2673 && INTVAL (op) <= high
2674 && (INTVAL (op) & mask) == 0);
/* The following predicates all wrap m16_check_op with specific ranges.
   Naming: [n]simm/uimm<bits>_<scale>; the "n" variants check the range
   of the *negated* operand, and a _4/_8 suffix requires 4- or 8-byte
   alignment (mask 3 or 7).  */
2678 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2680 return m16_check_op (op, 0x1, 0x8, 0);
2684 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2686 return m16_check_op (op, - 0x8, 0x7, 0);
2690 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2692 return m16_check_op (op, - 0x7, 0x8, 0);
2696 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2698 return m16_check_op (op, - 0x10, 0xf, 0);
2702 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2704 return m16_check_op (op, - 0xf, 0x10, 0);
2708 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2710 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2714 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2716 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2720 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2722 return m16_check_op (op, - 0x80, 0x7f, 0);
2726 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2728 return m16_check_op (op, - 0x7f, 0x80, 0);
2732 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2734 return m16_check_op (op, 0x0, 0xff, 0);
2738 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2740 return m16_check_op (op, - 0xff, 0x0, 0);
2744 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2746 return m16_check_op (op, - 0x1, 0xfe, 0);
2750 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2752 return m16_check_op (op, 0x0, 0xff << 2, 3);
2756 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2758 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2762 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2764 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2768 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2770 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2773 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2774 address instruction. */
2777 mips_lwxs_address_p (rtx addr)
2780 && GET_CODE (addr) == PLUS
2781 && REG_P (XEXP (addr, 1)))
2783 rtx offset = XEXP (addr, 0);
/* lwxs scales the index register by 4: (reg * 4) + reg.  */
2784 if (GET_CODE (offset) == MULT
2785 && REG_P (XEXP (offset, 0))
2786 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2787 && INTVAL (XEXP (offset, 1)) == 4)
/* Implements TARGET_RTX_COSTS: estimate the cost of expression X
   (whose parent code is OUTER_CODE) and store it in *TOTAL.
   NOTE(review): this fragment is heavily elided; the case labels for
   several branches below are not visible.  */
2794 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2796 enum machine_mode mode = GET_MODE (x);
2797 bool float_mode_p = FLOAT_MODE_P (mode);
2804 /* A number between 1 and 8 inclusive is efficient for a shift.
2805 Otherwise, we will need an extended instruction. */
2806 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2807 || (outer_code) == LSHIFTRT)
2809 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2812 *total = COSTS_N_INSNS (1);
2816 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2817 if ((outer_code) == XOR
2818 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2824 /* We may be able to use slt or sltu for a comparison with a
2825 signed 16-bit value. (The boundary conditions aren't quite
2826 right, but this is just a heuristic anyhow.) */
2827 if (((outer_code) == LT || (outer_code) == LE
2828 || (outer_code) == GE || (outer_code) == GT
2829 || (outer_code) == LTU || (outer_code) == LEU
2830 || (outer_code) == GEU || (outer_code) == GTU)
2831 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2837 /* Equality comparisons with 0 are cheap. */
2838 if (((outer_code) == EQ || (outer_code) == NE)
2845 /* Constants in the range 0...255 can be loaded with an unextended
2846 instruction. They are therefore as cheap as a register move.
2848 Given the choice between "li R1,0...255" and "move R1,R2"
2849 (where R2 is a known constant), it is usually better to use "li",
2850 since we do not want to unnecessarily extend the lifetime
2852 if (outer_code == SET
2854 && INTVAL (x) < 256)
2862 /* These can be used anywhere. */
2867 /* Otherwise fall through to the handling below because
2868 we'll need to construct the constant. */
2874 if (LEGITIMATE_CONSTANT_P (x))
2876 *total = COSTS_N_INSNS (1);
2881 /* The value will need to be fetched from the constant pool. */
2882 *total = CONSTANT_POOL_COST;
2888 /* If the address is legitimate, return the number of
2889 instructions it needs. */
2890 rtx addr = XEXP (x, 0);
2891 int n = mips_address_insns (addr, GET_MODE (x), true);
/* +1 accounts for the memory access itself on top of the address
   computation.  */
2894 *total = COSTS_N_INSNS (n + 1);
2897 /* Check for scaled indexed address. */
2898 if (mips_lwxs_address_p (addr))
2900 *total = COSTS_N_INSNS (2);
2903 /* Otherwise use the default handling. */
2908 *total = COSTS_N_INSNS (6);
/* Double-word operations on a 32-bit target take two instructions.  */
2912 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2918 if (mode == DImode && !TARGET_64BIT)
2920 *total = COSTS_N_INSNS (2);
2928 if (mode == DImode && !TARGET_64BIT)
2930 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2938 *total = COSTS_N_INSNS (1);
2940 *total = COSTS_N_INSNS (4);
2944 *total = COSTS_N_INSNS (1);
/* Floating-point add/sub costs come from the per-CPU cost table.  */
2951 *total = mips_cost->fp_add;
2955 else if (mode == DImode && !TARGET_64BIT)
2957 *total = COSTS_N_INSNS (4);
2963 if (mode == DImode && !TARGET_64BIT)
2965 *total = COSTS_N_INSNS (4);
/* Multiplication costs, again per-CPU, keyed on mode.  */
2972 *total = mips_cost->fp_mult_sf;
2974 else if (mode == DFmode)
2975 *total = mips_cost->fp_mult_df;
2977 else if (mode == SImode)
2978 *total = mips_cost->int_mult_si;
2981 *total = mips_cost->int_mult_di;
2990 *total = mips_cost->fp_div_sf;
2992 *total = mips_cost->fp_div_df;
3001 *total = mips_cost->int_div_di;
3003 *total = mips_cost->int_div_si;
3008 /* A sign extend from SImode to DImode in 64-bit mode is often
3009 zero instructions, because the result can often be used
3010 directly by another instruction; we'll call it one. */
3011 if (TARGET_64BIT && mode == DImode
3012 && GET_MODE (XEXP (x, 0)) == SImode)
3013 *total = COSTS_N_INSNS (1);
3015 *total = COSTS_N_INSNS (2);
/* Zero extension of SImode to DImode needs two operations.  */
3019 if (TARGET_64BIT && mode == DImode
3020 && GET_MODE (XEXP (x, 0)) == SImode)
3021 *total = COSTS_N_INSNS (2);
3023 *total = COSTS_N_INSNS (1);
3027 case UNSIGNED_FLOAT:
3030 case FLOAT_TRUNCATE:
/* Conversions are costed like an FP add.  */
3032 *total = mips_cost->fp_add;
3040 /* Provide the costs of an addressing mode that contains ADDR.
3041 If ADDR is not a valid address, its cost is irrelevant. */
3044 mips_address_cost (rtx addr)
/* SImode and might_split_p == false give the cost of a plain
   single-word access through ADDR.  */
3046 return mips_address_insns (addr, SImode, false);
3049 /* Return one word of double-word value OP, taking into account the fixed
3050 endianness of certain registers. HIGH_P is true to select the high part,
3051 false to select the low part. */
3054 mips_subword (rtx op, int high_p)
3057 enum machine_mode mode;
3059 mode = GET_MODE (op);
3060 if (mode == VOIDmode)
/* On big-endian targets the high word is at byte offset 0, so the
   nonzero offset selects the *other* word requested.  */
3063 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3064 byte = UNITS_PER_WORD;
/* Paired FPRs always put the high word in the odd register,
   regardless of memory endianness.  */
3068 if (FP_REG_RTX_P (op))
3069 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3072 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3074 return simplify_gen_subreg (word_mode, op, mode, byte);
3078 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3081 mips_split_64bit_move_p (rtx dest, rtx src)
3086 /* FP->FP moves can be done in a single instruction. */
3087 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3090 /* Check for floating-point loads and stores. They can be done using
3091 ldc1 and sdc1 on MIPS II and above. */
3094 if (FP_REG_RTX_P (dest) && MEM_P (src))
3096 if (FP_REG_RTX_P (src) && MEM_P (dest))
3103 /* Split a 64-bit move from SRC to DEST assuming that
3104 mips_split_64bit_move_p holds.
3106 Moves into and out of FPRs cause some difficulty here. Such moves
3107 will always be DFmode, since paired FPRs are not allowed to store
3108 DImode values. The most natural representation would be two separate
3109 32-bit moves, such as:
3111 (set (reg:SI $f0) (mem:SI ...))
3112 (set (reg:SI $f1) (mem:SI ...))
3114 However, the second insn is invalid because odd-numbered FPRs are
3115 not allowed to store independent values. Use the patterns load_df_low,
3116 load_df_high and store_df_high instead. */
3119 mips_split_64bit_move (rtx dest, rtx src)
3121 if (FP_REG_RTX_P (dest))
3123 /* Loading an FPR from memory or from GPRs. */
/* NOTE(review): the branch condition choosing between the mthc1 and
   load_df_high variants is elided here (presumably ISA_HAS_MXHC1 —
   verify against the full source).  */
3126 dest = gen_lowpart (DFmode, dest);
3127 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3128 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3133 emit_insn (gen_load_df_low (copy_rtx (dest),
3134 mips_subword (src, 0)));
3135 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3139 else if (FP_REG_RTX_P (src))
3141 /* Storing an FPR into memory or GPRs. */
3144 src = gen_lowpart (DFmode, src);
3145 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3146 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3150 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3151 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3156 /* The operation can be split into two normal moves. Decide in
3157 which order to do them. */
3160 low_dest = mips_subword (dest, 0);
/* If writing the low destination word first would clobber part of
   SRC, move the high words first instead.  */
3161 if (REG_P (low_dest)
3162 && reg_overlap_mentioned_p (low_dest, src))
3164 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3165 mips_emit_move (low_dest, mips_subword (src, 0));
3169 mips_emit_move (low_dest, mips_subword (src, 0));
3170 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3175 /* Return the appropriate instructions to move SRC into DEST. Assume
3176 that SRC is operand 1 and DEST is operand 0. */
/* NOTE(review): returns assembler template strings for the move
   patterns; several branches are elided in this fragment.  */
3179 mips_output_move (rtx dest, rtx src)
3181 enum rtx_code dest_code, src_code;
3182 enum mips_symbol_type symbol_type;
3185 dest_code = GET_CODE (dest);
3186 src_code = GET_CODE (src);
/* dbl_p selects the doubleword variant of each mnemonic.  */
3187 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3189 if (dbl_p && mips_split_64bit_move_p (dest, src))
/* Moves whose source is a GPR or constant zero.  */
3192 if ((src_code == REG && GP_REG_P (REGNO (src)))
3193 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3195 if (dest_code == REG)
3197 if (GP_REG_P (REGNO (dest)))
3198 return "move\t%0,%z1";
3200 if (MD_REG_P (REGNO (dest)))
3203 if (DSP_ACC_REG_P (REGNO (dest)))
/* Patch the accumulator name (e.g. "ac1") into the mt.. template
   from the register's printable name.  */
3205 static char retval[] = "mt__\t%z1,%q0";
3206 retval[2] = reg_names[REGNO (dest)][4];
3207 retval[3] = reg_names[REGNO (dest)][5];
3211 if (FP_REG_P (REGNO (dest)))
3212 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3214 if (ALL_COP_REG_P (REGNO (dest)))
/* Patch the coprocessor number into dmtcN/mtcN; skipping the
   leading 'd' selects the 32-bit form.  */
3216 static char retval[] = "dmtc_\t%z1,%0";
3218 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3219 return (dbl_p ? retval : retval + 1);
3222 if (dest_code == MEM)
3223 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
/* Moves whose destination is a GPR.  */
3225 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3227 if (src_code == REG)
3229 if (DSP_ACC_REG_P (REGNO (src)))
3231 static char retval[] = "mf__\t%0,%q1";
3232 retval[2] = reg_names[REGNO (src)][4];
3233 retval[3] = reg_names[REGNO (src)][5];
3237 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3238 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3240 if (FP_REG_P (REGNO (src)))
3241 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3243 if (ALL_COP_REG_P (REGNO (src)))
3245 static char retval[] = "dmfc_\t%0,%1";
3247 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3248 return (dbl_p ? retval : retval + 1);
3252 if (src_code == MEM)
3253 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3255 if (src_code == CONST_INT)
3257 /* Don't use the X format, because that will give out of
3258 range numbers for 64-bit hosts and 32-bit targets. */
3260 return "li\t%0,%1\t\t\t# %X1";
3262 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3265 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3269 if (src_code == HIGH)
/* MIPS16 has no lui; HIGH moves must be split ("#").  */
3270 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3272 if (CONST_GP_P (src))
3273 return "move\t%0,%1";
3275 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3276 && mips_lo_relocs[symbol_type] != 0)
3278 /* A signed 16-bit constant formed by applying a relocation
3279 operator to a symbolic address. */
3280 gcc_assert (!mips_split_p[symbol_type]);
3281 return "li\t%0,%R1";
3284 if (symbolic_operand (src, VOIDmode))
3286 gcc_assert (TARGET_MIPS16
3287 ? TARGET_MIPS16_TEXT_LOADS
3288 : !TARGET_EXPLICIT_RELOCS);
3289 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
/* Moves whose source is an FPR.  */
3292 if (src_code == REG && FP_REG_P (REGNO (src)))
3294 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3296 if (GET_MODE (dest) == V2SFmode)
3297 return "mov.ps\t%0,%1";
3299 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3302 if (dest_code == MEM)
3303 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3305 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3307 if (src_code == MEM)
3308 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
/* Coprocessor loads and stores (lwcN/ldcN, swcN/sdcN).  */
3310 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3312 static char retval[] = "l_c_\t%0,%1";
3314 retval[1] = (dbl_p ? 'd' : 'w');
3315 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3318 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3320 static char retval[] = "s_c_\t%1,%0";
3322 retval[1] = (dbl_p ? 'd' : 'w');
3323 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3329 /* Restore $gp from its save slot. Valid only when using o32 or
3333 mips_restore_gp (void)
3337 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
/* The save slot lives just above the outgoing argument area, addressed
   from $fp when a frame pointer exists, else from $sp.  */
3339 address = mips_add_offset (pic_offset_table_rtx,
3340 frame_pointer_needed
3341 ? hard_frame_pointer_rtx
3342 : stack_pointer_rtx,
3343 current_function_outgoing_args_size);
3344 slot = gen_rtx_MEM (Pmode, address);
3346 mips_emit_move (pic_offset_table_rtx, slot);
/* Prevent the scheduler from moving $gp uses across the restore.  */
3347 if (!TARGET_EXPLICIT_RELOCS)
3348 emit_insn (gen_blockage ());
3351 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3354 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3356 emit_insn (gen_rtx_SET (VOIDmode, target,
3357 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3360 /* Return true if CMP1 is a suitable second operand for relational
3361 operator CODE. See also the *sCC patterns in mips.md. */
/* NOTE(review): the case labels matching each return are elided in
   this fragment.  */
3364 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3370 return reg_or_0_operand (cmp1, VOIDmode);
3374 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3378 return arith_operand (cmp1, VOIDmode);
3381 return sle_operand (cmp1, VOIDmode);
3384 return sleu_operand (cmp1, VOIDmode);
3391 /* Canonicalize LE or LEU comparisons into LT comparisons when
3392 possible to avoid extra instructions or inverting the
3396 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3397 enum machine_mode mode)
3399 HOST_WIDE_INT original, plus_one;
3401 if (GET_CODE (*cmp1) != CONST_INT)
3404 original = INTVAL (*cmp1);
/* x <= C is x < C+1, but only if C+1 does not wrap in MODE.  */
3405 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3410 if (original < plus_one)
3413 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3422 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3435 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3436 result in TARGET. CMP0 and TARGET are register_operands that have
3437 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3438 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3441 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3442 rtx target, rtx cmp0, rtx cmp1)
3444 /* First see if there is a MIPS instruction that can do this operation
3445 with CMP1 in its current form. If not, try to canonicalize the
3446 comparison to LT. If that fails, try doing the same for the
3447 inverse operation. If that also fails, force CMP1 into a register
3449 if (mips_relational_operand_ok_p (code, cmp1))
3450 mips_emit_binary (code, target, cmp0, cmp1);
3451 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3452 mips_emit_binary (code, target, cmp0, cmp1);
3455 enum rtx_code inv_code = reverse_condition (code);
/* If even the inverse cannot be done directly, retry with CMP1
   forced into a register (guaranteed to succeed for reg operands).  */
3456 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3458 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3459 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
/* Caller cannot absorb an inversion: compute the inverse into a
   scratch register and XOR with 1 to flip the 0/1 result.  */
3461 else if (invert_ptr == 0)
3463 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3464 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3465 mips_emit_binary (XOR, target, inv_target, const1_rtx);
/* Otherwise let the caller absorb the inversion via *INVERT_PTR.  */
3469 *invert_ptr = !*invert_ptr;
3470 mips_emit_binary (inv_code, target, cmp0, cmp1);
3475 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3476 The register will have the same mode as CMP0. */
3479 mips_zero_if_equal (rtx cmp0, rtx cmp1)
/* Comparing against zero: CMP0 itself already has the property.  */
3481 if (cmp1 == const0_rtx)
/* When CMP1 fits an unsigned arithmetic immediate, a single XOR
   suffices; otherwise fall back to subtraction.  */
3484 if (uns_arith_operand (cmp1, VOIDmode))
3485 return expand_binop (GET_MODE (cmp0), xor_optab,
3486 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3488 return expand_binop (GET_MODE (cmp0), sub_optab,
3489 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3492 /* Convert *CODE into a code that can be used in a floating-point
3493 scc instruction (c.<cond>.<fmt>). Return true if the values of
3494 the condition code registers will be inverted, with 0 indicating
3495 that the condition holds. */
3498 mips_reverse_fp_cond_p (enum rtx_code *code)
/* NOTE(review): the switch selecting which codes need reversal is
   elided here; only the reversing arm is visible.  The
   maybe_unordered variant keeps NaN semantics correct.  */
3505 *code = reverse_condition_maybe_unordered (*code);
3513 /* Convert a comparison into something that can be used in a branch or
3514 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3515 being compared and *CODE is the code used to compare them.
3517 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3518 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3519 otherwise any standard branch condition can be used. The standard branch
3522 - EQ/NE between two registers.
3523 - any comparison between a register and zero. */
3526 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3528 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
/* Comparisons against zero can be used directly.  */
3530 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3532 *op0 = cmp_operands[0];
3533 *op1 = cmp_operands[1];
/* EQ/NE: fold the pair into a single value that is zero iff the
   operands are equal, then compare that against zero.  */
3535 else if (*code == EQ || *code == NE)
3539 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3544 *op0 = cmp_operands[0];
3545 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3550 /* The comparison needs a separate scc instruction. Store the
3551 result of the scc in *OP0 and compare it against zero. */
3552 bool invert = false;
3553 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3555 mips_emit_int_relational (*code, &invert, *op0,
3556 cmp_operands[0], cmp_operands[1]);
3557 *code = (invert ? EQ : NE);
3562 enum rtx_code cmp_code;
3564 /* Floating-point tests use a separate c.cond.fmt comparison to
3565 set a condition code register. The branch or conditional move
3566 will then compare that register against zero.
3568 Set CMP_CODE to the code of the comparison instruction and
3569 *CODE to the code that the branch or move should use. */
3571 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
/* NOTE(review): the condition choosing between a fresh CCmode pseudo
   and the fixed FPSW register is elided here -- confirm against the
   full source.  */
3573 ? gen_reg_rtx (CCmode)
3574 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3576 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3580 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3581 Store the result in TARGET and return true if successful.
3583 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3586 mips_emit_scc (enum rtx_code code, rtx target)
/* Only integer comparisons are handled here.  */
3588 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
/* Narrow TARGET to the comparison mode in case it is wider.  */
3591 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
/* EQ/NE: reduce to a zero/nonzero value, then test it.  */
3592 if (code == EQ || code == NE)
3594 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3595 mips_emit_binary (code, target, zie, const0_rtx);
3598 mips_emit_int_relational (code, 0, target,
3599 cmp_operands[0], cmp_operands[1]);
3603 /* Emit the common code for doing conditional branches.
3604 operand[0] is the label to jump to.
3605 The comparison operands are saved away by cmp{si,di,sf,df}. */
3608 gen_conditional_branch (rtx *operands, enum rtx_code code)
3610 rtx op0, op1, condition;
/* On MIPS16 only EQ/NE against zero are available, hence
   need_eq_ne_p is TARGET_MIPS16 (see mips_emit_compare).  */
3612 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3613 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3614 emit_jump_insn (gen_condjump (condition, operands[0]));
3619 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3620 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3623 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3624 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
/* If COND cannot be expressed directly, use its inverse and swap
   the move sources below.  */
3629 reversed_p = mips_reverse_fp_cond_p (&cond);
3630 cmp_result = gen_reg_rtx (CCV2mode);
3631 emit_insn (gen_scc_ps (cmp_result,
3632 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
/* Reversed comparison: the sources are swapped.  */
3634 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3637 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3641 /* Emit the common code for conditional moves. OPERANDS is the array
3642 of operands passed to the conditional move define_expand. */
3645 gen_conditional_move (rtx *operands)
/* OPERANDS[1] holds the comparison; conditional moves require an
   EQ/NE test against zero, hence need_eq_ne_p == true.  */
3650 code = GET_CODE (operands[1]);
3651 mips_emit_compare (&code, &op0, &op1, true);
3652 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3653 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3654 gen_rtx_fmt_ee (code,
3657 operands[2], operands[3])));
3660 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3661 the conditional_trap expander. */
3664 mips_gen_conditional_trap (rtx *operands)
3667 enum rtx_code cmp_code = GET_CODE (operands[0]);
3668 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3670 /* MIPS conditional trap machine instructions don't have GT or LE
3671 flavors, so we must invert the comparison and convert to LT and
3672 GE, respectively. */
3675 case GT: cmp_code = LT; break;
3676 case LE: cmp_code = GE; break;
3677 case GTU: cmp_code = LTU; break;
3678 case LEU: cmp_code = GEU; break;
/* If the code was unchanged, keep the operand order; otherwise swap
   the operands so the converted code tests the same condition.  */
3681 if (cmp_code == GET_CODE (operands[0]))
3683 op0 = cmp_operands[0];
3684 op1 = cmp_operands[1];
3688 op0 = cmp_operands[1];
3689 op1 = cmp_operands[0];
/* The trap patterns need OP0 in a register; OP1 may be a small
   arithmetic immediate.  */
3691 op0 = force_reg (mode, op0);
3692 if (!arith_operand (op1, mode))
3693 op1 = force_reg (mode, op1);
3695 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3696 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3700 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3703 mips_ok_for_lazy_binding_p (rtx x)
/* Lazy binding requires GOT addressing and a symbol that does not
   bind locally.  */
3705 return (TARGET_USE_GOT
3706 && GET_CODE (x) == SYMBOL_REF
3707 && !mips_symbol_binds_local_p (x));
3710 /* Load function address ADDR into register DEST. SIBCALL_P is true
3711 if the address is needed for a sibling call. */
3714 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3716 /* If we're generating PIC, and this call is to a global function,
3717 try to allow its address to be resolved lazily. This isn't
3718 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3719 to the stub would be our caller's gp, not ours. */
3720 if (TARGET_EXPLICIT_RELOCS
3721 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3722 && mips_ok_for_lazy_binding_p (addr))
3724 rtx high, lo_sum_symbol;
/* Build the GOT high part and the matching lo_sum symbol for a
   SYMBOL_GOTOFF_CALL access.  */
3726 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3727 addr, SYMBOL_GOTOFF_CALL);
3728 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
/* Use the load_call pattern matching the pointer width.  */
3729 if (Pmode == SImode)
3730 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3732 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
/* Otherwise a plain move of the address suffices.  */
3735 mips_emit_move (dest, addr);
3739 /* Expand a call or call_value instruction. RESULT is where the
3740 result will go (null for calls), ADDR is the address of the
3741 function, ARGS_SIZE is the size of the arguments and AUX is
3742 the value passed to us by mips_function_arg. SIBCALL_P is true
3743 if we are expanding a sibling call, false if we're expanding
3747 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3749 rtx orig_addr, pattern, insn;
/* If ADDR is not directly usable as a call operand, load it into
   a register first.  */
3752 if (!call_insn_operand (addr, VOIDmode))
3754 addr = gen_reg_rtx (Pmode);
3755 mips_load_call_address (addr, orig_addr, sibcall_p);
/* MIPS16 hard-float calls may be routed through a helper stub;
   if one was built there is nothing more to emit here.  */
3759 && TARGET_HARD_FLOAT_ABI
3760 && build_mips16_call_stub (result, addr, args_size,
3761 aux == 0 ? 0 : (int) GET_MODE (aux)))
/* Select the call pattern: no value, two-register value (PARALLEL
   of two pieces), or single-value.  */
3765 pattern = (sibcall_p
3766 ? gen_sibcall_internal (addr, args_size)
3767 : gen_call_internal (addr, args_size));
3768 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3772 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3773 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3776 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3777 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3780 pattern = (sibcall_p
3781 ? gen_sibcall_value_internal (result, addr, args_size)
3782 : gen_call_value_internal (result, addr, args_size));
3784 insn = emit_call_insn (pattern);
3786 /* Lazy-binding stubs require $gp to be valid on entry. */
3787 if (mips_ok_for_lazy_binding_p (orig_addr))
3788 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3792 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3795 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3796 tree exp ATTRIBUTE_UNUSED)
3798 return TARGET_SIBCALLS;
3801 /* Emit code to move general operand SRC into condition-code
3802 register DEST. SCRATCH is a scratch TFmode float register.
3809 where FP1 and FP2 are single-precision float registers
3810 taken from SCRATCH. */
3813 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3817 /* Change the source to SFmode. */
3819 src = adjust_address (src, SFmode, 0);
3820 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3821 src = gen_rtx_REG (SFmode, true_regnum (src));
/* Carve two single-precision registers out of the TFmode SCRATCH;
   the second is MAX_FPRS_PER_FMT registers above the first.  */
3823 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3824 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
/* Set DEST by comparing 0.0 < SRC (slt_sf on FP2, FP1).  */
3826 mips_emit_move (copy_rtx (fp1), src);
3827 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
3828 emit_insn (gen_slt_sf (dest, fp2, fp1));
3831 /* Emit code to change the current function's return address to
3832 ADDRESS. SCRATCH is available as a scratch register, if needed.
3833 ADDRESS and SCRATCH are both word-mode GPRs. */
3836 mips_set_return_address (rtx address, rtx scratch)
3840 compute_frame_size (get_frame_size ());
/* Bit 31 of the save mask is the return-address register ($31);
   it must be saved for this to make sense.  */
3841 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
/* Address the $31 save slot relative to $sp and store ADDRESS
   into it.  */
3842 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3843 cfun->machine->frame.gp_sp_offset);
3845 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3848 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3849 Assume that the areas do not overlap. */
3852 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3854 HOST_WIDE_INT offset, delta;
3855 unsigned HOST_WIDE_INT bits;
3857 enum machine_mode mode;
3860 /* Work out how many bits to move at a time. If both operands have
3861 half-word alignment, it is usually better to move in half words.
3862 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3863 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3864 Otherwise move word-sized chunks. */
3865 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3866 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3867 bits = BITS_PER_WORD / 2;
3869 bits = BITS_PER_WORD;
3871 mode = mode_for_size (bits, MODE_INT, 0);
3872 delta = bits / BITS_PER_UNIT;
3874 /* Allocate a buffer for the temporary registers. */
3875 regs = alloca (sizeof (rtx) * length / delta);
3877 /* Load as many BITS-sized chunks as possible. Use a normal load if
3878 the source has enough alignment, otherwise use left/right pairs. */
3879 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3881 regs[i] = gen_reg_rtx (mode);
3882 if (MEM_ALIGN (src) >= bits)
3883 mips_emit_move (regs[i], adjust_address (src, mode, offset));
3886 rtx part = adjust_address (src, BLKmode, offset);
3887 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3892 /* Copy the chunks to the destination. */
3893 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3894 if (MEM_ALIGN (dest) >= bits)
3895 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
3898 rtx part = adjust_address (dest, BLKmode, offset);
3899 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3903 /* Mop up any left-over bytes. */
3904 if (offset < length)
3906 src = adjust_address (src, BLKmode, offset);
3907 dest = adjust_address (dest, BLKmode, offset);
/* Delegate the sub-chunk tail to move_by_pieces, using the weaker
   of the two alignments.  */
3908 move_by_pieces (dest, src, length - offset,
3909 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
/* Block moves larger than MAX_MOVE_BYTES are done in a loop moving
   MAX_MOVE_REGS words per iteration.  */
3913 #define MAX_MOVE_REGS 4
3914 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3917 /* Helper function for doing a loop-based block operation on memory
3918 reference MEM. Each iteration of the loop will operate on LENGTH
3921 Create a new base register for use within the loop and point it to
3922 the start of MEM. Create a new memory reference that uses this
3923 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3926 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3927 rtx *loop_reg, rtx *loop_mem)
3929 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3931 /* Although the new mem does not refer to a known location,
3932 it does keep up to LENGTH bytes of alignment. */
3933 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3934 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3938 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3939 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3940 memory regions do not overlap. */
3943 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3945 rtx label, src_reg, dest_reg, final_src;
3946 HOST_WIDE_INT leftover;
/* LEFTOVER is what remains after the whole MAX_MOVE_BYTES chunks.  */
3948 leftover = length % MAX_MOVE_BYTES;
3951 /* Create registers and memory references for use within the loop. */
3952 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3953 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3955 /* Calculate the value that SRC_REG should have after the last iteration
3957 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3960 /* Emit the start of the loop. */
3961 label = gen_label_rtx ();
3964 /* Emit the loop body. */
3965 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3967 /* Move on to the next block. */
3968 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3969 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3971 /* Emit the loop condition. */
3972 if (Pmode == DImode)
3973 emit_insn (gen_cmpdi (src_reg, final_src));
3975 emit_insn (gen_cmpsi (src_reg, final_src));
/* Loop back while SRC_REG has not reached FINAL_SRC.  */
3976 emit_jump_insn (gen_bne (label));
3978 /* Mop up any left-over bytes. */
3980 mips_block_move_straight (dest, src, leftover);
3984 /* Expand a loop of synci insns for the address range [BEGIN, END). */
3987 mips_expand_synci_loop (rtx begin, rtx end)
3989 rtx inc, label, cmp, cmp_result;
3991 /* Load INC with the cache line size (rdhwr INC,$1). */
3992 inc = gen_reg_rtx (SImode);
3993 emit_insn (gen_rdhwr (inc, const1_rtx));
3995 /* Loop back to here. */
3996 label = gen_label_rtx ();
/* Synchronize the cache line containing BEGIN.  */
3999 emit_insn (gen_synci (begin));
/* CMP is nonzero once BEGIN has passed END.  */
4001 cmp = gen_reg_rtx (Pmode);
4002 mips_emit_binary (GTU, cmp, begin, end);
/* Advance BEGIN by one cache line.  */
4004 mips_emit_binary (PLUS, begin, begin, inc);
/* Loop while CMP is still zero (BEGIN <= END).  */
4006 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4007 emit_jump_insn (gen_condjump (cmp_result, label));
4010 /* Expand a movmemsi instruction. */
4013 mips_expand_block_move (rtx dest, rtx src, rtx length)
/* Only constant lengths are expanded inline: short blocks as
   straight-line code, longer ones as a loop.  */
4015 if (GET_CODE (length) == CONST_INT)
4017 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4019 mips_block_move_straight (dest, src, INTVAL (length));
4024 mips_block_move_loop (dest, src, INTVAL (length));
4031 /* Argument support functions. */
4033 /* Initialize CUMULATIVE_ARGS for a function. */
4036 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4037 rtx libname ATTRIBUTE_UNUSED)
4039 static CUMULATIVE_ARGS zero_cum;
4040 tree param, next_param;
/* A prototype exists iff FNTYPE carries an argument-type list.  */
4043 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4045 /* Determine if this function has variable arguments. This is
4046 indicated by the last argument being 'void_type_mode' if there
4047 are no variable arguments. The standard MIPS calling sequence
4048 passes all arguments in the general purpose registers in this case. */
4050 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4051 param != 0; param = next_param)
4053 next_param = TREE_CHAIN (param);
/* The list not ending in void_type_node means the function is
   variadic; force GPR passing via gp_reg_found.  */
4054 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4055 cum->gp_reg_found = 1;
4060 /* Fill INFO with information about a single argument. CUM is the
4061 cumulative state for earlier arguments. MODE is the mode of this
4062 argument and TYPE is its type (if known). NAMED is true if this
4063 is a named (fixed) argument rather than a variable one. */
4066 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4067 tree type, int named, struct mips_arg_info *info)
4069 bool doubleword_aligned_p;
4070 unsigned int num_bytes, num_words, max_regs;
4072 /* Work out the size of the argument. */
4073 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4074 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4076 /* Decide whether it should go in a floating-point register, assuming
4077 one is free. Later code checks for availability.
4079 The checks against UNITS_PER_FPVALUE handle the soft-float and
4080 single-float cases. */
/* NOTE(review): the switch on mips_abi selecting among the fpr_p
   computations below is elided in this excerpt.  */
4084 /* The EABI conventions have traditionally been defined in terms
4085 of TYPE_MODE, regardless of the actual type. */
4086 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4087 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4088 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4093 /* Only leading floating-point scalars are passed in
4094 floating-point registers. We also handle vector floats the same
4095 say, which is OK because they are not covered by the standard ABI. */
4096 info->fpr_p = (!cum->gp_reg_found
4097 && cum->arg_number < 2
4098 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4099 || VECTOR_FLOAT_TYPE_P (type))
4100 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4101 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4102 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4107 /* Scalar and complex floating-point types are passed in
4108 floating-point registers. */
4109 info->fpr_p = (named
4110 && (type == 0 || FLOAT_TYPE_P (type))
4111 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4112 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4113 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4114 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4116 /* ??? According to the ABI documentation, the real and imaginary
4117 parts of complex floats should be passed in individual registers.
4118 The real and imaginary parts of stack arguments are supposed
4119 to be contiguous and there should be an extra word of padding
4122 This has two problems. First, it makes it impossible to use a
4123 single "void *" va_list type, since register and stack arguments
4124 are passed differently. (At the time of writing, MIPSpro cannot
4125 handle complex float varargs correctly.) Second, it's unclear
4126 what should happen when there is only one register free.
4128 For now, we assume that named complex floats should go into FPRs
4129 if there are two FPRs free, otherwise they should be passed in the
4130 same way as a struct containing two floats. */
4132 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4133 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4135 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4136 info->fpr_p = false;
4146 /* See whether the argument has doubleword alignment. */
4147 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4149 /* Set REG_OFFSET to the register count we're interested in.
4150 The EABI allocates the floating-point registers separately,
4151 but the other ABIs allocate them like integer registers. */
4152 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4156 /* Advance to an even register if the argument is doubleword-aligned. */
4157 if (doubleword_aligned_p)
4158 info->reg_offset += info->reg_offset & 1;
4160 /* Work out the offset of a stack argument. */
4161 info->stack_offset = cum->stack_words;
4162 if (doubleword_aligned_p)
4163 info->stack_offset += info->stack_offset & 1;
4165 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4167 /* Partition the argument between registers and stack. */
4168 info->reg_words = MIN (num_words, max_regs);
4169 info->stack_words = num_words - info->reg_words;
4173 /* INFO describes an argument that is passed in a single-register value.
4174 Return the register it uses, assuming that FPRs are available if
4178 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
/* Non-FP arguments, or soft-float targets, use a GPR.  */
4180 if (!info->fpr_p || !hard_float_p)
4181 return GP_ARG_FIRST + info->reg_offset;
4182 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4183 /* In o32, the second argument is always passed in $f14
4184 for TARGET_DOUBLE_FLOAT, regardless of whether the
4185 first argument was a word or doubleword. */
4186 return FP_ARG_FIRST + 2;
4188 return FP_ARG_FIRST + info->reg_offset;
4191 /* Implement FUNCTION_ARG_ADVANCE. */
4194 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4195 tree type, int named)
4197 struct mips_arg_info info;
4199 mips_arg_info (cum, mode, type, named, &info);
/* Once a GPR-class argument has been seen, later leading-FP rules
   no longer apply (see mips_arg_info).  */
4202 cum->gp_reg_found = true;
4204 /* See the comment above the cumulative args structure in mips.h
4205 for an explanation of what this code does. It assumes the O32
4206 ABI, which passes at most 2 arguments in float registers. */
4207 if (cum->arg_number < 2 && info.fpr_p)
4208 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
/* Only the EABI tracks FPRs separately from GPRs.  */
4210 if (mips_abi != ABI_EABI || !info.fpr_p)
4211 cum->num_gprs = info.reg_offset + info.reg_words;
4212 else if (info.reg_words > 0)
4213 cum->num_fprs += MAX_FPRS_PER_FMT;
4215 if (info.stack_words > 0)
4216 cum->stack_words = info.stack_offset + info.stack_words;
4221 /* Implement FUNCTION_ARG. */
4224 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4225 tree type, int named)
4227 struct mips_arg_info info;
4229 /* We will be called with a mode of VOIDmode after the last argument
4230 has been seen. Whatever we return will be passed to the call
4231 insn. If we need a mips16 fp_code, return a REG with the code
4232 stored as the mode. */
4233 if (mode == VOIDmode)
4235 if (TARGET_MIPS16 && cum->fp_code != 0)
4236 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4242 mips_arg_info (cum, mode, type, named, &info);
4244 /* Return straight away if the whole argument is passed on the stack. */
4245 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
/* NOTE(review): additional conditions guarding this RECORD_TYPE
   special case are elided in this excerpt.  */
4249 && TREE_CODE (type) == RECORD_TYPE
4251 && TYPE_SIZE_UNIT (type)
4252 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4255 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4256 structure contains a double in its entirety, then that 64-bit
4257 chunk is passed in a floating point register. */
4260 /* First check to see if there is any such field. */
4261 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4262 if (TREE_CODE (field) == FIELD_DECL
4263 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4264 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4265 && host_integerp (bit_position (field), 0)
4266 && int_bit_position (field) % BITS_PER_WORD == 0)
4271 /* Now handle the special case by returning a PARALLEL
4272 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4273 chunks are passed in registers. */
4275 HOST_WIDE_INT bitpos;
4278 /* assign_parms checks the mode of ENTRY_PARM, so we must
4279 use the actual mode here. */
4280 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4283 field = TYPE_FIELDS (type);
4284 for (i = 0; i < info.reg_words; i++)
/* Skip to the first field at or beyond the current chunk.  */
4288 for (; field; field = TREE_CHAIN (field))
4289 if (TREE_CODE (field) == FIELD_DECL
4290 && int_bit_position (field) >= bitpos)
/* A double exactly filling this chunk goes in an FPR, otherwise
   the chunk goes in a GPR.  */
4294 && int_bit_position (field) == bitpos
4295 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4296 && !TARGET_SOFT_FLOAT
4297 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4298 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4300 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4303 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4304 GEN_INT (bitpos / BITS_PER_UNIT));
4306 bitpos += BITS_PER_WORD;
4312 /* Handle the n32/n64 conventions for passing complex floating-point
4313 arguments in FPR pairs. The real part goes in the lower register
4314 and the imaginary part goes in the upper register. */
4317 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4320 enum machine_mode inner;
4323 inner = GET_MODE_INNER (mode);
4324 reg = FP_ARG_FIRST + info.reg_offset;
4325 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4327 /* Real part in registers, imaginary part on stack. */
4328 gcc_assert (info.stack_words == info.reg_words);
4329 return gen_rtx_REG (inner, reg);
4333 gcc_assert (info.stack_words == 0);
4334 real = gen_rtx_EXPR_LIST (VOIDmode,
4335 gen_rtx_REG (inner, reg),
4337 imag = gen_rtx_EXPR_LIST (VOIDmode,
4339 reg + info.reg_words / 2),
4340 GEN_INT (GET_MODE_SIZE (inner)));
4341 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
/* Default: a single register chosen by mips_arg_regno.  */
4345 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4349 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4352 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4353 enum machine_mode mode, tree type, bool named)
4355 struct mips_arg_info info;
/* An argument split between registers and stack contributes
   REG_WORDS * UNITS_PER_WORD register bytes; otherwise zero.  */
4357 mips_arg_info (cum, mode, type, named, &info);
4358 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4362 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4363 PARM_BOUNDARY bits of alignment, but will be given anything up
4364 to STACK_BOUNDARY bits if the type requires it. */
4367 function_arg_boundary (enum machine_mode mode, tree type)
4369 unsigned int alignment;
/* Clamp the natural alignment to [PARM_BOUNDARY, STACK_BOUNDARY].  */
4371 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4372 if (alignment < PARM_BOUNDARY)
4373 alignment = PARM_BOUNDARY;
4374 if (alignment > STACK_BOUNDARY)
4375 alignment = STACK_BOUNDARY;
4379 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4380 upward rather than downward. In other words, return true if the
4381 first byte of the stack slot has useful data, false if the last
4385 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4387 /* On little-endian targets, the first byte of every stack argument
4388 is passed in the first byte of the stack slot. */
4389 if (!BYTES_BIG_ENDIAN)
4392 /* Otherwise, integral types are padded downward: the last byte of a
4393 stack argument is passed in the last byte of the stack slot. */
/* Use the tree type when available, otherwise classify by mode.  */
4395 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4396 : GET_MODE_CLASS (mode) == MODE_INT)
4399 /* Big-endian o64 pads floating-point arguments downward. */
4400 if (mips_abi == ABI_O64)
4401 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4404 /* Other types are padded upward for o32, o64, n32 and n64. */
4405 if (mips_abi != ABI_EABI)
4408 /* Arguments smaller than a stack slot are padded downward. */
4409 if (mode != BLKmode)
4410 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4412 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4416 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4417 if the least significant byte of the register has useful data. Return
4418 the opposite if the most significant byte does. */
4421 mips_pad_reg_upward (enum machine_mode mode, tree type)
4423 /* No shifting is required for floating-point arguments. */
4424 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4425 return !BYTES_BIG_ENDIAN;
4427 /* Otherwise, apply the same padding to register arguments as we do
4428 to stack arguments. */
4429 return mips_pad_arg_upward (mode, type);
/* Implement TARGET_SETUP_INCOMING_VARARGS: save the anonymous-argument
   registers to the stack so va_arg can find them.  */
4433 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4434 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4437 CUMULATIVE_ARGS local_cum;
4438 int gp_saved, fp_saved;
4440 /* The caller has advanced CUM up to, but not beyond, the last named
4441 argument. Advance a local copy of CUM past the last "real" named
4442 argument, to find out how many registers are left over. */
4445 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4447 /* Found out how many registers we need to save. */
4448 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4449 fp_saved = (EABI_FLOAT_VARARGS_P
4450 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* Save the leftover GPRs in a block just below the incoming
   argument area.  */
4459 ptr = plus_constant (virtual_incoming_args_rtx,
4460 REG_PARM_STACK_SPACE (cfun->decl)
4461 - gp_saved * UNITS_PER_WORD);
4462 mem = gen_rtx_MEM (BLKmode, ptr);
4463 set_mem_alias_set (mem, get_varargs_alias_set ());
4465 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4470 /* We can't use move_block_from_reg, because it will use
4472 enum machine_mode mode;
4475 /* Set OFF to the offset from virtual_incoming_args_rtx of
4476 the first float register. The FP save area lies below
4477 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4478 off = -gp_saved * UNITS_PER_WORD;
4479 off &= ~(UNITS_PER_FPVALUE - 1);
4480 off -= fp_saved * UNITS_PER_FPREG;
4482 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
/* Save each remaining FP argument register individually.  */
4484 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4485 i += MAX_FPRS_PER_FMT)
4489 ptr = plus_constant (virtual_incoming_args_rtx, off);
4490 mem = gen_rtx_MEM (mode, ptr);
4491 set_mem_alias_set (mem, get_varargs_alias_set ());
4492 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4493 off += UNITS_PER_HWFPVALUE;
/* Record the total save-area size when the ABI provides no
   register-parameter stack space.  */
4497 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4498 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4499 + fp_saved * UNITS_PER_FPREG);
4502 /* Create the va_list data type.
4503 We keep 3 pointers, and two offsets.
4504 Two pointers are to the overflow area, which starts at the CFA.
4505 One of these is constant, for addressing into the GPR save area below it.
4506 The other is advanced up the stack through the overflow region.
4507 The third pointer is to the GPR save area. Since the FPR save area
4508 is just below it, we can address FPR slots off this pointer.
4509 We also keep two one-byte offsets, which are to be subtracted from the
4510 constant pointers to yield addresses in the GPR and FPR save areas.
4511 These are downcounted as float or non-float arguments are used,
4512 and when they get to zero, the argument must be obtained from the
4514 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4515 pointer is enough. It's started at the GPR save area, and is
4517 Note that the GPR save area is not constant size, due to optimization
4518 in the prologue. Hence, we can't use a design with two pointers
4519 and two offsets, although we could have designed this with two pointers
4520 and three offsets. */
4523 mips_build_builtin_va_list (void)
4525 if (EABI_FLOAT_VARARGS_P)
4527 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
/* Build the five-field record described in the comment above.  */
4530 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4532 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4534 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4536 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4538 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4539 unsigned_char_type_node);
4540 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4541 unsigned_char_type_node);
4542 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4543 warn on every user file. */
4544 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4545 array = build_array_type (unsigned_char_type_node,
4546 build_index_type (index));
4547 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4549 DECL_FIELD_CONTEXT (f_ovfl) = record;
4550 DECL_FIELD_CONTEXT (f_gtop) = record;
4551 DECL_FIELD_CONTEXT (f_ftop) = record;
4552 DECL_FIELD_CONTEXT (f_goff) = record;
4553 DECL_FIELD_CONTEXT (f_foff) = record;
4554 DECL_FIELD_CONTEXT (f_res) = record;
/* Chain the fields in declaration order.  */
4556 TYPE_FIELDS (record) = f_ovfl;
4557 TREE_CHAIN (f_ovfl) = f_gtop;
4558 TREE_CHAIN (f_gtop) = f_ftop;
4559 TREE_CHAIN (f_ftop) = f_goff;
4560 TREE_CHAIN (f_goff) = f_foff;
4561 TREE_CHAIN (f_foff) = f_res;
4563 layout_type (record);
4566 else if (TARGET_IRIX && TARGET_IRIX6)
4567 /* On IRIX 6, this type is 'char *'. */
4568 return build_pointer_type (char_type_node);
4570 /* Otherwise, we use 'void *'. */
4571 return ptr_type_node;
4574 /* Implement va_start. */
/* Implement va_start.  For EABI_FLOAT_VARARGS_P, initialize the five
   fields of the va_list record built by mips_build_builtin_va_list;
   otherwise fall back to std_expand_builtin_va_start with NEXTARG
   adjusted by the varargs save-area size.
   NOTE(review): partial extraction -- braces, some declarations and the
   left-hand sides of two assignments (lines 4590/4592) are missing.  */
4577 mips_va_start (tree valist, rtx nextarg)
4579 if (EABI_FLOAT_VARARGS_P)
4581 const CUMULATIVE_ARGS *cum;
4582 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4583 tree ovfl, gtop, ftop, goff, foff;
4585 int gpr_save_area_size;
4586 int fpr_save_area_size;
/* NOTE(review): "¤t" below is mojibake -- almost certainly
   "&current_function_args_info" mangled via the HTML entity
   "&curren;".  Verify against the pristine source before building.  */
4589 cum = ¤t_function_args_info;
/* Save-area sizes: one slot per register still unused by named args.
   (The assigned variables are on the dropped lines above each "=".)  */
4591 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4593 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
/* Walk the field chain in the order mips_build_builtin_va_list laid
   it out: ovfl, gtop, ftop, goff, foff.  */
4595 f_ovfl = TYPE_FIELDS (va_list_type_node);
4596 f_gtop = TREE_CHAIN (f_ovfl);
4597 f_ftop = TREE_CHAIN (f_gtop);
4598 f_goff = TREE_CHAIN (f_ftop);
4599 f_foff = TREE_CHAIN (f_goff);
4601 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4603 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4605 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4607 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4609 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4612 /* Emit code to initialize OVFL, which points to the next varargs
4613 stack argument. CUM->STACK_WORDS gives the number of stack
4614 words used by named arguments. */
4615 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4616 if (cum->stack_words > 0)
4617 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4618 size_int (cum->stack_words * UNITS_PER_WORD));
4619 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4620 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4622 /* Emit code to initialize GTOP, the top of the GPR save area. */
4623 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4624 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4625 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4627 /* Emit code to initialize FTOP, the top of the FPR save area.
4628 This address is gpr_save_area_bytes below GTOP, rounded
4629 down to the next fp-aligned boundary. */
4630 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
/* Round the GPR save-area size up to an FP-value boundary, then mask
   down; subtracting this from GTOP yields an fp-aligned FTOP.  */
4631 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4632 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4634 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4635 size_int (-fpr_offset));
4636 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4637 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4639 /* Emit code to initialize GOFF, the offset from GTOP of the
4640 next GPR argument. */
4641 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4642 build_int_cst (NULL_TREE, gpr_save_area_size));
4643 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4645 /* Likewise emit code to initialize FOFF, the offset from FTOP
4646 of the next FPR argument. */
4647 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4648 build_int_cst (NULL_TREE, fpr_save_area_size));
4649 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Non-EABI path: point NEXTARG below the varargs save area and let
   the generic expander do the rest.  */
4653 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4654 std_expand_builtin_va_start (valist, nextarg);
4658 /* Implement va_arg. */
/* Implement va_arg (TARGET_GIMPLIFY_VA_ARG_EXPR).  Non-EABI ABIs use
   the standard gimplifier; EABI with FP varargs selects between the
   FPR and GPR save areas via the offset downcounters and falls back
   to the overflow area when the counter hits zero.
   NOTE(review): partial extraction -- braces, some declarations and a
   few statements are missing between the visible lines.  */
4661 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4663 HOST_WIDE_INT size, rsize;
/* Pass-by-reference arguments are fetched as a pointer and then
   dereferenced at the end (build_va_arg_indirect_ref below).  */
4667 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4670 type = build_pointer_type (type);
4672 size = int_size_in_bytes (type);
4673 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4675 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4676 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4679 /* Not a simple merged stack. */
4681 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4682 tree ovfl, top, off, align;
4683 HOST_WIDE_INT osize;
/* Field order must match mips_build_builtin_va_list.  */
4686 f_ovfl = TYPE_FIELDS (va_list_type_node);
4687 f_gtop = TREE_CHAIN (f_ovfl);
4688 f_ftop = TREE_CHAIN (f_gtop);
4689 f_goff = TREE_CHAIN (f_ftop);
4690 f_foff = TREE_CHAIN (f_goff);
4692 /* We maintain separate pointers and offsets for floating-point
4693 and integer arguments, but we need similar code in both cases.
4696 TOP be the top of the register save area;
4697 OFF be the offset from TOP of the next register;
4698 ADDR_RTX be the address of the argument;
4699 RSIZE be the number of bytes used to store the argument
4700 when it's in the register save area;
4701 OSIZE be the number of bytes used to store it when it's
4702 in the stack overflow area; and
4703 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4705 The code we want is:
4707 1: off &= -rsize; // round down
4710 4: addr_rtx = top - off;
4715 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4716 10: addr_rtx = ovfl + PADDING;
4720 [1] and [9] can sometimes be optimized away. */
4722 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
/* Choose the FPR save area only for scalar FP values that fit in a
   hardware FP register; everything else uses the GPR area.  */
4725 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4726 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4728 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4730 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4733 /* When floating-point registers are saved to the stack,
4734 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4735 of the float's precision. */
4736 rsize = UNITS_PER_HWFPVALUE;
4738 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4739 (= PARM_BOUNDARY bits). This can be different from RSIZE
4742 (1) On 32-bit targets when TYPE is a structure such as:
4744 struct s { float f; };
4746 Such structures are passed in paired FPRs, so RSIZE
4747 will be 8 bytes. However, the structure only takes
4748 up 4 bytes of memory, so OSIZE will only be 4.
4750 (2) In combinations such as -mgp64 -msingle-float
4751 -fshort-double. Doubles passed in registers
4752 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4753 but those passed on the stack take up
4754 UNITS_PER_WORD bytes. */
4755 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
/* GPR path: use the integer save area; the missing lines presumably
   set OSIZE = RSIZE here -- confirm against the pristine source.  */
4759 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4761 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4763 if (rsize > UNITS_PER_WORD)
4765 /* [1] Emit code for: off &= -rsize. */
4766 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4767 build_int_cst (NULL_TREE, -rsize));
4768 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4769 gimplify_and_add (t, pre_p);
4774 /* [2] Emit code to branch if off == 0. */
4775 t = build2 (NE_EXPR, boolean_type_node, off,
4776 build_int_cst (TREE_TYPE (off), 0));
4777 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4779 /* [5] Emit code for: off -= rsize. We do this as a form of
4780 post-increment not available to C. Also widen for the
4781 coming pointer arithmetic. */
4782 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4783 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4784 t = fold_convert (sizetype, t);
4785 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4787 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4788 the argument has RSIZE - SIZE bytes of leading padding. */
4789 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4790 if (BYTES_BIG_ENDIAN && rsize > size)
4792 u = size_int (rsize - size);
4793 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4795 COND_EXPR_THEN (addr) = t;
4797 if (osize > UNITS_PER_WORD)
4799 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4800 u = size_int (osize - 1);
4801 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4802 t = fold_convert (sizetype, t);
4803 u = size_int (-osize);
4804 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4805 t = fold_convert (TREE_TYPE (ovfl), t);
4806 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4811 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4812 post-increment ovfl by osize. On big-endian machines,
4813 the argument has OSIZE - SIZE bytes of leading padding. */
4814 u = fold_convert (TREE_TYPE (ovfl),
4815 build_int_cst (NULL_TREE, osize));
4816 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4817 if (BYTES_BIG_ENDIAN && osize > size)
4819 u = size_int (osize - size);
4820 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4823 /* String [9] and [10,11] together. */
4825 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4826 COND_EXPR_ELSE (addr) = t;
4828 addr = fold_convert (build_pointer_type (type), addr);
4829 addr = build_va_arg_indirect_ref (addr);
/* For pass-by-reference arguments, dereference the fetched pointer
   a second time to obtain the actual object.  */
4833 addr = build_va_arg_indirect_ref (addr);
4838 /* Return true if it is possible to use left/right accesses for a
4839 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4840 returning true, update *OP, *LEFT and *RIGHT as follows:
4842 *OP is a BLKmode reference to the whole field.
4844 *LEFT is a QImode reference to the first byte if big endian or
4845 the last byte if little endian. This address can be used in the
4846 left-side instructions (lwl, swl, ldl, sdl).
4848 *RIGHT is a QImode reference to the opposite end of the field and
4849 can be used in the patterning right-side instruction. */
/* See the comment block above (L2719-L2727 region): decide whether a
   WIDTH-bit field at BITPOS in *OP can be accessed with the MIPS
   left/right (lwl/lwr etc.) instruction pairs, and if so rewrite *OP
   to BLKmode and set *LEFT/*RIGHT to QImode references to the two ends.
   NOTE(review): partial extraction -- the return type, braces and the
   early "return false" statements after each check are missing.  */
4852 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4853 rtx *left, rtx *right)
4857 /* Check that the operand really is a MEM. Not all the extv and
4858 extzv predicates are checked. */
4862 /* Check that the size is valid. */
4863 if (width != 32 && (!TARGET_64BIT || width != 64))
4866 /* We can only access byte-aligned values. Since we are always passed
4867 a reference to the first byte of the field, it is not necessary to
4868 do anything with BITPOS after this check. */
4869 if (bitpos % BITS_PER_UNIT != 0)
4872 /* Reject aligned bitfields: we want to use a normal load or store
4873 instead of a left/right pair. */
4874 if (MEM_ALIGN (*op) >= width)
4877 /* Adjust *OP to refer to the whole field. This also has the effect
4878 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4879 *op = adjust_address (*op, BLKmode, 0);
4880 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4882 /* Get references to both ends of the field. We deliberately don't
4883 use the original QImode *OP for FIRST since the new BLKmode one
4884 might have a simpler address. */
4885 first = adjust_address (*op, QImode, 0);
4886 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4888 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4889 be the upper word and RIGHT the lower word. */
4890 if (TARGET_BIG_ENDIAN)
4891 *left = first, *right = last;
4893 *left = last, *right = first;
4899 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4900 Return true on success. We only handle cases where zero_extract is
4901 equivalent to sign_extract. */
/* Emit an unaligned load of WIDTH bits at BITPOS in SRC into DEST using
   ldl/ldr (64-bit) or lwl/lwr (32-bit) pairs; returns whether it
   succeeded (per the comment at L2755-L2757).
   NOTE(review): partial extraction -- return type, braces and the
   final "return true"/"return false" lines are missing.  */
4904 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4906 rtx left, right, temp;
4908 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4909 paradoxical word_mode subreg. This is the only case in which
4910 we allow the destination to be larger than the source. */
4911 if (GET_CODE (dest) == SUBREG
4912 && GET_MODE (dest) == DImode
4913 && SUBREG_BYTE (dest) == 0
4914 && GET_MODE (SUBREG_REG (dest)) == SImode)
4915 dest = SUBREG_REG (dest);
4917 /* After the above adjustment, the destination must be the same
4918 width as the source. */
4919 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4922 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
/* TEMP holds the partial value between the left-half and right-half
   loads; copy_rtx keeps the two MEM uses of SRC independent.  */
4925 temp = gen_reg_rtx (GET_MODE (dest));
4926 if (GET_MODE (dest) == DImode)
4928 emit_insn (gen_mov_ldl (temp, src, left));
4929 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4933 emit_insn (gen_mov_lwl (temp, src, left));
4934 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4940 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
/* Emit an unaligned store of SRC into the WIDTH-bit field at BITPOS in
   DEST using sdl/sdr or swl/swr pairs (see comment at L2778).
   NOTE(review): partial extraction -- return type, braces, the mode
   test selecting sdl vs swl, and the return statements are missing.  */
4944 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4947 enum machine_mode mode;
4949 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
/* Reinterpret SRC in the integer mode matching WIDTH so the left/right
   store patterns see a plain integer register.  */
4952 mode = mode_for_size (width, MODE_INT, 0);
4953 src = gen_lowpart (mode, src);
4957 emit_insn (gen_mov_sdl (dest, src, left));
4958 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4962 emit_insn (gen_mov_swl (dest, src, left));
4963 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4968 /* Return true if X is a MEM with the same size as MODE. */
/* Return true if X is a MEM whose recorded size equals MODE's size
   (per the comment at L2788).  NOTE(review): partial extraction --
   return type, braces and the MEM check preceding MEM_SIZE are
   missing.  */
4971 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
/* MEM_SIZE may be null; the && below guards the INTVAL access.  */
4978 size = MEM_SIZE (x);
4979 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4982 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4983 source of an "ext" instruction or the destination of an "ins"
4984 instruction. OP must be a register operand and the following
4985 conditions must hold:
4987 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4988 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4989 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4991 Also reject lengths equal to a word as they are better handled
4992 by the move patterns. */
/* Decide whether (zero_extract OP SIZE POSITION) can map to an "ext"
   source or "ins" destination; the required range conditions are spelled
   out in the comment block at L2792-L2800.  NOTE(review): partial
   extraction -- return type, braces and the return statements are
   missing.  */
4995 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4997 HOST_WIDE_INT len, pos;
/* Bail out unless the ISA has ext/ins, OP is a register, and OP is no
   wider than a machine word.  */
4999 if (!ISA_HAS_EXT_INS
5000 || !register_operand (op, VOIDmode)
5001 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
5004 len = INTVAL (size);
5005 pos = INTVAL (position);
/* Note "len >= bitsize" (not ">"): full-word lengths are rejected here
   because the move patterns handle them better (see L2799-L2800).  */
5007 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5008 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5014 /* Set up globals to generate code for the ISA or processor
5015 described by INFO. */
/* Record INFO as the selected architecture: cache the info pointer and
   copy out its processor and ISA identifiers (see comment at
   L2810-L2811).  NOTE(review): partial extraction -- return type,
   braces and any null-check of INFO are missing.  */
5018 mips_set_architecture (const struct mips_cpu_info *info)
5022 mips_arch_info = info;
5023 mips_arch = info->cpu;
5024 mips_isa = info->isa;
5029 /* Likewise for tuning. */
/* Record INFO as the processor to tune for, mirroring
   mips_set_architecture (see "Likewise for tuning" at L2816).
   NOTE(review): partial extraction -- return type, braces and any
   null-check of INFO are missing.  */
5032 mips_set_tune (const struct mips_cpu_info *info)
5036 mips_tune_info = info;
5037 mips_tune = info->cpu;
5041 /* Implement TARGET_HANDLE_OPTION. */
/* Implement TARGET_HANDLE_OPTION (per L2820): process one command-line
   option identified by CODE with textual argument ARG.
   NOTE(review): partial extraction -- the switch statement, most case
   labels (only fragments of -mabi=, -march=/-mtune=, -mips, flush-func
   and code-readable handling are visible), braces and return
   statements are missing.  */
5044 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
/* -mabi= string-to-enum mapping; the assignments for "32"/"o64"/
   "n32"/"64" sit on dropped lines between these conditions.  */
5049 if (strcmp (arg, "32") == 0)
5051 else if (strcmp (arg, "o64") == 0)
5053 else if (strcmp (arg, "n32") == 0)
5055 else if (strcmp (arg, "64") == 0)
5057 else if (strcmp (arg, "eabi") == 0)
5058 mips_abi = ABI_EABI;
/* -march=/-mtune=: accept the option iff the CPU name parses.  */
5065 return mips_parse_cpu (arg) != 0;
/* -mipsN: build "mips" + ARG and parse it as an ISA level.  */
5068 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5069 return mips_isa_info != 0;
5071 case OPT_mno_flush_func:
5072 mips_cache_flush_func = NULL;
5075 case OPT_mcode_readable_:
5076 if (strcmp (arg, "yes") == 0)
5077 mips_code_readable = CODE_READABLE_YES;
5078 else if (strcmp (arg, "pcrel") == 0)
5079 mips_code_readable = CODE_READABLE_PCREL;
5080 else if (strcmp (arg, "no") == 0)
5081 mips_code_readable = CODE_READABLE_NO;
5091 /* Set up the threshold for data to go into the small data area, instead
5092 of the normal data area, and detect any conflicts in the switches. */
/* Implement OVERRIDE_OPTIONS (see comment at L2840-L2841): resolve the
   architecture/tuning selection, reconcile register-size and FP-mode
   flags with the chosen ABI/ISA, set derived target flags, and build
   the register/relocation lookup tables.
   NOTE(review): partial extraction throughout -- braces, #else/#endif
   lines, several else-branches and case labels are missing, and each
   line keeps its original file line number.  Comments below mark only
   the major sections visible in the fragments.  */
5095 override_options (void)
5097 int i, start, regno;
5098 enum machine_mode mode;
5100 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5101 SUBTARGET_OVERRIDE_OPTIONS;
5104 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5106 /* The following code determines the architecture and register size.
5107 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5108 The GAS and GCC code should be kept in sync as much as possible. */
5110 if (mips_arch_string != 0)
5111 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5113 if (mips_isa_info != 0)
5115 if (mips_arch_info == 0)
5116 mips_set_architecture (mips_isa_info);
5117 else if (mips_arch_info->isa != mips_isa_info->isa)
5118 error ("-%s conflicts with the other architecture options, "
5119 "which specify a %s processor",
5120 mips_isa_info->name,
5121 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5124 if (mips_arch_info == 0)
5126 #ifdef MIPS_CPU_STRING_DEFAULT
5127 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5129 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5133 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5134 error ("-march=%s is not compatible with the selected ABI",
5135 mips_arch_info->name);
5137 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5138 if (mips_tune_string != 0)
5139 mips_set_tune (mips_parse_cpu (mips_tune_string));
5141 if (mips_tune_info == 0)
5142 mips_set_tune (mips_arch_info);
5144 /* Set cost structure for the processor. */
5146 mips_cost = &mips_rtx_cost_optimize_size;
5148 mips_cost = &mips_rtx_cost_data[mips_tune];
5150 /* If the user hasn't specified a branch cost, use the processor's
5152 if (mips_branch_cost == 0)
5153 mips_branch_cost = mips_cost->branch_cost;
5155 if ((target_flags_explicit & MASK_64BIT) != 0)
5157 /* The user specified the size of the integer registers. Make sure
5158 it agrees with the ABI and ISA. */
5159 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5160 error ("-mgp64 used with a 32-bit processor");
5161 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5162 error ("-mgp32 used with a 64-bit ABI");
5163 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5164 error ("-mgp64 used with a 32-bit ABI");
5168 /* Infer the integer register size from the ABI and processor.
5169 Restrict ourselves to 32-bit registers if that's all the
5170 processor has, or if the ABI cannot handle 64-bit registers. */
5171 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5172 target_flags &= ~MASK_64BIT;
5174 target_flags |= MASK_64BIT;
5177 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5179 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5180 only one right answer here. */
5181 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5182 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5183 else if (!TARGET_64BIT && TARGET_FLOAT64
5184 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5185 error ("-mgp32 and -mfp64 can only be combined if the target"
5186 " supports the mfhc1 and mthc1 instructions");
5187 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5188 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5192 /* -msingle-float selects 32-bit float registers. Otherwise the
5193 float registers should be the same size as the integer ones. */
5194 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5195 target_flags |= MASK_FLOAT64;
5197 target_flags &= ~MASK_FLOAT64;
5200 /* End of code shared with GAS. */
5202 if ((target_flags_explicit & MASK_LONG64) == 0)
5204 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5205 target_flags |= MASK_LONG64;
5207 target_flags &= ~MASK_LONG64;
5210 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5211 && (target_flags_explicit & MASK_SOFT_FLOAT_ABI) == 0)
5213 /* For some configurations, it is useful to have -march control
5214 the default setting of MASK_SOFT_FLOAT_ABI. */
5215 switch ((int) mips_arch)
5217 case PROCESSOR_R4100:
5218 case PROCESSOR_R4111:
5219 case PROCESSOR_R4120:
5220 case PROCESSOR_R4130:
5221 target_flags |= MASK_SOFT_FLOAT_ABI;
5225 target_flags &= ~MASK_SOFT_FLOAT_ABI;
5231 flag_pcc_struct_return = 0;
5233 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5235 /* If neither -mbranch-likely nor -mno-branch-likely was given
5236 on the command line, set MASK_BRANCHLIKELY based on the target
5239 By default, we enable use of Branch Likely instructions on
5240 all architectures which support them with the following
5241 exceptions: when creating MIPS32 or MIPS64 code, and when
5242 tuning for architectures where their use tends to hurt
5245 The MIPS32 and MIPS64 architecture specifications say "Software
5246 is strongly encouraged to avoid use of Branch Likely
5247 instructions, as they will be removed from a future revision
5248 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5249 issue those instructions unless instructed to do so by
5251 if (ISA_HAS_BRANCHLIKELY
5252 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5253 && !(TUNE_MIPS5500 || TUNE_SB1))
5254 target_flags |= MASK_BRANCHLIKELY;
5256 target_flags &= ~MASK_BRANCHLIKELY;
5258 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5259 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5261 /* The effect of -mabicalls isn't defined for the EABI. */
5262 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5264 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5265 target_flags &= ~MASK_ABICALLS;
5268 if (TARGET_ABICALLS)
5270 /* We need to set flag_pic for executables as well as DSOs
5271 because we may reference symbols that are not defined in
5272 the final executable. (MIPS does not use things like
5273 copy relocs, for example.)
5275 Also, there is a body of code that uses __PIC__ to distinguish
5276 between -mabicalls and -mno-abicalls code. */
5278 if (mips_section_threshold > 0)
5279 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
5282 if (TARGET_VXWORKS_RTP && mips_section_threshold > 0)
5283 warning (0, "-G and -mrtp are incompatible");
5285 /* mips_split_addresses is a half-way house between explicit
5286 relocations and the traditional assembler macros. It can
5287 split absolute 32-bit symbolic constants into a high/lo_sum
5288 pair but uses macros for other sorts of access.
5290 Like explicit relocation support for REL targets, it relies
5291 on GNU extensions in the assembler and the linker.
5293 Although this code should work for -O0, it has traditionally
5294 been treated as an optimization. */
5295 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5296 && optimize && !flag_pic
5297 && !ABI_HAS_64BIT_SYMBOLS)
5298 mips_split_addresses = 1;
5300 mips_split_addresses = 0;
5302 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5303 faster code, but at the expense of more nops. Enable it at -O3 and
5305 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5306 target_flags |= MASK_VR4130_ALIGN;
/* NOTE(review): the next few lines appear to belong to a TARGET_MIPS16
   conditional whose "if" line was dropped in extraction -- confirm.  */
5310 /* Don't run the scheduler before reload, since it tends to
5311 increase register pressure. */
5312 flag_schedule_insns = 0;
5314 /* Don't do hot/cold partitioning. The constant layout code expects
5315 the whole function to be in a single section. */
5316 flag_reorder_blocks_and_partition = 0;
5318 /* Silently disable -mexplicit-relocs since it doesn't apply
5319 to mips16 code. Even so, it would overly pedantic to warn
5320 about "-mips16 -mexplicit-relocs", especially given that
5321 we use a %gprel() operator. */
5322 target_flags &= ~MASK_EXPLICIT_RELOCS;
5325 /* When using explicit relocs, we call dbr_schedule from within
5327 if (TARGET_EXPLICIT_RELOCS)
5329 mips_flag_delayed_branch = flag_delayed_branch;
5330 flag_delayed_branch = 0;
5333 /* Prefer a call to memcpy over inline code when optimizing for size,
5334 though see MOVE_RATIO in mips.h. */
5335 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
5336 target_flags |= MASK_MEMCPY;
5338 #ifdef MIPS_TFMODE_FORMAT
5339 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5342 /* Make sure that the user didn't turn off paired single support when
5343 MIPS-3D support is requested. */
5344 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5345 && !TARGET_PAIRED_SINGLE_FLOAT)
5346 error ("-mips3d requires -mpaired-single");
5348 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5350 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5352 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5353 and TARGET_HARD_FLOAT are both true. */
5354 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5355 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5357 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5359 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5360 error ("-mips3d/-mpaired-single must be used with -mips64");
5362 /* If TARGET_DSPR2, enable MASK_DSP. */
5364 target_flags |= MASK_DSP;
5366 if (TARGET_MIPS16 && TARGET_DSP)
5367 error ("-mips16 and -mdsp cannot be used together");
/* Register every character treated as punctuation by the operand
   printer (%? %# etc. in output templates).  */
5369 mips_print_operand_punct['?'] = 1;
5370 mips_print_operand_punct['#'] = 1;
5371 mips_print_operand_punct['/'] = 1;
5372 mips_print_operand_punct['&'] = 1;
5373 mips_print_operand_punct['!'] = 1;
5374 mips_print_operand_punct['*'] = 1;
5375 mips_print_operand_punct['@'] = 1;
5376 mips_print_operand_punct['.'] = 1;
5377 mips_print_operand_punct['('] = 1;
5378 mips_print_operand_punct[')'] = 1;
5379 mips_print_operand_punct['['] = 1;
5380 mips_print_operand_punct[']'] = 1;
5381 mips_print_operand_punct['<'] = 1;
5382 mips_print_operand_punct['>'] = 1;
5383 mips_print_operand_punct['{'] = 1;
5384 mips_print_operand_punct['}'] = 1;
5385 mips_print_operand_punct['^'] = 1;
5386 mips_print_operand_punct['$'] = 1;
5387 mips_print_operand_punct['+'] = 1;
5388 mips_print_operand_punct['~'] = 1;
5390 /* Set up array to map GCC register number to debug register number.
5391 Ignore the special purpose register numbers. */
5393 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5395 mips_dbx_regno[i] = INVALID_REGNUM;
5396 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
5397 mips_dwarf_regno[i] = i;
5399 mips_dwarf_regno[i] = INVALID_REGNUM;
5402 start = GP_DBX_FIRST - GP_REG_FIRST;
5403 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5404 mips_dbx_regno[i] = i + start;
5406 start = FP_DBX_FIRST - FP_REG_FIRST;
5407 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5408 mips_dbx_regno[i] = i + start;
5410 /* HI and LO debug registers use big-endian ordering. */
5411 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5412 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5413 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
5414 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
/* DSP accumulator pairs: which half maps to the lower DWARF number
   depends on endianness (exactly one of the two macros is 1).  */
5415 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
5417 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
5418 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
5421 /* Set up array giving whether a given register can hold a given mode. */
5423 for (mode = VOIDmode;
5424 mode != MAX_MACHINE_MODE;
5425 mode = (enum machine_mode) ((int)mode + 1))
5427 register int size = GET_MODE_SIZE (mode);
5428 register enum mode_class class = GET_MODE_CLASS (mode);
5430 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5434 if (mode == CCV2mode)
5437 && (regno - ST_REG_FIRST) % 2 == 0);
5439 else if (mode == CCV4mode)
5442 && (regno - ST_REG_FIRST) % 4 == 0);
5444 else if (mode == CCmode)
5447 temp = (regno == FPSW_REGNUM);
5449 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5450 || FP_REG_P (regno));
5453 else if (GP_REG_P (regno))
5454 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5456 else if (FP_REG_P (regno))
5457 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5458 || (MIN_FPRS_PER_FMT == 1
5459 && size <= UNITS_PER_FPREG))
5460 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5461 || class == MODE_VECTOR_FLOAT)
5462 && size <= UNITS_PER_FPVALUE)
5463 /* Allow integer modes that fit into a single
5464 register. We need to put integers into FPRs
5465 when using instructions like cvt and trunc.
5466 We can't allow sizes smaller than a word,
5467 the FPU has no appropriate load/store
5468 instructions for those. */
5469 || (class == MODE_INT
5470 && size >= MIN_UNITS_PER_WORD
5471 && size <= UNITS_PER_FPREG)
5472 /* Allow TFmode for CCmode reloads. */
5473 || (ISA_HAS_8CC && mode == TFmode)));
5475 else if (ACC_REG_P (regno))
5476 temp = (INTEGRAL_MODE_P (mode)
5477 && size <= UNITS_PER_WORD * 2
5478 && (size <= UNITS_PER_WORD
5479 || regno == MD_REG_FIRST
5480 || (DSP_ACC_REG_P (regno)
5481 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
5483 else if (ALL_COP_REG_P (regno))
5484 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5488 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5492 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5493 initialized yet, so we can't use that here. */
5494 gpr_mode = TARGET_64BIT ? DImode : SImode;
5496 /* Provide default values for align_* for 64-bit targets. */
5497 if (TARGET_64BIT && !TARGET_MIPS16)
5499 if (align_loops == 0)
5501 if (align_jumps == 0)
5503 if (align_functions == 0)
5504 align_functions = 8;
5507 /* Function to allocate machine-dependent function status. */
5508 init_machine_status = &mips_init_machine_status;
/* Relocation-operator tables: which symbol types are split into
   HIGH/LO_SUM pairs and which %reloc() strings print each half.  */
5510 if (ABI_HAS_64BIT_SYMBOLS)
5512 if (TARGET_EXPLICIT_RELOCS)
5514 mips_split_p[SYMBOL_64_HIGH] = true;
5515 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5516 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5518 mips_split_p[SYMBOL_64_MID] = true;
5519 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5520 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5522 mips_split_p[SYMBOL_64_LOW] = true;
5523 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5524 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5526 mips_split_p[SYMBOL_ABSOLUTE] = true;
5527 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5532 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5534 mips_split_p[SYMBOL_ABSOLUTE] = true;
5535 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5536 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5538 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5544 /* The high part is provided by a pseudo copy of $gp. */
5545 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5546 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5549 if (TARGET_EXPLICIT_RELOCS)
5551 /* Small data constants are kept whole until after reload,
5552 then lowered by mips_rewrite_small_data. */
5553 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5555 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5558 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5559 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5563 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5564 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5569 /* The HIGH and LO_SUM are matched by special .md patterns. */
5570 mips_split_p[SYMBOL_GOT_DISP] = true;
5572 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5573 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5574 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5576 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5577 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5578 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5583 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5585 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5586 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5592 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5593 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5594 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5597 /* Thread-local relocation operators. */
5598 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5599 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5600 mips_split_p[SYMBOL_DTPREL] = 1;
5601 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5602 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5603 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5604 mips_split_p[SYMBOL_TPREL] = 1;
5605 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5606 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5608 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5610 /* Default to working around R4000 errata only if the processor
5611 was selected explicitly. */
5612 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5613 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5614 target_flags |= MASK_FIX_R4000;
5616 /* Default to working around R4400 errata only if the processor
5617 was selected explicitly. */
5618 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5619 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5620 target_flags |= MASK_FIX_R4400;
5623 /* Swap the register information for registers I and I + 1, which
5624 currently have the wrong endianness. Note that the registers'
5625 fixedness and call-clobberedness might have been set on the
/* Swap the per-register bookkeeping (fixedness, call-clobberedness and
   printed name) for registers I and I + 1 -- see the comment block at
   L3179-L3181.  NOTE(review): partial extraction -- the return type,
   braces, the tmpi/tmps declarations and the trailing #undefs are
   missing.  */
5629 mips_swap_registers (unsigned int i)
/* Comma-expression swap helpers; tmpi/tmps are declared on dropped
   lines above.  */
5634 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
5635 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
5637 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
5638 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
5639 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
5640 SWAP_STRING (reg_names[i], reg_names[i + 1]);
5646 /* Implement CONDITIONAL_REGISTER_USAGE.  Adjust fixed_regs,
   call_used_regs and call_really_used_regs for the selected target
   options (floating point, condition codes, MIPS16, ABI, endianness).  */
5649 mips_conditional_register_usage (void)
/* When the DSP extension is absent (presumably guarded by a condition
   missing from this listing), hide the DSP accumulator registers.  */
5655 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5656 fixed_regs[regno] = call_used_regs[regno] = 1;
5658 if (!TARGET_HARD_FLOAT)
/* Soft float: neither the FPRs nor the FP status registers exist.  */
5662 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5663 fixed_regs[regno] = call_used_regs[regno] = 1;
5664 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5665 fixed_regs[regno] = call_used_regs[regno] = 1;
5667 else if (! ISA_HAS_8CC)
5671 /* We only have a single condition code register.  We
5672 implement this by hiding all the condition code registers,
5673 and generating RTL that refers directly to ST_REG_FIRST.  */
5674 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5675 fixed_regs[regno] = call_used_regs[regno] = 1;
5677 /* In mips16 mode, we permit the $t temporary registers to be used
5678 for reload.  We prohibit the unused $s registers, since they
5679 are caller saved, and saving them via a mips16 register would
5680 probably waste more time than just reloading the value.  */
5683 fixed_regs[18] = call_used_regs[18] = 1;
5684 fixed_regs[19] = call_used_regs[19] = 1;
5685 fixed_regs[20] = call_used_regs[20] = 1;
5686 fixed_regs[21] = call_used_regs[21] = 1;
5687 fixed_regs[22] = call_used_regs[22] = 1;
5688 fixed_regs[23] = call_used_regs[23] = 1;
5689 fixed_regs[26] = call_used_regs[26] = 1;
5690 fixed_regs[27] = call_used_regs[27] = 1;
5691 fixed_regs[30] = call_used_regs[30] = 1;
5693 /* fp20-23 are now caller saved.  */
5694 if (mips_abi == ABI_64)
5697 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5698 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5700 /* Odd registers from fp21 to fp31 are now caller saved.  */
5701 if (mips_abi == ABI_N32)
5704 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5705 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5707 /* Make sure that double-register accumulator values are correctly
5708 ordered for the current endianness.  */
5709 if (TARGET_LITTLE_ENDIAN)
5712 mips_swap_registers (MD_REG_FIRST);
5713 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
5714 mips_swap_registers (regno);
5718 /* Allocate a chunk of memory for per-function machine-dependent data.  */
/* Installed as init_machine_status; returns a zeroed, GC-allocated
   struct machine_function for the current function.  */
5719 static struct machine_function *
5720 mips_init_machine_status (void)
5722 return ((struct machine_function *)
5723 ggc_alloc_cleared (sizeof (struct machine_function)));
5726 /* On the mips16, we want to allocate $24 (T_REG) before other
5727 registers for instructions for which it is possible.  This helps
5728 avoid shuffling registers around in order to set up for an xor,
5729 encouraging the compiler to use a cmp instead.  */
/* Implements ORDER_REGS_FOR_LOCAL_ALLOC: fill reg_alloc_order with the
   identity ordering, then (in MIPS16 mode, per the comment above —
   the guarding condition is missing from this listing) swap $0 and $24.  */
5732 mips_order_regs_for_local_alloc (void)
5736 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5737 reg_alloc_order[i] = i;
5741 /* It really doesn't matter where we put register 0, since it is
5742 a fixed register anyhow.  */
5743 reg_alloc_order[0] = 24;
5744 reg_alloc_order[24] = 0;
5749 /* The MIPS debug format wants all automatic variables and arguments
5750 to be in terms of the virtual frame pointer (stack pointer before
5751 any adjustment in the function), while the MIPS 3.0 linker wants
5752 the frame pointer to be the stack pointer after the initial
5753 adjustment.  So, we do the adjustment here.  The arg pointer (which
5754 is eliminated) points to the virtual frame pointer, while the frame
5755 pointer (which may be eliminated) points to the stack pointer after
5756 the initial adjustments.  */
5759 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5761 rtx offset2 = const0_rtx;
5762 rtx reg = eliminate_constant_term (addr, &offset2);
/* If the caller passed no explicit offset, take the constant part
   extracted from ADDR instead (guard missing from this listing).  */
5765 offset = INTVAL (offset2);
5767 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5768 || reg == hard_frame_pointer_rtx)
/* Use the cached frame size when it is already final, otherwise
   compute it fresh from the current frame layout.  */
5770 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5771 ? compute_frame_size (get_frame_size ())
5772 : cfun->machine->frame.total_size;
5774 /* MIPS16 frame is smaller */
5775 if (frame_pointer_needed && TARGET_MIPS16)
5776 frame_size -= cfun->machine->frame.args_size;
5778 offset = offset - frame_size;
5781 /* sdbout_parms does not want this to crash for unrecognized cases.  */
5783 else if (reg != arg_pointer_rtx)
5784 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5791 /* If OP is an UNSPEC address, return the address to which it refers,
5792 otherwise return OP itself.  */
5795 mips_strip_unspec_address (rtx op)
/* Split OP into a base and a constant offset; if the base is one of
   the UNSPEC_ADDRESS wrappers, rebuild a plain symbol + offset.  */
5799 split_const (op, &base, &offset);
5800 if (UNSPEC_ADDRESS_P (base))
5801 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
5805 /* Implement the PRINT_OPERAND macro.  The MIPS-specific operand codes are:
5807 'X'  OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5808 'x'  OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5809 'h'  OP is HIGH, prints %hi(X),
5810 'd'  output integer constant in decimal,
5811 'z'  if the operand is 0, use $0 instead of normal operand.
5812 'D'  print second part of double-word register or memory operand.
5813 'L'  print low-order register of double-word register operand.
5814 'M'  print high-order register of double-word register operand.
5815 'C'  print part of opcode for a branch condition.
5816 'F'  print part of opcode for a floating-point branch condition.
5817 'N'  print part of opcode for a branch condition, inverted.
5818 'W'  print part of opcode for a floating-point branch condition, inverted.
5819 'T'  print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5820 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5821 't'  like 'T', but with the EQ/NE cases reversed
5822 'Y'  for a CONST_INT X, print mips_fp_conditions[X]
5823 'Z'  print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5824 'R'  print the reloc associated with LO_SUM
5825 'q'  print DSP accumulator registers
5827 The punctuation characters are:
5829 '('  Turn on .set noreorder
5830 ')'  Turn on .set reorder
5831 '['  Turn on .set noat
5833 '<'  Turn on .set nomacro
5834 '>'  Turn on .set macro
5835 '{'  Turn on .set volatile (not GAS)
5836 '}'  Turn on .set novolatile (not GAS)
5837 '&'  Turn on .set noreorder if filling delay slots
5838 '*'  Turn on both .set noreorder and .set nomacro if filling delay slots
5839 '!'  Turn on .set nomacro if filling delay slots
5840 '#'  Print nop if in a .set noreorder section.
5841 '/'  Like '#', but does nothing within a delayed branch sequence
5842 '?'  Print 'l' if we are to use a branch likely instead of normal branch.
5843 '@'  Print the name of the assembler temporary register (at or $1).
5844 '.'  Print the name of the register with a hard-wired zero (zero or $0).
5845 '^'  Print the name of the pic call-through register (t9 or $25).
5846 '$'  Print the name of the stack pointer register (sp or $29).
5847 '+'  Print the name of the gp register (usually gp or $28).
5848 '~'  Output a branch alignment to LABEL_ALIGN(NULL).  */
/* NOTE(review): this listing is heavily sampled — the punctuation
   switch head, most case labels and several closing braces are not
   visible.  The comments below annotate only what can be seen.  */
5851 print_operand (FILE *file, rtx op, int letter)
5853 register enum rtx_code code;
/* Punctuation letters select fixed text keyed off global .set state
   counters (set_noreorder, set_nomacro, set_noat, set_volatile).  */
5855 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5860 if (mips_branch_likely)
5865 fputs (reg_names [GP_REG_FIRST + 1], file);
5869 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5873 fputs (reg_names [GP_REG_FIRST + 0], file);
5877 fputs (reg_names[STACK_POINTER_REGNUM], file);
5881 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
5885 if (final_sequence != 0 && set_noreorder++ == 0)
5886 fputs (".set\tnoreorder\n\t", file);
5890 if (final_sequence != 0)
5892 if (set_noreorder++ == 0)
5893 fputs (".set\tnoreorder\n\t", file);
5895 if (set_nomacro++ == 0)
5896 fputs (".set\tnomacro\n\t", file);
5901 if (final_sequence != 0 && set_nomacro++ == 0)
5902 fputs ("\n\t.set\tnomacro", file);
5906 if (set_noreorder != 0)
5907 fputs ("\n\tnop", file);
5911 /* Print an extra newline so that the delayed insn is separated
5912 from the following ones.  This looks neater and is consistent
5913 with non-nop delayed sequences.  */
5914 if (set_noreorder != 0 && final_sequence == 0)
5915 fputs ("\n\tnop\n", file);
5919 if (set_noreorder++ == 0)
5920 fputs (".set\tnoreorder\n\t", file);
5924 if (set_noreorder == 0)
5925 error ("internal error: %%) found without a %%( in assembler pattern");
5927 else if (--set_noreorder == 0)
5928 fputs ("\n\t.set\treorder", file);
5933 if (set_noat++ == 0)
5934 fputs (".set\tnoat\n\t", file);
5939 error ("internal error: %%] found without a %%[ in assembler pattern");
5940 else if (--set_noat == 0)
5941 fputs ("\n\t.set\tat", file);
5946 if (set_nomacro++ == 0)
5947 fputs (".set\tnomacro\n\t", file);
5951 if (set_nomacro == 0)
5952 error ("internal error: %%> found without a %%< in assembler pattern");
5953 else if (--set_nomacro == 0)
5954 fputs ("\n\t.set\tmacro", file);
5959 if (set_volatile++ == 0)
5960 fputs ("#.set\tvolatile\n\t", file);
5964 if (set_volatile == 0)
5965 error ("internal error: %%} found without a %%{ in assembler pattern");
5966 else if (--set_volatile == 0)
5967 fputs ("\n\t#.set\tnovolatile", file);
5973 if (align_labels_log > 0)
5974 ASM_OUTPUT_ALIGN (file, align_labels_log);
5979 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5988 error ("PRINT_OPERAND null pointer");
5992 code = GET_CODE (op);
/* 'C': integer branch-condition mnemonic fragment.  */
5997 case EQ:	fputs ("eq",  file); break;
5998 case NE:	fputs ("ne",  file); break;
5999 case GT:	fputs ("gt",  file); break;
6000 case GE:	fputs ("ge",  file); break;
6001 case LT:	fputs ("lt",  file); break;
6002 case LE:	fputs ("le",  file); break;
6003 case GTU: fputs ("gtu", file); break;
6004 case GEU: fputs ("geu", file); break;
6005 case LTU: fputs ("ltu", file); break;
6006 case LEU: fputs ("leu", file); break;
6008 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6011 else if (letter == 'N')
/* 'N': same as 'C' but with every condition inverted.  */
6014 case EQ:	fputs ("ne",  file); break;
6015 case NE:	fputs ("eq",  file); break;
6016 case GT:	fputs ("le",  file); break;
6017 case GE:	fputs ("lt",  file); break;
6018 case LT:	fputs ("ge",  file); break;
6019 case LE:	fputs ("gt",  file); break;
6020 case GTU: fputs ("leu", file); break;
6021 case GEU: fputs ("ltu", file); break;
6022 case LTU: fputs ("geu", file); break;
6023 case LEU: fputs ("gtu", file); break;
6025 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6028 else if (letter == 'F')
/* 'F'/'W': FP branch condition, normal and inverted.  */
6031 case EQ: fputs ("c1f", file); break;
6032 case NE: fputs ("c1t", file); break;
6034 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6037 else if (letter == 'W')
6040 case EQ: fputs ("c1t", file); break;
6041 case NE: fputs ("c1f", file); break;
6043 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6046 else if (letter == 'h')
6048 if (GET_CODE (op) == HIGH)
6051 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6054 else if (letter == 'R')
6055 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6057 else if (letter == 'Y')
6059 if (GET_CODE (op) == CONST_INT
6060 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6061 < ARRAY_SIZE (mips_fp_conditions)))
6062 fputs (mips_fp_conditions[INTVAL (op)], file);
6064 output_operand_lossage ("invalid %%Y value");
6067 else if (letter == 'Z')
6071 print_operand (file, op, 0);
6076 else if (letter == 'q')
/* 'q': name the DSP accumulator ($ac0 for hi/lo, $acN otherwise).  */
6081 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6083 regnum = REGNO (op);
6084 if (MD_REG_P (regnum))
6085 fprintf (file, "$ac0");
6086 else if (DSP_ACC_REG_P (regnum))
6087 fprintf (file, "$ac%c", reg_names[regnum][3]);
6089 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6092 else if (code == REG || code == SUBREG)
6094 register int regnum;
6097 regnum = REGNO (op);
6099 regnum = true_regnum (op);
/* 'M'/'L'/'D' on a register pair select the other word depending
   on endianness (regnum adjustment lines not visible here).  */
6101 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6102 || (letter == 'L' && WORDS_BIG_ENDIAN)
6106 fprintf (file, "%s", reg_names[regnum]);
6109 else if (code == MEM)
/* 'D' on a memory operand: address of the second (offset + 4) word.  */
6112 output_address (plus_constant (XEXP (op, 0), 4));
6114 output_address (XEXP (op, 0));
6117 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6118 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6120 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6121 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6123 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6124 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6126 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6127 fputs (reg_names[GP_REG_FIRST], file);
6129 else if (letter == 'd' || letter == 'x' || letter == 'X')
6130 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6132 else if (letter == 'T' || letter == 't')
/* Index into "zfnt": bit 1 selects by EQ/NE (reversed for 't'),
   bit 0 selects the CCmode spelling.  */
6134 int truth = (code == NE) == (letter == 'T');
6135 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6138 else if (CONST_GP_P (op))
6139 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6142 output_addr_const (file, mips_strip_unspec_address (op));
6146 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6147 in context CONTEXT.  RELOCS is the array of relocations to use.  */
6150 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6151 const char **relocs)
6153 enum mips_symbol_type symbol_type;
/* Reject operands for which no relocation operator is defined.  */
6156 if (!mips_symbolic_constant_p (op, context, &symbol_type)
6157 || relocs[symbol_type] == 0)
6158 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
/* Emit "%reloc(SYMBOL" then one ')' per '(' in the relocation prefix
   (the matching fputc inside the loop is not visible in this listing).  */
6160 fputs (relocs[symbol_type], file);
6161 output_addr_const (file, mips_strip_unspec_address (op));
6162 for (p = relocs[symbol_type]; *p != 0; p++)
6167 /* Output address operand X to FILE.  Dispatches on the classified
   address form: reg+offset, LO_SUM, absolute constant, or symbolic.  */
6170 print_operand_address (FILE *file, rtx x)
6172 struct mips_address_info addr;
6174 if (mips_classify_address (&addr, x, word_mode, true))
/* ADDRESS_REG: "offset(reg)".  */
6178 print_operand (file, addr.offset, 0);
6179 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6182 case ADDRESS_LO_SUM:
6183 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6185 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6188 case ADDRESS_CONST_INT:
/* Absolute addresses are printed relative to $0.  */
6189 output_addr_const (file, x);
6190 fprintf (file, "(%s)", reg_names[0]);
6193 case ADDRESS_SYMBOLIC:
6194 output_addr_const (file, mips_strip_unspec_address (x));
6200 /* When using assembler macros, keep track of all of small-data externs
6201 so that mips_file_end can emit the appropriate declarations for them.
6203 In most cases it would be safe (though pointless) to emit .externs
6204 for other symbols too.  One exception is when an object is within
6205 the -G limit but declared by the user to be in a section other
6206 than .sbss or .sdata.  */
6209 mips_output_external (FILE *file, tree decl, const char *name)
6211 default_elf_asm_output_external (file, decl, name);
6213 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6214 set in order to avoid putting out names that are never really
6216 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6218 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
/* Small-data extern: emit ".extern name, size" so the assembler
   can use gp-relative addressing for it.  */
6220 fputs ("\t.extern\t", file);
6221 assemble_name (file, name);
6222 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6223 int_size_in_bytes (TREE_TYPE (decl)));
6225 else if (TARGET_IRIX
6226 && mips_abi == ABI_32
6227 && TREE_CODE (decl) == FUNCTION_DECL)
6229 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6230 `.global name .text' directive for every used but
6231 undefined function.  If we don't, the linker may perform
6232 an optimization (skipping over the insns that set $gp)
6233 when it is unsafe.  */
6234 fputs ("\t.globl ", file);
6235 assemble_name (file, name);
6236 fputs (" .text\n", file);
6241 /* Emit a new filename to a stream.  If we are smuggling stabs, try to
6242 put out a MIPS ECOFF file and a stab.  */
6245 mips_output_filename (FILE *stream, const char *name)
6248 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6250 if (write_symbols == DWARF2_DEBUG)
/* First filename seen: always emit a numbered ".file" directive.  */
6252 else if (mips_output_filename_first_time)
6254 mips_output_filename_first_time = 0;
6255 num_source_filenames += 1;
6256 current_function_file = name;
6257 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6258 output_quoted_string (stream, name);
6259 putc ('\n', stream);
6262 /* If we are emitting stabs, let dbxout.c handle this (except for
6263 the mips_output_filename_first_time case).  */
6264 else if (write_symbols == DBX_DEBUG)
/* Otherwise emit a fresh ".file" only when the name actually changed.  */
6267 else if (name != current_function_file
6268 && strcmp (name, current_function_file) != 0)
6270 num_source_filenames += 1;
6271 current_function_file = name;
6272 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6273 output_quoted_string (stream, name);
6274 putc ('\n', stream);
6278 /* Output an ASCII string, in a space-saving way.  PREFIX is the string
6279 that should be written before the opening quote, such as "\t.ascii\t"
6280 for real string data or "\t# " for a comment.  */
6283 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6288 register const unsigned char *string =
6289 (const unsigned char *)string_param;
6291 fprintf (stream, "%s\"", prefix);
6292 for (i = 0; i < len; i++)
6294 register int c = string[i];
/* Printable characters are emitted directly; quote and backslash
   need escaping; everything else becomes a \ooo octal escape.  */
6298 if (c == '\\' || c == '\"')
6300 putc ('\\', stream);
6308 fprintf (stream, "\\%03o", c);
/* Break long lines (column tracked in cur_pos, updates not visible
   in this listing) so the assembler input stays readable.  */
6312 if (cur_pos > 72 && i+1 < len)
6315 fprintf (stream, "\"\n%s\"", prefix);
6318 fprintf (stream, "\"\n");
6321 /* Implement TARGET_ASM_FILE_START.  Emits the MIPS-specific
   prologue directives: .mdebug ABI section, long-size marker section,
   GNU FP attribute, .abicalls / .set mips16, and a verbose banner.  */
6324 mips_file_start (void)
6326 default_file_start ();
6330 /* Generate a special section to describe the ABI switches used to
6331 produce the resultant binary.  This used to be done by the assembler
6332 setting bits in the ELF header's flags field, but we have run out of
6333 bits.  GDB needs this information in order to be able to correctly
6334 debug these binaries.  See the function mips_gdbarch_init() in
6335 gdb/mips-tdep.c.  This is unnecessary for the IRIX 5/6 ABIs and
6336 causes unnecessary IRIX 6 ld warnings.  */
6337 const char * abi_string = NULL;
6341 case ABI_32:   abi_string = "abi32"; break;
6342 case ABI_N32:  abi_string = "abiN32"; break;
6343 case ABI_64:   abi_string = "abi64"; break;
6344 case ABI_O64:  abi_string = "abiO64"; break;
6345 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6349 /* Note - we use fprintf directly rather than calling switch_to_section
6350 because in this way we can avoid creating an allocated section.  We
6351 do not want this section to take up any space in the running
6353 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6355 /* There is no ELF header flag to distinguish long32 forms of the
6356 EABI from long64 forms.  Emit a special section to help tools
6357 such as GDB.  Do the same for o64, which is sometimes used with
6359 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6360 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6361 TARGET_LONG64 ? 64 : 32);
6363 /* Restore the default section.  */
6364 fprintf (asm_out_file, "\t.previous\n");
6366 #ifdef HAVE_AS_GNU_ATTRIBUTE
/* Tag 4 records the FP ABI: 1 = hard double, 2 = hard single, 3 = soft.  */
6367 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6368 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6372 /* Generate the pseudo ops that System V.4 wants.  */
6373 if (TARGET_ABICALLS)
6374 fprintf (asm_out_file, "\t.abicalls\n");
6377 fprintf (asm_out_file, "\t.set\tmips16\n");
6379 if (flag_verbose_asm)
6380 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6382 mips_section_threshold, mips_arch_info->name, mips_isa);
6385 #ifdef BSS_SECTION_ASM_OP
6386 /* Implement ASM_OUTPUT_ALIGNED_BSS.  This differs from the default only
6387 in the use of sbss.  */
6390 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6391 unsigned HOST_WIDE_INT size, int align)
6393 extern tree last_assemble_variable_decl;
/* Small objects go in .sbss so they can be addressed gp-relative.  */
6395 if (mips_in_small_data_p (decl))
6396 switch_to_section (get_named_section (NULL, ".sbss", 0));
6398 switch_to_section (bss_section);
6399 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6400 last_assemble_variable_decl = decl;
6401 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
/* Reserve at least one byte so the symbol has a distinct address.  */
6402 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6406 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  This is usually the same as the
6407 elfos.h version, but we also need to handle -muninit-const-in-rodata.  */
6410 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6411 unsigned HOST_WIDE_INT size,
6414 /* If the target wants uninitialized const declarations in
6415 .rdata then don't put them in .comm.  */
6416 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6417 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6418 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6420 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6421 targetm.asm_out.globalize_label (stream, name);
6423 switch_to_section (readonly_data_section);
6424 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
/* Emit "name:\n\t.space SIZE" in .rodata instead of a .comm.  */
6425 mips_declare_object (stream, name, "",
6426 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
/* Otherwise fall back to an ordinary common symbol.  */
6430 mips_declare_common_object (stream, name, "\n\t.comm\t",
6434 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6435 NAME is the name of the object and ALIGN is the required alignment
6436 in bytes.  TAKES_ALIGNMENT_P is true if the directive takes a third
6437 alignment argument.  */
6440 mips_declare_common_object (FILE *stream, const char *name,
6441 const char *init_string,
6442 unsigned HOST_WIDE_INT size,
6443 unsigned int align, bool takes_alignment_p)
6445 if (!takes_alignment_p)
/* The directive has no alignment operand, so bake the alignment
   into the size by rounding SIZE up to a multiple of ALIGN bytes.  */
6447 size += (align / BITS_PER_UNIT) - 1;
6448 size -= size % (align / BITS_PER_UNIT);
6449 mips_declare_object (stream, name, init_string,
6450 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6453 mips_declare_object (stream, name, init_string,
6454 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6455 size, align / BITS_PER_UNIT);
6458 /* Emit either a label, .comm, or .lcomm directive.  When using assembler
6459 macros, mark the symbol as written so that mips_file_end won't emit an
6460 .extern for it.  STREAM is the output file, NAME is the name of the
6461 symbol, INIT_STRING is the string that should be written before the
6462 symbol and FINAL_STRING is the string that should be written after it.
6463 FINAL_STRING is a printf() format that consumes the remaining arguments.  */
6466 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6467 const char *final_string, ...)
6471 fputs (init_string, stream);
6472 assemble_name (stream, name);
6473 va_start (ap, final_string);
6474 vfprintf (stream, final_string, ap);
/* (va_end presumably follows here; not visible in this listing.)  */
6477 if (!TARGET_EXPLICIT_RELOCS)
/* Remember that this symbol was defined so no .extern is emitted.  */
6479 tree name_tree = get_identifier (name);
6480 TREE_ASM_WRITTEN (name_tree) = 1;
6484 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6485 extern int size_directive_output;
6487 /* Implement ASM_DECLARE_OBJECT_NAME.  This is like most of the standard ELF
6488 definitions except that it uses mips_declare_object() to emit the label.  */
6491 mips_declare_object_name (FILE *stream, const char *name,
6492 tree decl ATTRIBUTE_UNUSED)
6494 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6495 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
/* Emit .size now if the declaration's size is already known.  */
6498 size_directive_output = 0;
6499 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6503 size_directive_output = 1;
6504 size = int_size_in_bytes (TREE_TYPE (decl));
6505 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6508 mips_declare_object (stream, name, "", ":\n");
6511 /* Implement ASM_FINISH_DECLARE_OBJECT.  This is generic ELF stuff:
   emit a late .size directive for tentative definitions whose size
   was not known when the name was first declared.  */
6514 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6518 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6519 if (!flag_inhibit_size_directive
6520 && DECL_SIZE (decl) != 0
6521 && !at_end && top_level
6522 && DECL_INITIAL (decl) == error_mark_node
6523 && !size_directive_output)
6527 size_directive_output = 1;
6528 size = int_size_in_bytes (TREE_TYPE (decl));
6529 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6534 /* Return true if X in context CONTEXT is a small data address that can
6535 be rewritten as a LO_SUM.  Requires explicit relocations, since the
   rewrite produces a %gp_rel LO_SUM against $gp.  */
6538 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
6540 enum mips_symbol_type symbol_type;
6542 return (TARGET_EXPLICIT_RELOCS
6543 && mips_symbolic_constant_p (x, context, &symbol_type)
6544 && symbol_type == SYMBOL_GP_RELATIVE);
6548 /* A for_each_rtx callback for mips_small_data_pattern_p.  DATA is the
6549 containing MEM, or null if none.  Returns nonzero on the first
   rewritable small-data reference found.  */
6552 mips_small_data_pattern_1 (rtx *loc, void *data)
6554 enum mips_symbol_context context;
/* A LO_SUM address is already in rewritten form; only its first
   operand needs scanning (recursion below passes the MEM as DATA).  */
6556 if (GET_CODE (*loc) == LO_SUM)
6561 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
/* Classify the reference by whether it sits inside a MEM.  */
6566 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6567 return mips_rewrite_small_data_p (*loc, context);
6570 /* Return true if OP refers to small data symbols directly, not through
   a LO_SUM.  Walks the whole rtx with mips_small_data_pattern_1.  */
6574 mips_small_data_pattern_p (rtx op)
6576 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6579 /* A for_each_rtx callback, used by mips_rewrite_small_data.
6580 DATA is the containing MEM, or null if none.  Rewrites each direct
   small-data reference into a LO_SUM against $gp.  */
6583 mips_rewrite_small_data_1 (rtx *loc, void *data)
6585 enum mips_symbol_context context;
/* For a MEM, rewrite references inside its address (passing the MEM
   as DATA so they are classified as SYMBOL_CONTEXT_MEM).  */
6589 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
6593 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6594 if (mips_rewrite_small_data_p (*loc, context))
6595 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6597 if (GET_CODE (*loc) == LO_SUM)
6603 /* If possible, rewrite OP so that it refers to small data using
6604 explicit relocations.  Works on a copy so the original pattern
   is left untouched.  */
6607 mips_rewrite_small_data (rtx op)
6609 op = copy_insn (op);
6610 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6614 /* Return true if the current function has an insn that implicitly
   uses $gp (a GOT reference or a direct small-data access).  The
   answer is cached in cfun->machine->has_gp_insn_p.  */
6618 mips_function_has_gp_insn (void)
6620 /* Don't bother rechecking if we found one last time.  */
6621 if (!cfun->machine->has_gp_insn_p)
/* Scan every real insn; stop at the first one whose "got" attribute
   is set or that matches a small-data pattern.  */
6625 push_topmost_sequence ();
6626 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6628 && GET_CODE (PATTERN (insn)) != USE
6629 && GET_CODE (PATTERN (insn)) != CLOBBER
6630 && (get_attr_got (insn) != GOT_UNSET
6631 || small_data_pattern (PATTERN (insn), VOIDmode)))
6633 pop_topmost_sequence ();
6635 cfun->machine->has_gp_insn_p = (insn != 0);
6637 return cfun->machine->has_gp_insn_p;
6641 /* Return the register that should be used as the global pointer
6642 within this function.  Return 0 if the function doesn't need
6643 a global pointer.  */
6646 mips_global_pointer (void)
6650 /* $gp is always available unless we're using a GOT.  */
6651 if (!TARGET_USE_GOT)
6652 return GLOBAL_POINTER_REGNUM;
6654 /* We must always provide $gp when it is used implicitly.  */
6655 if (!TARGET_EXPLICIT_RELOCS)
6656 return GLOBAL_POINTER_REGNUM;
6658 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6660 if (current_function_profile)
6661 return GLOBAL_POINTER_REGNUM;
6663 /* If the function has a nonlocal goto, $gp must hold the correct
6664 global pointer for the target function.  */
6665 if (current_function_has_nonlocal_goto)
6666 return GLOBAL_POINTER_REGNUM;
6668 /* If the gp is never referenced, there's no need to initialize it.
6669 Note that reload can sometimes introduce constant pool references
6670 into a function that otherwise didn't need them.  For example,
6671 suppose we have an instruction like:
6673 (set (reg:DF R1) (float:DF (reg:SI R2)))
6675 If R2 turns out to be constant such as 1, the instruction may have a
6676 REG_EQUAL note saying that R1 == 1.0.  Reload then has the option of
6677 using this constant if R2 doesn't get allocated to a register.
6679 In cases like these, reload will have added the constant to the pool
6680 but no instruction will yet refer to it.  */
6681 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
6682 && !current_function_uses_const_pool
6683 && !mips_function_has_gp_insn ())
6686 /* We need a global pointer, but perhaps we can use a call-clobbered
6687 register instead of $gp.  */
6688 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6689 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6690 if (!df_regs_ever_live_p (regno)
6691 && call_used_regs[regno]
6692 && !fixed_regs[regno]
6693 && regno != PIC_FUNCTION_ADDR_REGNUM)
/* (The return of REGNO for the chosen substitute is not visible
   in this listing.)  */
6696 return GLOBAL_POINTER_REGNUM;
6700 /* Return true if the function return value MODE will get returned in a
6701 floating-point register.  True for scalar, vector and complex float
   modes whose unit fits in a hardware FP return register.  */
6704 mips_return_mode_in_fpr_p (enum machine_mode mode)
6706 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6707 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
6708 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6709 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6712 /* Return a two-character string representing a function floating-point
6713 return mode, used to name MIPS16 function stubs.  (The SFmode case and
   the returned literals are not visible in this sampled listing.)  */
6716 mips16_call_stub_mode_suffix (enum machine_mode mode)
6720 else if (mode == DFmode)
6722 else if (mode == SCmode)
6724 else if (mode == DCmode)
6726 else if (mode == V2SFmode)
6732 /* Return true if the current function returns its value in a floating-point
6733 register in MIPS16 mode.  Only relevant for hard-float ABIs; aggregate
   returns go through memory and never use an FPR.  */
6736 mips16_cfun_returns_in_fpr_p (void)
6738 tree return_type = DECL_RESULT (current_function_decl);
6739 return (TARGET_MIPS16
6740 && TARGET_HARD_FLOAT_ABI
6741 && !aggregate_value_p (return_type, current_function_decl)
6742 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
6746 /* Return true if the current function must save REGNO in its
   prologue (used when laying out the stack frame).  */
6749 mips_save_reg_p (unsigned int regno)
6751 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6752 if we have not chosen a call-clobbered substitute.  */
6753 if (regno == GLOBAL_POINTER_REGNUM)
6754 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6756 /* Check call-saved registers.  */
6757 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
6760 /* Save both registers in an FPR pair if either one is used.  This is
6761 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
6762 register to be used without the even register.  */
6763 if (FP_REG_P (regno)
6764 && MAX_FPRS_PER_FMT == 2
6765 && df_regs_ever_live_p (regno + 1)
6766 && !call_used_regs[regno + 1])
6769 /* We need to save the old frame pointer before setting up a new one.  */
6770 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6773 /* We need to save the incoming return address if it is ever clobbered
6774 within the function.  */
6775 if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))
/* The remaining checks apply to MIPS16 code (the guarding condition
   is not visible in this sampled listing).  */
6780 /* $18 is a special case in mips16 code.  It may be used to call
6781 a function which returns a floating point value, but it is
6782 marked in call_used_regs.  */
6783 if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))
6786 /* $31 is also a special case.  It will be used to copy a return
6787 value into the floating point registers if the return value is
6789 if (regno == GP_REG_FIRST + 31
6790 && mips16_cfun_returns_in_fpr_p ())
6797 /* Return the index of the lowest X in the range [0, SIZE) for which
6798 bit REGS[X] is set in MASK.  Return SIZE if there is no such X.  */
6801 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
/* Linear scan; REGS maps indexes to register numbers tested via BITSET_P.  */
6806 for (i = 0; i < size; i++)
6807 if (BITSET_P (mask, regs[i]))
6813 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
6814 is the number of bytes that they occupy.  If *MASK_PTR contains REGS[X]
6815 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
6816 the same is true for all indexes (X, SIZE).  */
6819 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
6820 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
/* MIPS16e save/restore instructions can only handle contiguous runs
   of registers, so pad the mask out to the end of the REGS list,
   accounting for each newly-added register in the save-area size.  */
6824 i = mips16e_find_first_register (*mask_ptr, regs, size);
6825 for (i++; i < size; i++)
6826 if (!BITSET_P (*mask_ptr, regs[i]))
6828 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
6829 *mask_ptr |= 1 << regs[i];
6833 /* Return the bytes needed to compute the frame pointer from the current
6834 stack pointer. SIZE is the size (in bytes) of the local variables.
6836 MIPS stack frames look like:
6838 Before call After call
6839 high +-----------------------+ +-----------------------+
6841 | caller's temps. | | caller's temps. |
6843 +-----------------------+ +-----------------------+
6845 | arguments on stack. | | arguments on stack. |
6847 +-----------------------+ +-----------------------+
6848 | 4 words to save | | 4 words to save |
6849 | arguments passed | | arguments passed |
6850 | in registers, even | | in registers, even |
6851 | if not passed. | | if not passed. |
6852 SP->+-----------------------+ VFP->+-----------------------+
6853 (VFP = SP+fp_sp_offset) | |\
6854 | fp register save | | fp_reg_size
6856 SP+gp_sp_offset->+-----------------------+
6858 | | gp register save | | gp_reg_size
6859 gp_reg_rounded | | |/
6860 | +-----------------------+
6861 \| alignment padding |
6862 +-----------------------+
6864 | local variables | | var_size
6866 +-----------------------+
6868 | alloca allocations |
6870 +-----------------------+
6872 cprestore_size | | GP save for V.4 abi |
6874 +-----------------------+
6876 | arguments on stack | |
6878 +-----------------------+ |
6879 | 4 words to save | | args_size
6880 | arguments passed | |
6881 | in registers, even | |
6882 | if not passed. | |
6883 low | (TARGET_OLDABI only) |/
6884 memory SP->+-----------------------+
/* NOTE(review): several original lines of this function are missing from
   the extract (return type, braces, some initializations); the statements
   below are kept verbatim.  The function fills in cfun->machine->frame.  */
6889 compute_frame_size (HOST_WIDE_INT size)
6892 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6893 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6894 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6895 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6896 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6897 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6898 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6899 unsigned int mask; /* mask of saved gp registers */
6900 unsigned int fmask; /* mask of saved fp registers */
/* Decide which register (if any) holds the global pointer first, since
   that affects which registers need saving.  */
6902 cfun->machine->global_pointer = mips_global_pointer ();
6908 var_size = MIPS_STACK_ALIGN (size);
6909 args_size = current_function_outgoing_args_size;
6910 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6912 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6913 functions. If the function has local variables, we're committed
6914 to allocating it anyway. Otherwise reclaim it here. */
6915 if (var_size == 0 && current_function_is_leaf)
6916 cprestore_size = args_size = 0;
6918 /* The MIPS 3.0 linker does not like functions that dynamically
6919 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6920 looks like we are trying to create a second frame pointer to the
6921 function, so allocate some stack space to make it happy. */
6923 if (args_size == 0 && current_function_calls_alloca)
6924 args_size = 4 * UNITS_PER_WORD;
6926 total_size = var_size + args_size + cprestore_size;
6928 /* Calculate space needed for gp registers. */
6929 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6930 if (mips_save_reg_p (regno))
6932 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6933 mask |= 1 << (regno - GP_REG_FIRST);
6936 /* We need to restore these for the handler. */
6937 if (current_function_calls_eh_return)
6942 regno = EH_RETURN_DATA_REGNO (i);
6943 if (regno == INVALID_REGNUM)
6945 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6946 mask |= 1 << (regno - GP_REG_FIRST);
6950 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
6951 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
6952 save all later registers too. */
6953 if (GENERATE_MIPS16E_SAVE_RESTORE)
6955 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
6956 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
6957 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
6958 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
6961 /* This loop must iterate over the same space as its companion in
6962 mips_for_each_saved_reg. */
/* FPRs are saved in MAX_FPRS_PER_FMT-sized groups, scanning downward.  */
6963 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
6964 regno >= FP_REG_FIRST;
6965 regno -= MAX_FPRS_PER_FMT)
6967 if (mips_save_reg_p (regno))
6969 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
6970 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
/* Both save areas are kept stack-aligned individually.  */
6974 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6975 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6977 /* Add in the space required for saving incoming register arguments. */
6978 total_size += current_function_pretend_args_size;
6979 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6981 /* Save other computed information. */
6982 cfun->machine->frame.total_size = total_size;
6983 cfun->machine->frame.var_size = var_size;
6984 cfun->machine->frame.args_size = args_size;
6985 cfun->machine->frame.cprestore_size = cprestore_size;
6986 cfun->machine->frame.gp_reg_size = gp_reg_size;
6987 cfun->machine->frame.fp_reg_size = fp_reg_size;
6988 cfun->machine->frame.mask = mask;
6989 cfun->machine->frame.fmask = fmask;
6990 cfun->machine->frame.initialized = reload_completed;
6991 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6992 cfun->machine->frame.num_fp = (fp_reg_size
6993 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
/* Compute the offsets of the GP save area relative to both the stack
   pointer (gp_sp_offset) and the frame top (gp_save_offset).  */
6997 HOST_WIDE_INT offset;
6999 if (GENERATE_MIPS16E_SAVE_RESTORE)
7000 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7001 to be aligned at the high end with any padding at the low end.
7002 It is only safe to use this calculation for o32, where we never
7003 have pretend arguments, and where any varargs will be saved in
7004 the caller-allocated area rather than at the top of the frame. */
7005 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7007 offset = (args_size + cprestore_size + var_size
7008 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7009 cfun->machine->frame.gp_sp_offset = offset;
7010 cfun->machine->frame.gp_save_offset = offset - total_size;
7014 cfun->machine->frame.gp_sp_offset = 0;
7015 cfun->machine->frame.gp_save_offset = 0;
/* Likewise for the FP save area.  */
7020 HOST_WIDE_INT offset;
7022 offset = (args_size + cprestore_size + var_size
7023 + gp_reg_rounded + fp_reg_size
7024 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7025 cfun->machine->frame.fp_sp_offset = offset;
7026 cfun->machine->frame.fp_save_offset = offset - total_size;
7030 cfun->machine->frame.fp_sp_offset = 0;
7031 cfun->machine->frame.fp_save_offset = 0;
7034 /* Ok, we're done. */
7038 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7039 pointer or argument pointer. TO is either the stack pointer or
7040 hard frame pointer. */
7043 mips_initial_elimination_offset (int from, int to)
7045 HOST_WIDE_INT offset;
/* Make sure cfun->machine->frame is up to date before reading it.  */
7047 compute_frame_size (get_frame_size ());
7049 /* Set OFFSET to the offset from the stack pointer. */
7052 case FRAME_POINTER_REGNUM:
7056 case ARG_POINTER_REGNUM:
/* Arguments start above the frame, below any pretend arguments.  */
7057 offset = (cfun->machine->frame.total_size
7058 - current_function_pretend_args_size);
/* In mips16 code the hard frame pointer is biased ahead of the
   outgoing argument area; compensate here.  */
7065 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7066 offset -= cfun->machine->frame.args_size;
7071 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7072 back to a previous frame. */
/* COUNT != 0 (an outer frame) is unsupported; only the current frame's
   incoming $31 is returned, via the hard-reg-initial-value machinery.  */
7074 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
7079 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7082 /* Use FN to save or restore register REGNO. MODE is the register's
7083 mode and OFFSET is the offset of its save slot from the current
/* ...stack pointer (per the original comment, truncated in this extract).  */
7087 mips_save_restore_reg (enum machine_mode mode, int regno,
7088 HOST_WIDE_INT offset, mips_save_restore_fn fn)
/* Build a frame MEM at SP + OFFSET and let FN decide the direction
   (save or restore) of the move.  */
7092 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7094 fn (gen_rtx_REG (mode, regno), mem);
7098 /* Call FN for each register that is saved by the current function.
7099 SP_OFFSET is the offset of the current stack pointer from the start
/* ...of the frame (original comment truncated in this extract).  */
7103 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7105 enum machine_mode fpr_mode;
7106 HOST_WIDE_INT offset;
7109 /* Save registers starting from high to low. The debuggers prefer at least
7110 the return register be stored at func+4, and also it allows us not to
7111 need a nop in the epilogue if at least one register is reloaded in
7112 addition to return address. */
7113 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7114 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7115 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7117 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7118 offset -= GET_MODE_SIZE (gpr_mode);
7121 /* This loop must iterate over the same space as its companion in
7122 compute_frame_size. */
7123 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
/* FPRs are saved pairwise as DFmode unless only single floats exist.  */
7124 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7125 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7126 regno >= FP_REG_FIRST;
7127 regno -= MAX_FPRS_PER_FMT)
7128 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7130 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7131 offset -= GET_MODE_SIZE (fpr_mode);
7135 /* If we're generating n32 or n64 abicalls, and the current function
7136 does not use $28 as its global pointer, emit a cplocal directive.
7137 Use pic_offset_table_rtx as the argument to the directive. */
7140 mips_output_cplocal (void)
/* Only needed when assembler macros (not explicit relocs) expand $gp
   references, and only when a non-default GP register is in use.  */
7142 if (!TARGET_EXPLICIT_RELOCS
7143 && cfun->machine->global_pointer > 0
7144 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7145 output_asm_insn (".cplocal %+", 0);
7148 /* Return the style of GP load sequence that is being used for the
7149 current function. */
7151 enum mips_loadgp_style
7152 mips_current_loadgp_style (void)
/* No GOT, or no global pointer needed: no loadgp sequence at all.  */
7154 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7160 if (TARGET_ABSOLUTE_ABICALLS)
7161 return LOADGP_ABSOLUTE;
7163 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7166 /* The __gnu_local_gp symbol. */
/* Lazily-created SYMBOL_REF, GC-rooted via GTY.  */
7168 static GTY(()) rtx mips_gnu_local_gp;
7170 /* If we're generating n32 or n64 abicalls, emit instructions
7171 to set up the global pointer. */
7174 mips_emit_loadgp (void)
7176 rtx addr, offset, incoming_address, base, index;
7178 switch (mips_current_loadgp_style ())
7180 case LOADGP_ABSOLUTE:
7181 if (mips_gnu_local_gp == NULL)
7183 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7184 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7186 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
/* New-ABI case: $gp = function address ($25) + _gp_disp-style offset.  */
7190 addr = XEXP (DECL_RTL (current_function_decl), 0);
7191 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7192 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7193 emit_insn (gen_loadgp_newabi (offset, incoming_address));
/* Without explicit relocs the scheduler must not move code across
   the $gp load; the blockage insn enforces that.  */
7194 if (!TARGET_EXPLICIT_RELOCS)
7195 emit_insn (gen_loadgp_blockage ());
/* VxWorks RTP case: compute $gp from GOTT_BASE/GOTT_INDEX symbols.  */
7199 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7200 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7201 emit_insn (gen_loadgp_rtp (base, index));
7202 if (!TARGET_EXPLICIT_RELOCS)
7203 emit_insn (gen_loadgp_blockage ());
7211 /* Set up the stack and frame (if desired) for the function. */
/* Implements TARGET_ASM_FUNCTION_PROLOGUE: emits assembler directives
   (.ent/.frame/.mask/.fmask/.cpload) rather than RTL.  */
7214 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7217 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7219 #ifdef SDB_DEBUGGING_INFO
7220 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7221 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7224 /* In mips16 mode, we may need to generate a 32 bit to handle
7225 floating point arguments. The linker will arrange for any 32-bit
7226 functions to call this stub, which will then jump to the 16-bit
7229 && TARGET_HARD_FLOAT_ABI
7230 && current_function_args_info.fp_code != 0)
7231 build_mips16_function_stub (file);
7233 if (!FUNCTION_NAME_ALREADY_DECLARED)
7235 /* Get the function name the same way that toplev.c does before calling
7236 assemble_start_function. This is needed so that the name used here
7237 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7238 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7240 if (!flag_inhibit_size_directive)
7242 fputs ("\t.ent\t", file);
7243 assemble_name (file, fnname);
7247 assemble_name (file, fnname);
7248 fputs (":\n", file);
7251 /* Stop mips_file_end from treating this function as external. */
7252 if (TARGET_IRIX && mips_abi == ABI_32)
7253 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7255 if (!flag_inhibit_size_directive)
7257 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7259 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7260 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7261 ", args= " HOST_WIDE_INT_PRINT_DEC
7262 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7263 (reg_names[(frame_pointer_needed)
7264 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7265 ((frame_pointer_needed && TARGET_MIPS16)
7266 ? tsize - cfun->machine->frame.args_size
7268 reg_names[GP_REG_FIRST + 31],
7269 cfun->machine->frame.var_size,
7270 cfun->machine->frame.num_gp,
7271 cfun->machine->frame.num_fp,
7272 cfun->machine->frame.args_size,
7273 cfun->machine->frame.cprestore_size);
7275 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7276 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7277 cfun->machine->frame.mask,
7278 cfun->machine->frame.gp_save_offset);
7279 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7280 cfun->machine->frame.fmask,
7281 cfun->machine->frame.fp_save_offset);
7284 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7285 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7288 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7290 /* Handle the initialization of $gp for SVR4 PIC. */
7291 if (!cfun->machine->all_noreorder_p)
7292 output_asm_insn ("%(.cpload\t%^%)", 0);
7294 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7296 else if (cfun->machine->all_noreorder_p)
7297 output_asm_insn ("%(%<", 0);
7299 /* Tell the assembler which register we're using as the global
7300 pointer. This is needed for thunks, since they can use either
7301 explicit relocs or assembler macros. */
7302 mips_output_cplocal ();
7305 /* Make the last instruction frame related and note that it performs
7306 the operation described by FRAME_PATTERN. */
7309 mips_set_frame_expr (rtx frame_pattern)
7313 insn = get_last_insn ();
7314 RTX_FRAME_RELATED_P (insn) = 1;
/* Attach a REG_FRAME_RELATED_EXPR note so dwarf2out describes the CFA
   effect using FRAME_PATTERN instead of the insn's actual pattern.  */
7315 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7321 /* Return a frame-related rtx that stores REG at MEM.
7322 REG must be a single register. */
7325 mips_frame_set (rtx mem, rtx reg)
7329 /* If we're saving the return address register and the dwarf return
7330 address column differs from the hard register number, adjust the
7331 note reg to refer to the former. */
7332 if (REGNO (reg) == GP_REG_FIRST + 31
7333 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7334 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN)
7336 set = gen_rtx_SET (VOIDmode, mem, reg);
7337 RTX_FRAME_RELATED_P (set) = 1;
7343 /* Save register REG to MEM. Make the instruction frame-related. */
7346 mips_save_reg (rtx reg, rtx mem)
/* A 64-bit FP value without 64-bit FPRs must be stored as two words.  */
7348 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7352 if (mips_split_64bit_move_p (mem, reg))
7353 mips_split_64bit_move (mem, reg);
7355 mips_emit_move (mem, reg);
/* Describe both halves of the store for the unwinder.  */
7357 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7358 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7359 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7364 && REGNO (reg) != GP_REG_FIRST + 31
7365 && !M16_REG_P (REGNO (reg)))
7367 /* Save a non-mips16 register by moving it through a temporary.
7368 We don't need to do this for $31 since there's a special
7369 instruction for it. */
7370 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7371 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7374 mips_emit_move (mem, reg);
7376 mips_set_frame_expr (mips_frame_set (mem, reg));
7380 /* Return a move between register REGNO and memory location SP + OFFSET.
7381 Make the move a load if RESTORE_P, otherwise make it a frame-related
/* ...store (original comment truncated in this extract).  */
7385 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7390 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7391 reg = gen_rtx_REG (SImode, regno);
7393 ? gen_rtx_SET (VOIDmode, reg, mem)
7394 : mips_frame_set (mem, reg));
7397 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7398 The instruction must:
7400 - Allocate or deallocate SIZE bytes in total; SIZE is known
7403 - Save or restore as many registers in *MASK_PTR as possible.
7404 The instruction saves the first registers at the top of the
7405 allocated area, with the other registers below it.
7407 - Save NARGS argument registers above the allocated area.
7409 (NARGS is always zero if RESTORE_P.)
7411 The SAVE and RESTORE instructions cannot save and restore all general
7412 registers, so there may be some registers left over for the caller to
7413 handle. Destructively modify *MASK_PTR so that it contains the registers
7414 that still need to be saved or restored. The caller can save these
7415 registers in the memory immediately below *OFFSET_PTR, which is a
7416 byte offset from the bottom of the allocated stack area. */
7419 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7420 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7424 HOST_WIDE_INT offset, top_offset;
7425 unsigned int i, regno;
/* SAVE/RESTORE cannot handle FPRs; the caller must guarantee none.  */
7428 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7430 /* Calculate the number of elements in the PARALLEL. We need one element
7431 for the stack adjustment, one for each argument register save, and one
7432 for each additional register move. */
7434 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7435 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7438 /* Create the final PARALLEL. */
7439 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7442 /* Add the stack pointer adjustment. */
7443 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7444 plus_constant (stack_pointer_rtx,
7445 restore_p ? size : -size));
7446 RTX_FRAME_RELATED_P (set) = 1;
7447 XVECEXP (pattern, 0, n++) = set;
7449 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7450 top_offset = restore_p ? size : 0;
7452 /* Save the arguments. */
7453 for (i = 0; i < nargs; i++)
7455 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7456 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7457 XVECEXP (pattern, 0, n++) = set;
7460 /* Then fill in the other register moves. */
7461 offset = top_offset;
7462 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7464 regno = mips16e_save_restore_regs[i];
7465 if (BITSET_P (*mask_ptr, regno))
7467 offset -= UNITS_PER_WORD;
7468 set = mips16e_save_restore_reg (restore_p, offset, regno);
7469 XVECEXP (pattern, 0, n++) = set;
/* Handled by the instruction itself; drop it from the caller's mask.  */
7470 *mask_ptr &= ~(1 << regno);
7474 /* Tell the caller what offset it should use for the remaining registers. */
7475 *offset_ptr = size + (offset - top_offset) + size;
/* Sanity check: every allocated PARALLEL slot was filled.  */
7477 gcc_assert (n == XVECLEN (pattern, 0));
7482 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7483 pointer. Return true if PATTERN matches the kind of instruction
7484 generated by mips16e_build_save_restore. If INFO is nonnull,
7485 initialize it when returning true. */
7488 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7489 struct mips16e_save_restore_info *info)
7491 unsigned int i, nargs, mask;
7492 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7493 rtx set, reg, mem, base;
7496 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7499 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
/* ADJUST > 0 means a RESTORE (deallocation); ADJUST < 0 means a SAVE.  */
7500 top_offset = adjust > 0 ? adjust : 0;
7502 /* Interpret all other members of the PARALLEL. */
7503 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7507 for (n = 1; n < XVECLEN (pattern, 0); n++)
7509 /* Check that we have a SET. */
7510 set = XVECEXP (pattern, 0, n);
7511 if (GET_CODE (set) != SET)
7514 /* Check that the SET is a load (if restoring) or a store
7516 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7520 /* Check that the address is the sum of the stack pointer and a
7521 possibly-zero constant offset. */
7522 mips_split_plus (XEXP (mem, 0), &base, &offset);
7523 if (base != stack_pointer_rtx)
7526 /* Check that SET's other operand is a register. */
7527 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7531 /* Check for argument saves. */
7532 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7533 && REGNO (reg) == GP_ARG_FIRST + nargs)
7535 else if (offset == save_offset)
/* Registers must appear in mips16e_save_restore_regs order.  */
7537 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7538 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7541 mask |= 1 << REGNO (reg);
7542 save_offset -= GET_MODE_SIZE (gpr_mode);
7548 /* Check that the restrictions on register ranges are met. */
/* If mips16e_mask_registers would have to add registers, the mask was
   not a legal SAVE/RESTORE range and EXTRA becomes nonzero.  */
7550 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7551 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7552 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7553 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7557 /* Make sure that the topmost argument register is not saved twice.
7558 The checks above ensure that the same is then true for the other
7559 argument registers. */
7560 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7563 /* Pass back information, if requested. */
7566 info->nargs = nargs;
7568 info->size = (adjust > 0 ? adjust : -adjust);
7574 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7575 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7576 the null terminator. */
7579 mips16e_add_register_range (char *s, unsigned int min_reg,
7580 unsigned int max_reg)
/* Emit ",$min-$max" for a real range, ",$min" for a single register.  */
7582 if (min_reg != max_reg)
7583 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7585 s += sprintf (s, ",%s", reg_names[min_reg]);
7589 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7590 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7593 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
/* Static buffer: the returned string is only valid until the next call.  */
7595 static char buffer[300];
7597 struct mips16e_save_restore_info info;
7598 unsigned int i, end;
7601 /* Parse the pattern. */
7602 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7605 /* Add the mnemonic. */
7606 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7609 /* Save the arguments. */
7611 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7612 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7613 else if (info.nargs == 1)
7614 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7616 /* Emit the amount of stack space to allocate or deallocate. */
7617 s += sprintf (s, "%d", (int) info.size);
7619 /* Save or restore $16. */
7620 if (BITSET_P (info.mask, 16))
7621 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7623 /* Save or restore $17. */
7624 if (BITSET_P (info.mask, 17))
7625 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7627 /* Save or restore registers in the range $s2...$s8, which
7628 mips16e_s2_s8_regs lists in decreasing order. Note that this
7629 is a software register range; the hardware registers are not
7630 numbered consecutively. */
7631 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7632 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7634 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7635 mips16e_s2_s8_regs[i]);
7637 /* Save or restore registers in the range $a0...$a3. */
7638 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7639 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7641 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7642 mips16e_a0_a3_regs[end - 1]);
7644 /* Save or restore $31. */
7645 if (BITSET_P (info.mask, 31))
7646 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7651 /* Return a simplified form of X using the register values in REG_VALUES.
7652 REG_VALUES[R] is the last value assigned to hard register R, or null
7653 if R has not been modified.
7655 This function is rather limited, but is good enough for our purposes. */
7658 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
/* Look through constant-pool loads before analysing X.  */
7662 x = avoid_constant_pool_reference (x);
/* Unary case: simplify the operand first, then refold.  */
7666 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7667 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7668 x0, GET_MODE (XEXP (x, 0)));
7671 if (ARITHMETIC_P (x))
7673 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7674 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7675 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
/* Register case: substitute the tracked value, but only if it is
   stable (rtx_unstable_p guards against values that may change).  */
7679 && reg_values[REGNO (x)]
7680 && !rtx_unstable_p (reg_values[REGNO (x)])
7681 return reg_values[REGNO (x)];
7686 /* Return true if (set DEST SRC) stores an argument register into its
7687 caller-allocated save slot, storing the number of that argument
7688 register in *REGNO_PTR if so. REG_VALUES is as for
7689 mips16e_collect_propagate_value. */
7692 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7693 unsigned int *regno_ptr)
7695 unsigned int argno, regno;
7696 HOST_WIDE_INT offset, required_offset;
7699 /* Check that this is a word-mode store. */
7700 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7703 /* Check that the register being saved is an unmodified argument
/* (A nonnull reg_values entry means the register was overwritten.)  */
7705 regno = REGNO (src);
7706 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7708 argno = regno - GP_ARG_FIRST;
7710 /* Check whether the address is an appropriate stack pointer or
7711 frame pointer access. The frame pointer is offset from the
7712 stack pointer by the size of the outgoing arguments. */
7713 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7714 mips_split_plus (addr, &base, &offset);
7715 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7716 if (base == hard_frame_pointer_rtx)
7717 required_offset -= cfun->machine->frame.args_size;
7718 else if (base != stack_pointer_rtx)
7720 if (offset != required_offset)
7727 /* A subroutine of mips_expand_prologue, called only when generating
7728 MIPS16e SAVE instructions. Search the start of the function for any
7729 instructions that save argument registers into their caller-allocated
7730 save slots. Delete such instructions and return a value N such that
7731 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7732 instructions redundant. */
7735 mips16e_collect_argument_saves (void)
7737 rtx reg_values[FIRST_PSEUDO_REGISTER];
7738 rtx insn, next, set, dest, src;
7739 unsigned int nargs, regno;
7741 push_topmost_sequence ();
7743 memset (reg_values, 0, sizeof (reg_values));
7744 for (insn = get_insns (); insn; insn = next)
7746 next = NEXT_INSN (insn);
7753 set = PATTERN (insn);
7754 if (GET_CODE (set) != SET)
7757 dest = SET_DEST (set);
7758 src = SET_SRC (set);
/* Fixed: "&regno" had been corrupted into the registered-trademark
   sign (an HTML-entity mangling of "&reg").  The callee's last
   parameter is "unsigned int *regno_ptr", so the address of the
   local REGNO is required here.  */
7759 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7761 if (!BITSET_P (cfun->machine->frame.mask, regno))
7764 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
/* Track the last word-mode value written to each register so the
   argument-save check can see through address arithmetic.  */
7767 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7768 reg_values[REGNO (dest)]
7769 = mips16e_collect_propagate_value (src, reg_values);
7773 pop_topmost_sequence ();
7778 /* Expand the prologue into a bunch of separate insns. */
7781 mips_expand_prologue (void)
/* Point pic_offset_table_rtx at the register chosen by
   compute_frame_size (via mips_global_pointer).  */
7787 if (cfun->machine->global_pointer > 0)
7788 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7790 size = compute_frame_size (get_frame_size ());
7792 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
7793 bytes beforehand; this is enough to cover the register save area
7794 without going out of range. */
7795 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7797 HOST_WIDE_INT step1;
7799 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
7801 if (GENERATE_MIPS16E_SAVE_RESTORE)
7803 HOST_WIDE_INT offset;
7804 unsigned int mask, regno;
7806 /* Try to merge argument stores into the save instruction. */
7807 nargs = mips16e_collect_argument_saves ();
7809 /* Build the save instruction. */
7810 mask = cfun->machine->frame.mask;
7811 insn = mips16e_build_save_restore (false, &mask, &offset,
7813 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7816 /* Check if we need to save other registers. */
/* MASK now contains only the registers SAVE could not handle.  */
7817 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7818 if (BITSET_P (mask, regno - GP_REG_FIRST))
7820 offset -= GET_MODE_SIZE (gpr_mode);
7821 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
/* Non-MIPS16e path: adjust $sp by STEP1, then store each register.  */
7826 insn = gen_add3_insn (stack_pointer_rtx,
7829 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7831 mips_for_each_saved_reg (size, mips_save_reg);
7835 /* Allocate the rest of the frame. */
7838 if (SMALL_OPERAND (-size))
7839 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
7841 GEN_INT (-size)))) = 1;
/* Frame too large for a 16-bit immediate: load it into a temporary.  */
7844 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
7847 /* There are no instructions to add or subtract registers
7848 from the stack pointer, so use the frame pointer as a
7849 temporary. We should always be using a frame pointer
7850 in this case anyway. */
7851 gcc_assert (frame_pointer_needed);
7852 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
7853 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
7854 hard_frame_pointer_rtx,
7855 MIPS_PROLOGUE_TEMP (Pmode)));
7856 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
7859 emit_insn (gen_sub3_insn (stack_pointer_rtx,
7861 MIPS_PROLOGUE_TEMP (Pmode)));
7863 /* Describe the combined effect of the previous instructions. */
7865 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7866 plus_constant (stack_pointer_rtx, -size)));
7870 /* Set up the frame pointer, if we're using one. In mips16 code,
7871 we point the frame pointer ahead of the outgoing argument area.
7872 This should allow more variables & incoming arguments to be
7873 accessed with unextended instructions. */
7874 if (frame_pointer_needed)
7876 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
7878 rtx offset = GEN_INT (cfun->machine->frame.args_size);
7879 if (SMALL_OPERAND (cfun->machine->frame.args_size))
7881 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7886 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
7887 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
7888 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7889 hard_frame_pointer_rtx,
7890 MIPS_PROLOGUE_TEMP (Pmode)));
7892 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
7893 plus_constant (stack_pointer_rtx,
7894 cfun->machine->frame.args_size)));
7898 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
7899 stack_pointer_rtx)) = 1;
7902 mips_emit_loadgp ();
7904 /* If generating o32/o64 abicalls, save $gp on the stack. */
7905 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
7906 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
7908 /* If we are profiling, make sure no instructions are scheduled before
7909 the call to mcount. */
7911 if (current_function_profile)
7912 emit_insn (gen_blockage ());
7915 /* Do any necessary cleanup after a function to restore stack, frame,
/* ...and registers (original comment truncated in this extract).  */
7918 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
7921 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7922 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7924 /* Reinstate the normal $gp. */
7925 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
7926 mips_output_cplocal ();
/* Undo the ".set nomacro"/".set noreorder" opened by the prologue.  */
7928 if (cfun->machine->all_noreorder_p)
7930 /* Avoid using %>%) since it adds excess whitespace. */
7931 output_asm_insn (".set\tmacro", 0);
7932 output_asm_insn (".set\treorder", 0);
7933 set_noreorder = set_nomacro = 0;
7936 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
7940 /* Get the function name the same way that toplev.c does before calling
7941 assemble_start_function. This is needed so that the name used here
7942 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7943 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7944 fputs ("\t.end\t", file);
7945 assemble_name (file, fnname);
7950 /* Emit instructions to restore register REG from slot MEM. */
7953 mips_restore_reg (rtx reg, rtx mem)
7955 /* There's no mips16 instruction to load $31 directly. Load into
7956 $7 instead and adjust the return insn appropriately. */
7957 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
7958 reg = gen_rtx_REG (GET_MODE (reg), 7);
7960 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
7962 /* Can't restore directly; move through a temporary. */
7963 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
7964 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
7967 mips_emit_move (reg, mem);
7971 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
7972 if this epilogue precedes a sibling call, false if it is for a normal
7973 "epilogue" pattern. */
7976 mips_expand_epilogue (int sibcall_p)
7978 HOST_WIDE_INT step1, step2;
7981 if (!sibcall_p && mips_can_use_return_insn ())
7983 emit_jump_insn (gen_return ());
7987 /* In mips16 mode, if the return value should go into a floating-point
7988 register, we need to call a helper routine to copy it over. */
7989 if (mips16_cfun_returns_in_fpr_p ())
7998 enum machine_mode return_mode;
8000 return_type = DECL_RESULT (current_function_decl);
8001 return_mode = DECL_MODE (return_type);
/* The helper name is "__mips16_ret_" plus a suffix encoding the
   return mode; calling it copies the value into the FPRs.  */
8003 name = ACONCAT (("__mips16_ret_",
8004 mips16_call_stub_mode_suffix (return_mode),
8006 id = get_identifier (name);
8007 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8008 retval = gen_rtx_REG (return_mode, GP_RETURN);
8009 call = gen_call_value_internal (retval, func, const0_rtx);
8010 insn = emit_call_insn (call);
8011 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8014 /* Split the frame into two. STEP1 is the amount of stack we should
8015 deallocate before restoring the registers. STEP2 is the amount we
8016 should deallocate afterwards.
8018 Start off by assuming that no registers need to be restored. */
8019 step1 = cfun->machine->frame.total_size;
8022 /* Work out which register holds the frame address. Account for the
8023 frame pointer offset used by mips16 code. */
8024 if (!frame_pointer_needed)
8025 base = stack_pointer_rtx;
8028 base = hard_frame_pointer_rtx;
8030 step1 -= cfun->machine->frame.args_size;
8033 /* If we need to restore registers, deallocate as much stack as
8034 possible in the second step without going out of range. */
8035 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8037 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8041 /* Set TARGET to BASE + STEP1. */
8047 /* Get an rtx for STEP1 that we can add to BASE. */
8048 adjust = GEN_INT (step1);
8049 if (!SMALL_OPERAND (step1))
8051 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8052 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8055 /* Normal mode code can copy the result straight into $sp. */
8057 target = stack_pointer_rtx;
8059 emit_insn (gen_add3_insn (target, base, adjust));
8062 /* Copy TARGET into the stack pointer. */
8063 if (target != stack_pointer_rtx)
8064 mips_emit_move (stack_pointer_rtx, target);
8066 /* If we're using addressing macros, $gp is implicitly used by all
8067 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8069 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8070 emit_insn (gen_blockage ());
/* mips16e SAVE/RESTORE: one RESTORE instruction reloads most GPRs and
   pops STEP2 bytes of frame; registers it cannot handle are reloaded
   manually before it.  */
8072 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8074 unsigned int regno, mask;
8075 HOST_WIDE_INT offset;
8078 /* Generate the restore instruction. */
8079 mask = cfun->machine->frame.mask;
8080 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8082 /* Restore any other registers manually. */
8083 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8084 if (BITSET_P (mask, regno - GP_REG_FIRST))
8086 offset -= GET_MODE_SIZE (gpr_mode);
8087 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8090 /* Restore the remaining registers and deallocate the final bit
8092 emit_insn (restore);
8096 /* Restore the registers. */
8097 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8100 /* Deallocate the final bit of the frame. */
8102 emit_insn (gen_add3_insn (stack_pointer_rtx,
8107 /* Add in the __builtin_eh_return stack adjustment. We need to
8108 use a temporary in mips16 code. */
8109 if (current_function_calls_eh_return)
8113 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8114 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8115 MIPS_EPILOGUE_TEMP (Pmode),
8116 EH_RETURN_STACKADJ_RTX));
8117 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8120 emit_insn (gen_add3_insn (stack_pointer_rtx,
8122 EH_RETURN_STACKADJ_RTX));
8127 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8128 path will restore the return address into $7 rather than $31. */
8130 && !GENERATE_MIPS16E_SAVE_RESTORE
8131 && (cfun->machine->frame.mask & RA_MASK) != 0)
8132 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8133 GP_REG_FIRST + 7)));
8135 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8136 GP_REG_FIRST + 31)));
8140 /* Return nonzero if this function is known to have a null epilogue.
8141 This allows the optimizer to omit jumps to jumps if no stack
8145 mips_can_use_return_insn (void)
/* The frame layout is only reliable after reload has completed.  */
8147 if (! reload_completed)
/* A live $31 or profiling code implies real epilogue work.  */
8150 if (df_regs_ever_live_p (31) || current_function_profile)
8153 /* In mips16 mode, a function that returns a floating point value
8154 needs to arrange to copy the return value into the floating point
8156 if (mips16_cfun_returns_in_fpr_p ())
/* Use the cached frame size if available, else compute it.  */
8159 if (cfun->machine->frame.initialized)
8160 return cfun->machine->frame.total_size == 0;
8162 return compute_frame_size (get_frame_size ()) == 0;
8165 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8166 in order to avoid duplicating too much logic from elsewhere. */
8169 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8170 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8173 rtx this, temp1, temp2, insn, fnaddr;
8175 /* Pretend to be a post-reload pass while generating rtl. */
8176 reload_completed = 1;
8178 /* Mark the end of the (empty) prologue. */
8179 emit_note (NOTE_INSN_PROLOGUE_END);
8181 /* Pick a global pointer. Use a call-clobbered register if
8182 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8185 cfun->machine->global_pointer =
8186 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8188 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8192 /* Set up the global pointer for n32 or n64 abicalls. If
8193 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8194 no need to load it.*/
8195 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8196 || !targetm.binds_local_p (function))
8197 mips_emit_loadgp ();
8199 /* We need two temporary registers in some cases. */
/* $2/$3 are call-clobbered and are not argument registers
   (GP_ARG_FIRST starts higher), so they are free at a thunk's entry.  */
8200 temp1 = gen_rtx_REG (Pmode, 2);
8201 temp2 = gen_rtx_REG (Pmode, 3);
8203 /* Find out which register contains the "this" pointer. */
/* A hidden struct-return pointer occupies the first argument register,
   shifting "this" to the second.  */
8204 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8205 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8207 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8209 /* Add DELTA to THIS. */
8212 rtx offset = GEN_INT (delta);
8213 if (!SMALL_OPERAND (delta))
8215 mips_emit_move (temp1, offset);
8218 emit_insn (gen_add3_insn (this, this, offset));
8221 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8222 if (vcall_offset != 0)
8226 /* Set TEMP1 to *THIS. */
8227 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8229 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8230 addr = mips_add_offset (temp2, temp1, vcall_offset);
8232 /* Load the offset and add it to THIS. */
8233 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8234 emit_insn (gen_add3_insn (this, this, temp1));
8237 /* Jump to the target function. Use a sibcall if direct jumps are
8238 allowed, otherwise load the address into a register first. */
8239 fnaddr = XEXP (DECL_RTL (function), 0);
8240 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr))
8242 /* This is messy. gas treats "la $25,foo" as part of a call
8243 sequence and may allow a global "foo" to be lazily bound.
8244 The general move patterns therefore reject this combination.
8246 In this context, lazy binding would actually be OK
8247 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8248 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8249 We must therefore load the address via a temporary
8250 register if mips_dangerous_for_la25_p.
8252 If we jump to the temporary register rather than $25, the assembler
8253 can use the move insn to fill the jump's delay slot. */
8254 if (TARGET_USE_PIC_FN_ADDR_REG
8255 && !mips_dangerous_for_la25_p (fnaddr))
8256 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM)
8257 mips_load_call_address (temp1, fnaddr, true);
8259 if (TARGET_USE_PIC_FN_ADDR_REG
8260 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8261 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8262 emit_jump_insn (gen_indirect_jump (temp1));
8266 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8267 SIBLING_CALL_P (insn) = 1;
8270 /* Run just enough of rest_of_compilation. This sequence was
8271 "borrowed" from alpha.c. */
8272 insn = get_insns ();
8273 insn_locators_alloc ();
8274 split_all_insns_noflow ();
8275 mips16_lay_out_constants ();
8276 shorten_branches (insn);
8277 final_start_function (insn, file, 1);
8278 final (insn, file, 1);
8279 final_end_function ();
8281 /* Clean up the vars set above. Note that final_end_function resets
8282 the global pointer for us. */
8283 reload_completed = 0;
8286 /* Returns nonzero if X contains a SYMBOL_REF. */
8289 symbolic_expression_p (rtx x)
8291 if (GET_CODE (x) == SYMBOL_REF)
/* Look through CONST wrappers.  */
8294 if (GET_CODE (x) == CONST)
8295 return symbolic_expression_p (XEXP (x, 0));
/* NOTE(review): the recursion below is presumably guarded by a
   unary-operator check on a line not visible here -- confirm.  */
8298 return symbolic_expression_p (XEXP (x, 0));
/* For binary arithmetic, a symbol in either operand qualifies.  */
8300 if (ARITHMETIC_P (x))
8301 return (symbolic_expression_p (XEXP (x, 0))
8302 || symbolic_expression_p (XEXP (x, 1)));
8307 /* Choose the section to use for the constant rtx expression X that has
8311 mips_select_rtx_section (enum machine_mode mode, rtx x,
8312 unsigned HOST_WIDE_INT align)
8314 if (TARGET_EMBEDDED_DATA)
8316 /* For embedded applications, always put constants in read-only data,
8317 in order to reduce RAM usage. */
8318 return mergeable_constant_section (mode, align, 0);
8322 /* For hosted applications, always put constants in small data if
8323 possible, as this gives the best performance. */
8324 /* ??? Consider using mergeable small data sections. */
8326 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
8327 && mips_section_threshold > 0)
8328 return get_named_section (NULL, ".sdata", 0);
/* Symbolic constants need load-time relocation under -fpic, so place
   them in .data.rel.ro rather than a pure read-only section.  */
8329 else if (flag_pic && symbolic_expression_p (x))
8330 return get_named_section (NULL, ".data.rel.ro", 3);
8332 return mergeable_constant_section (mode, align, 0);
8336 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8338 The complication here is that, with the combination TARGET_ABICALLS
8339 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8340 therefore not be included in the read-only part of a DSO. Handle such
8341 cases by selecting a normal data section instead of a read-only one.
8342 The logic apes that in default_function_rodata_section. */
8345 mips_function_rodata_section (tree decl)
8347 if (!TARGET_ABICALLS || TARGET_GPWORD)
8348 return default_function_rodata_section (decl);
8350 if (decl && DECL_SECTION_NAME (decl))
8352 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8353 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8355 char *rname = ASTRDUP (name);
8357 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8359 else if (flag_function_sections && flag_data_sections
8360 && strncmp (name, ".text.", 6) == 0)
8362 char *rname = ASTRDUP (name);
/* Rewrite ".text.foo" into ".data.foo" in place.  */
8363 memcpy (rname + 1, "data", 4);
8364 return get_section (rname, SECTION_WRITE, decl);
/* No per-function section name: fall back to plain .data.  */
8367 return data_section;
8370 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8371 locally-defined objects go in a small data section. It also controls
8372 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8373 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8376 mips_in_small_data_p (const_tree decl)
8380 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8383 /* We don't yet generate small-data references for -mabicalls or
8384 VxWorks RTP code. See the related -G handling in override_options. */
8385 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
/* Variables with an explicit section attribute are only small-data if
   that section is one of the known small-data sections.  */
8388 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8392 /* Reject anything that isn't in a known small-data section. */
8393 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8394 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8397 /* If a symbol is defined externally, the assembler will use the
8398 usual -G rules when deciding how to implement macros. */
8399 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
8402 else if (TARGET_EMBEDDED_DATA)
8404 /* Don't put constants into the small data section: we want them
8405 to be in ROM rather than RAM. */
8406 if (TREE_CODE (decl) != VAR_DECL)
8409 if (TREE_READONLY (decl)
8410 && !TREE_SIDE_EFFECTS (decl)
8411 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
/* Finally, enforce the -G size threshold.  */
8415 size = int_size_in_bytes (TREE_TYPE (decl));
8416 return (size > 0 && size <= mips_section_threshold);
8419 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8420 anchors for small data: the GP register acts as an anchor in that
8421 case. We also don't want to use them for PC-relative accesses,
8422 where the PC acts as an anchor. */
8425 mips_use_anchors_for_symbol_p (const_rtx symbol)
8427 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
/* Both classifications already have a cheap anchor ($pc or $gp); the
   remaining cases (not visible here) presumably use the default.  */
8429 case SYMBOL_PC_RELATIVE:
8430 case SYMBOL_GP_RELATIVE:
8438 /* See whether VALTYPE is a record whose fields should be returned in
8439 floating-point registers. If so, return the number of fields and
8440 list them in FIELDS (which should have two elements). Return 0
8443 For n32 & n64, a structure with one or two fields is returned in
8444 floating-point registers as long as every field has a floating-point
8448 mips_fpr_return_fields (const_tree valtype, tree *fields)
8456 if (TREE_CODE (valtype) != RECORD_TYPE)
8460 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
/* Skip entries on the field chain that are not data fields.  */
8462 if (TREE_CODE (field) != FIELD_DECL)
/* Any non-floating-point field disqualifies the whole record.  */
8465 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
/* Record the field; callers provide room for at most two.  */
8471 fields[i++] = field;
8477 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8478 a value in the most significant part of $2/$3 if:
8480 - the target is big-endian;
8482 - the value has a structure or union type (we generalize this to
8483 cover aggregates from other languages too); and
8485 - the structure is not returned in floating-point registers. */
8488 mips_return_in_msb (const_tree valtype)
/* Test each condition listed in the comment above, in order.  */
8492 return (TARGET_NEWABI
8493 && TARGET_BIG_ENDIAN
8494 && AGGREGATE_TYPE_P (valtype)
8495 && mips_fpr_return_fields (valtype, fields) == 0);
8499 /* Return a composite value in a pair of floating-point registers.
8500 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8501 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8504 For n32 & n64, $f0 always holds the first value and $f2 the second.
8505 Otherwise the values are packed together as closely as possible. */
8508 mips_return_fpr_pair (enum machine_mode mode,
8509 enum machine_mode mode1, HOST_WIDE_INT offset1,
8510 enum machine_mode mode2, HOST_WIDE_INT offset2)
/* Register step between the two halves: $f0/$f2 for the new ABIs,
   adjacent format-sized registers otherwise.  */
8514 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
8515 return gen_rtx_PARALLEL
8518 gen_rtx_EXPR_LIST (VOIDmode,
8519 gen_rtx_REG (mode1, FP_RETURN),
8521 gen_rtx_EXPR_LIST (VOIDmode,
8522 gen_rtx_REG (mode2, FP_RETURN + inc),
8523 GEN_INT (offset2))));
8528 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
8529 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
8530 VALTYPE is null and MODE is the mode of the return value. */
8533 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
8534 enum machine_mode mode)
/* Normal call: derive MODE and signedness from VALTYPE.  */
8541 mode = TYPE_MODE (valtype);
8542 unsignedp = TYPE_UNSIGNED (valtype);
8544 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
8545 true, we must promote the mode just as PROMOTE_MODE does. */
8546 mode = promote_mode (valtype, mode, &unsignedp, 1);
8548 /* Handle structures whose fields are returned in $f0/$f2. */
8549 switch (mips_fpr_return_fields (valtype, fields))
8552 return gen_rtx_REG (mode, FP_RETURN);
8555 return mips_return_fpr_pair (mode,
8556 TYPE_MODE (TREE_TYPE (fields[0])),
8557 int_byte_position (fields[0]),
8558 TYPE_MODE (TREE_TYPE (fields[1])),
8559 int_byte_position (fields[1]));
8562 /* If a value is passed in the most significant part of a register, see
8563 whether we have to round the mode up to a whole number of words. */
8564 if (mips_return_in_msb (valtype))
8566 HOST_WIDE_INT size = int_size_in_bytes (valtype);
8567 if (size % UNITS_PER_WORD != 0)
8569 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
8570 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
8574 /* For EABI, the class of return register depends entirely on MODE.
8575 For example, "struct { some_type x; }" and "union { some_type x; }"
8576 are returned in the same way as a bare "some_type" would be.
8577 Other ABIs only use FPRs for scalar, complex or vector types. */
8578 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
8579 return gen_rtx_REG (mode, GP_RETURN);
8584 /* Handle long doubles for n32 & n64. */
8586 return mips_return_fpr_pair (mode,
8588 DImode, GET_MODE_SIZE (mode) / 2);
/* Scalar and complex floats that fit the FPR return convention.  */
8590 if (mips_return_mode_in_fpr_p (mode))
8592 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8593 return mips_return_fpr_pair (mode,
8594 GET_MODE_INNER (mode), 0,
8595 GET_MODE_INNER (mode),
8596 GET_MODE_SIZE (mode) / 2);
8598 return gen_rtx_REG (mode, FP_RETURN);
/* Everything else comes back in the GP return register(s).  */
8602 return gen_rtx_REG (mode, GP_RETURN);
8605 /* Return nonzero when an argument must be passed by reference. */
8608 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8609 enum machine_mode mode, const_tree type,
8610 bool named ATTRIBUTE_UNUSED)
8612 if (mips_abi == ABI_EABI)
8616 /* ??? How should SCmode be handled? */
8617 if (mode == DImode || mode == DFmode)
/* EABI: variable-sized (-1) or wider-than-word arguments go by
   reference.  */
8620 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
8621 return size == -1 || size > UNITS_PER_WORD;
8625 /* If we have a variable-sized parameter, we have no choice. */
8626 return targetm.calls.must_pass_in_stack (mode, type);
/* Implement TARGET_CALLEE_COPIES: under EABI the callee makes the copy
   of a named by-reference argument; other ABIs leave it to the caller.  */
8631 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8632 enum machine_mode mode ATTRIBUTE_UNUSED,
8633 const_tree type ATTRIBUTE_UNUSED, bool named)
8635 return mips_abi == ABI_EABI && named;
8638 /* Return true if registers of class CLASS cannot change from mode FROM
8642 mips_cannot_change_mode_class (enum machine_mode from,
8643 enum machine_mode to, enum reg_class class)
/* The mode change crosses the single-word/multi-word boundary.  */
8645 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
8646 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
8648 if (TARGET_BIG_ENDIAN)
8650 /* When a multi-word value is stored in paired floating-point
8651 registers, the first register always holds the low word.
8652 We therefore can't allow FPRs to change between single-word
8653 and multi-word modes. */
8654 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
8659 /* gcc assumes that each word of a multiword register can be accessed
8660 individually using SUBREGs. This is not true for floating-point
8661 registers if they are bigger than a word. */
8662 if (UNITS_PER_FPREG > UNITS_PER_WORD
8663 && GET_MODE_SIZE (from) > UNITS_PER_WORD
8664 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
8665 && reg_classes_intersect_p (FP_REGS, class))
8668 /* Loading a 32-bit value into a 64-bit floating-point register
8669 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8670 We can't allow 64-bit float registers to change from SImode to
8675 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
8676 && reg_classes_intersect_p (FP_REGS, class))
8682 /* Return true if X should not be moved directly into register $25.
8683 We need this because many versions of GAS will treat "la $25,foo" as
8684 part of a call sequence and so allow a global "foo" to be lazily bound. */
8687 mips_dangerous_for_la25_p (rtx x)
/* With explicit relocs we never emit the "la" macro, so the hazard
   only exists for macro-based loads of global symbols.  */
8689 return (!TARGET_EXPLICIT_RELOCS
8691 && GET_CODE (x) == SYMBOL_REF
8692 && mips_global_symbol_p (x));
8695 /* Implement PREFERRED_RELOAD_CLASS. */
8698 mips_preferred_reload_class (rtx x, enum reg_class class)
/* Keep values that must not be loaded into $25 in LEA_REGS.  */
8700 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
/* Prefer FPRs for floating-point data when hard floats are enabled.  */
8703 if (TARGET_HARD_FLOAT
8704 && FLOAT_MODE_P (GET_MODE (x))
8705 && reg_class_subset_p (FP_REGS, class))
8708 if (reg_class_subset_p (GR_REGS, class))
/* mips16 code prefers the directly-addressable M16 subset.  */
8711 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8717 /* This function returns the register class required for a secondary
8718 register when copying between one of the registers in CLASS, and X,
8719 using MODE. If IN_P is nonzero, the copy is going from X to the
8720 register, otherwise the register is the source. A return value of
8721 NO_REGS means that no secondary register is required. */
8724 mips_secondary_reload_class (enum reg_class class,
8725 enum machine_mode mode, rtx x, int in_p)
/* In mips16 mode only the M16 subset of GPRs is directly usable.  */
8727 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
/* Use the hard register number behind X (looking through SUBREGs).  */
8731 if (REG_P (x)|| GET_CODE (x) == SUBREG)
8732 regno = true_regnum (x);
8734 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
/* If X must not reach $25, moves into a class containing $25 need an
   intermediary.  */
8736 if (mips_dangerous_for_la25_p (x))
8739 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
8743 /* Copying from HI or LO to anywhere other than a general register
8744 requires a general register.
8745 This rule applies to both the original HI/LO pair and the new
8746 DSP accumulators. */
8747 if (reg_class_subset_p (class, ACC_REGS))
8749 if (TARGET_MIPS16 && in_p)
8751 /* We can't really copy to HI or LO at all in mips16 mode. */
8754 return gp_reg_p ? NO_REGS : gr_regs;
8756 if (ACC_REG_P (regno))
8758 if (TARGET_MIPS16 && ! in_p)
8760 /* We can't really copy to HI or LO at all in mips16 mode. */
8763 return class == gr_regs ? NO_REGS : gr_regs;
8766 /* We can only copy a value to a condition code register from a
8767 floating point register, and even then we require a scratch
8768 floating point register. We can only copy a value out of a
8769 condition code register into a general register. */
8770 if (class == ST_REGS)
8774 return gp_reg_p ? NO_REGS : gr_regs;
8776 if (ST_REG_P (regno))
8780 return class == gr_regs ? NO_REGS : gr_regs;
8783 if (class == FP_REGS)
8787 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8790 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
8792 /* We can use the l.s and l.d macros to load floating-point
8793 constants. ??? For l.s, we could probably get better
8794 code by returning GR_REGS here. */
8797 else if (gp_reg_p || x == CONST0_RTX (mode))
8799 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8802 else if (FP_REG_P (regno))
8804 /* In this case we can use mov.s or mov.d. */
8809 /* Otherwise, we need to reload through an integer register. */
8814 /* In mips16 mode, going between memory and anything but M16_REGS
8815 requires an M16_REG. */
8818 if (class != M16_REGS && class != M16_NA_REGS)
8826 if (class == M16_REGS || class == M16_NA_REGS)
8835 /* Implement CLASS_MAX_NREGS.
8837 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8839 - ST_REGS always hold CCmode values, and CCmode values are
8840 considered to be 4 bytes wide.
8842 All other register classes are covered by UNITS_PER_WORD. Note that
8843 this is true even for unions of integer and float registers when the
8844 latter are smaller than the former. The only supported combination
8845 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
8846 words but 32-bit float registers. A word-based calculation is correct
8847 in that case since -msingle-float disallows multi-FPR values. */
8850 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8851 enum machine_mode mode)
/* Each case rounds the mode size up to whole registers.  */
8853 if (class == ST_REGS)
8854 return (GET_MODE_SIZE (mode) + 3) / 4;
8855 else if (class == FP_REGS)
8856 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8858 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
/* Implement TARGET_VALID_POINTER_MODE: pointers are SImode, or DImode
   when the target has 64-bit GPRs.  */
8862 mips_valid_pointer_mode (enum machine_mode mode)
8864 return (mode == SImode || (TARGET_64BIT && mode == DImode));
8867 /* Target hook for vector_mode_supported_p. */
8870 mips_vector_mode_supported_p (enum machine_mode mode)
/* NOTE(review): the dispatch on MODE is not fully visible here; this
   arm presumably handles V2SFmode, which requires -mpaired-single.  */
8875 return TARGET_PAIRED_SINGLE_FLOAT;
8886 /* If we can access small data directly (using gp-relative relocation
8887 operators) return the small data pointer, otherwise return null.
8889 For each mips16 function which refers to GP relative symbols, we
8890 use a pseudo register, initialized at the start of the function, to
8891 hold the $gp value. */
8894 mips16_gp_pseudo_reg (void)
/* Create the pseudo lazily, on first request.  */
8896 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
8897 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
8899 /* Don't initialize the pseudo register if we are being called from
8900 the tree optimizers' cost-calculation routines. */
8901 if (!cfun->machine->initialized_mips16_gp_pseudo_p
8902 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
8906 /* We want to initialize this to a value which gcc will believe
8908 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
8910 push_topmost_sequence ();
8911 /* We need to emit the initialization after the FUNCTION_BEG
8912 note, so that it will be integrated. */
8913 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
8915 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
/* No FUNCTION_BEG note found: fall back to the insn stream head.  */
8917 if (scan == NULL_RTX)
8918 scan = get_insns ();
8919 insn = emit_insn_after (insn, scan);
8920 pop_topmost_sequence ();
8922 cfun->machine->initialized_mips16_gp_pseudo_p = true;
8925 return cfun->machine->mips16_gp_pseudo_rtx;
8928 /* Write out code to move floating point arguments in or out of
8929 general registers. Output the instructions to FILE. FP_CODE is
8930 the code describing which arguments are present (see the comment at
8931 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
8932 we are copying from the floating point registers. */
8935 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
8940 CUMULATIVE_ARGS cum;
8942 /* This code only works for the original 32-bit ABI and the O64 ABI. */
8943 gcc_assert (TARGET_OLDABI);
8950 init_cumulative_args (&cum, NULL, NULL);
/* Each argument occupies two bits of FP_CODE: 1 means float, 2 means
   double (see the (f & 3) tests below).  */
8952 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8954 enum machine_mode mode;
8955 struct mips_arg_info info;
8959 else if ((f & 3) == 2)
8964 mips_arg_info (&cum, mode, NULL, true, &info);
8965 gparg = mips_arg_regno (&info, false);
8966 fparg = mips_arg_regno (&info, true);
8969 fprintf (file, "\t%s\t%s,%s\n", s,
8970 reg_names[gparg], reg_names[fparg]);
8971 else if (TARGET_64BIT)
8972 fprintf (file, "\td%s\t%s,%s\n", s,
8973 reg_names[gparg], reg_names[fparg]);
8974 else if (ISA_HAS_MXHC1)
8975 /* -mips32r2 -mfp64 */
8976 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
8978 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
8980 from_fp_p ? "mfhc1" : "mthc1",
8981 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
/* 32-bit FPRs: move the double one word at a time, ordered to match
   the target endianness.  */
8983 else if (TARGET_BIG_ENDIAN)
8984 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8985 reg_names[gparg], reg_names[fparg + 1], s,
8986 reg_names[gparg + 1], reg_names[fparg]);
8988 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8989 reg_names[gparg], reg_names[fparg], s,
8990 reg_names[gparg + 1], reg_names[fparg + 1]);
8992 function_arg_advance (&cum, mode, NULL, true);
8996 /* Build a mips16 function stub. This is used for functions which
8997 take arguments in the floating point registers. It is 32-bit code
8998 that moves the floating point args into the general registers, and
8999 then jumps to the 16-bit code. */
9002 build_mips16_function_stub (FILE *file)
9005 char *secname, *stubname;
9006 tree stubid, stubdecl;
/* The stub is named "__fn_stub_<fn>" and placed in section
   ".mips16.fn.<fn>", presumably so the linker can identify it.  */
9010 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9011 secname = (char *) alloca (strlen (fnname) + 20);
9012 sprintf (secname, ".mips16.fn.%s", fnname);
9013 stubname = (char *) alloca (strlen (fnname) + 20);
9014 sprintf (stubname, "__fn_stub_%s", fnname);
9015 stubid = get_identifier (stubname);
9016 stubdecl = build_decl (FUNCTION_DECL, stubid,
9017 build_function_type (void_type_node, NULL_TREE));
9018 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9019 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
/* Emit a human-readable comment listing the FP argument types.  */
9021 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9023 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9025 fprintf (file, "%s%s",
9026 need_comma ? ", " : "",
9027 (f & 3) == 1 ? "float" : "double");
9030 fprintf (file, ")\n");
9032 fprintf (file, "\t.set\tnomips16\n");
9033 switch_to_section (function_section (stubdecl));
9034 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9036 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9037 within a .ent, and we cannot emit another .ent. */
9038 if (!FUNCTION_NAME_ALREADY_DECLARED)
9040 fputs ("\t.ent\t", file);
9041 assemble_name (file, stubname);
9045 assemble_name (file, stubname);
9046 fputs (":\n", file);
9048 /* We don't want the assembler to insert any nops here. */
9049 fprintf (file, "\t.set\tnoreorder\n");
9051 mips16_fp_args (file, current_function_args_info.fp_code, 1);
/* Load the real (mips16) function address into $1 and jump to it;
   .set noat tells gas we are using $at ($1) deliberately.  */
9053 fprintf (asm_out_file, "\t.set\tnoat\n");
9054 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9055 assemble_name (file, fnname);
9056 fprintf (file, "\n");
9057 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9058 fprintf (asm_out_file, "\t.set\tat\n");
9060 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9061 with one of the mfc1 instructions, because the result is not
9062 available for one instruction, so if the very first instruction
9063 in the function refers to the register, it will see the wrong
9065 fprintf (file, "\tnop\n");
9067 fprintf (file, "\t.set\treorder\n");
9069 if (!FUNCTION_NAME_ALREADY_DECLARED)
9071 fputs ("\t.end\t", file);
9072 assemble_name (file, stubname);
9076 fprintf (file, "\t.set\tmips16\n");
9078 switch_to_section (function_section (current_function_decl));
9081 /* We keep a list of functions for which we have already built stubs
9082 in build_mips16_call_stub. */
9086 struct mips16_stub *next;
9091 static struct mips16_stub *mips16_stubs;
9093 /* Emit code to return a double value from a mips16 stub. GPREG is the
9094 first GP reg to use, FPREG is the first FP reg to use. */
9097 mips16_fpret_double (int gpreg, int fpreg)
/* 64-bit GPRs: a single dmfc1 moves the whole double.  */
9100 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9101 reg_names[gpreg], reg_names[fpreg]);
/* 64-bit FPRs on a 32-bit target: move the low half with mfc1 and the
   high half with mfhc1, ordering the GPRs by endianness.  */
9102 else if (TARGET_FLOAT64)
9104 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9105 reg_names[gpreg + WORDS_BIG_ENDIAN],
9107 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9108 reg_names[gpreg + !WORDS_BIG_ENDIAN],
/* 32-bit FPRs: the double lives in an FPR pair; move each word.  */
9113 if (TARGET_BIG_ENDIAN)
9115 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9116 reg_names[gpreg + 0],
9117 reg_names[fpreg + 1]);
9118 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9119 reg_names[gpreg + 1],
9120 reg_names[fpreg + 0]);
9124 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9125 reg_names[gpreg + 0],
9126 reg_names[fpreg + 0]);
9127 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9128 reg_names[gpreg + 1],
9129 reg_names[fpreg + 1]);
9134 /* Build a call stub for a mips16 call. A stub is needed if we are
9135 passing any floating point values which should go into the floating
9136 point registers. If we are, and the call turns out to be to a
9137 32-bit function, the stub will be used to move the values into the
9138 floating point registers before calling the 32-bit function. The
9139 linker will magically adjust the function call to either the 16-bit
9140 function or the 32-bit stub, depending upon where the function call
9141 is actually defined.
9143 Similarly, we need a stub if the return value might come back in a
9144 floating point register.
9146 RETVAL is the location of the return value, or null if this is
9147 a call rather than a call_value. FN is the address of the
9148 function and ARG_SIZE is the size of the arguments. FP_CODE
9149 is the code built by function_arg. This function returns a nonzero
9150 value if it builds the call instruction itself. */
/* NOTE(review): this excerpt elides the return type line, opening brace
   and several declarations/returns; the flow below is annotated from the
   visible lines only.  */
9153 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9157 char *secname, *stubname;
9158 struct mips16_stub *l;
9159 tree stubid, stubdecl;
9163 /* We don't need to do anything if we aren't in mips16 mode, or if
9164 we were invoked with the -msoft-float option. */
9165 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9168 /* Figure out whether the value might come back in a floating point
9171 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9173 /* We don't need to do anything if there were no floating point
9174 arguments and the value will not be returned in a floating point
9176 if (fp_code == 0 && ! fpret)
9179 /* We don't need to do anything if this is a call to a special
9180 mips16 support function. */
9181 if (GET_CODE (fn) == SYMBOL_REF
9182 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9185 /* This code will only work for o32 and o64 abis. The other ABI's
9186 require more sophisticated support. */
9187 gcc_assert (TARGET_OLDABI);
9189 /* If we're calling via a function pointer, then we must always call
9190 via a stub. There are magic stubs provided in libgcc.a for each
9191 of the required cases. Each of them expects the function address
9192 to arrive in register $2. */
9194 if (GET_CODE (fn) != SYMBOL_REF)
9200 /* ??? If this code is modified to support other ABI's, we need
9201 to handle PARALLEL return values here. */
/* Name the libgcc helper: a mode suffix is appended only when a value
   is returned in an FPR.  */
9204 sprintf (buf, "__mips16_call_stub_%s_%d",
9205 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9208 sprintf (buf, "__mips16_call_stub_%d",
9211 id = get_identifier (buf);
9212 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
/* Load the target address into $2, which the libgcc stub expects.  */
9214 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9216 if (retval == NULL_RTX)
9217 insn = gen_call_internal (stub_fn, arg_size);
9219 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9220 insn = emit_call_insn (insn);
9222 /* Put the register usage information on the CALL. */
9223 CALL_INSN_FUNCTION_USAGE (insn) =
9224 gen_rtx_EXPR_LIST (VOIDmode,
9225 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9226 CALL_INSN_FUNCTION_USAGE (insn));
9228 /* If we are handling a floating point return value, we need to
9229 save $18 in the function prologue. Putting a note on the
9230 call will mean that df_regs_ever_live_p ($18) will be true if the
9231 call is not eliminated, and we can check that in the prologue
9234 CALL_INSN_FUNCTION_USAGE (insn) =
9235 gen_rtx_EXPR_LIST (VOIDmode,
9236 gen_rtx_USE (VOIDmode,
9237 gen_rtx_REG (word_mode, 18)),
9238 CALL_INSN_FUNCTION_USAGE (insn));
9240 /* Return 1 to tell the caller that we've generated the call
9245 /* We know the function we are going to call. If we have already
9246 built a stub, we don't need to do anything further. */
9248 fnname = XSTR (fn, 0);
9249 for (l = mips16_stubs; l != NULL; l = l->next)
9250 if (strcmp (l->name, fnname) == 0)
9255 /* Build a special purpose stub. When the linker sees a
9256 function call in mips16 code, it will check where the target
9257 is defined. If the target is a 32-bit call, the linker will
9258 search for the section defined here. It can tell which
9259 symbol this section is associated with by looking at the
9260 relocation information (the name is unreliable, since this
9261 might be a static function). If such a section is found, the
9262 linker will redirect the call to the start of the magic
9265 If the function does not return a floating point value, the
9266 special stub section is named
9269 If the function does return a floating point value, the stub
9271 .mips16.call.fp.FNNAME
9274 secname = (char *) alloca (strlen (fnname) + 40);
9275 sprintf (secname, ".mips16.call.%s%s",
9278 stubname = (char *) alloca (strlen (fnname) + 20);
9279 sprintf (stubname, "__call_stub_%s%s",
9282 stubid = get_identifier (stubname);
9283 stubdecl = build_decl (FUNCTION_DECL, stubid,
9284 build_function_type (void_type_node, NULL_TREE));
9285 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9286 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
/* Emit a human-readable banner describing the stubbed signature.  */
9288 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9290 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
/* FP_CODE packs one two-bit code per FP argument: 1 = float, other
   nonzero = double (per the test on "(f & 3) == 1" below).  */
9294 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9296 fprintf (asm_out_file, "%s%s",
9297 need_comma ? ", " : "",
9298 (f & 3) == 1 ? "float" : "double");
9301 fprintf (asm_out_file, ")\n");
/* The stub itself is 32-bit code even inside a mips16 compilation.  */
9303 fprintf (asm_out_file, "\t.set\tnomips16\n");
9304 assemble_start_function (stubdecl, stubname);
9306 if (!FUNCTION_NAME_ALREADY_DECLARED)
9308 fputs ("\t.ent\t", asm_out_file);
9309 assemble_name (asm_out_file, stubname);
9310 fputs ("\n", asm_out_file);
9312 assemble_name (asm_out_file, stubname);
9313 fputs (":\n", asm_out_file);
9316 /* We build the stub code by hand. That's the only way we can
9317 do it, since we can't generate 32-bit code during a 16-bit
9320 /* We don't want the assembler to insert any nops here. */
9321 fprintf (asm_out_file, "\t.set\tnoreorder\n");
/* Move the FP arguments from GPRs into FPRs.  */
9323 mips16_fp_args (asm_out_file, fp_code, 0);
/* No FP return value: tail-jump straight to the real function via $1.  */
9327 fprintf (asm_out_file, "\t.set\tnoat\n");
9328 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9330 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9331 fprintf (asm_out_file, "\t.set\tat\n");
9332 /* Unfortunately, we can't fill the jump delay slot. We
9333 can't fill with one of the mtc1 instructions, because the
9334 result is not available for one instruction, so if the
9335 very first instruction in the function refers to the
9336 register, it will see the wrong value. */
9337 fprintf (asm_out_file, "\tnop\n")	;
/* FP return value: save $31 in $18, call, then move the FPR result
   into the GPRs the mips16 caller expects.  */
9341 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9342 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9343 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9344 /* As above, we can't fill the delay slot. */
9345 fprintf (asm_out_file, "\tnop\n");
9346 if (GET_MODE (retval) == SFmode)
9347 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9348 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9349 else if (GET_MODE (retval) == SCmode)
9351 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9352 reg_names[GP_REG_FIRST + 2],
9353 reg_names[FP_REG_FIRST + 0]);
9354 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9355 reg_names[GP_REG_FIRST + 3],
9356 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9358 else if (GET_MODE (retval) == DFmode
9359 || GET_MODE (retval) == V2SFmode)
9361 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9363 else if (GET_MODE (retval) == DCmode)
9365 mips16_fpret_double (GP_REG_FIRST + 2,
9367 mips16_fpret_double (GP_REG_FIRST + 4,
9368 FP_REG_FIRST + MAX_FPRS_PER_FMT)	;
/* Remaining case (presumably a two-word FP value in an FPR pair --
   confirm: the elided condition is not visible here).  */
9372 if (TARGET_BIG_ENDIAN)
9374 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9375 reg_names[GP_REG_FIRST + 2],
9376 reg_names[FP_REG_FIRST + 1]);
9377 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9378 reg_names[GP_REG_FIRST + 3],
9379 reg_names[FP_REG_FIRST + 0]);
9383 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9384 reg_names[GP_REG_FIRST + 2],
9385 reg_names[FP_REG_FIRST + 0]);
9386 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9387 reg_names[GP_REG_FIRST + 3],
9388 reg_names[FP_REG_FIRST + 1]);
/* Return to the caller through the copy of $31 saved in $18.  */
9391 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9392 /* As above, we can't fill the delay slot. */
9393 fprintf (asm_out_file, "\tnop\n");
9396 fprintf (asm_out_file, "\t.set\treorder\n");
9398 #ifdef ASM_DECLARE_FUNCTION_SIZE
9399 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9402 if (!FUNCTION_NAME_ALREADY_DECLARED)
9404 fputs ("\t.end\t", asm_out_file);
9405 assemble_name (asm_out_file, stubname);
9406 fputs ("\n", asm_out_file);
/* Switch the assembler back to mips16 mode for the caller's code.  */
9409 fprintf (asm_out_file, "\t.set\tmips16\n");
9411 /* Record this stub. */
9412 l = (struct mips16_stub *) xmalloc (sizeof *l);
9413 l->name = xstrdup (fnname);
9415 l->next = mips16_stubs;
9419 /* If we expect a floating point return value, but we've built a
9420 stub which does not expect one, then we're in trouble. We can't
9421 use the existing stub, because it won't handle the floating point
9422 value. We can't build a new stub, because the linker won't know
9423 which stub to use for the various calls in this object file.
9424 Fortunately, this case is illegal, since it means that a function
9425 was declared in two different ways in a single compilation. */
9426 if (fpret && ! l->fpret)
9427 error ("cannot handle inconsistent calls to %qs", fnname);
9429 /* If we are calling a stub which handles a floating point return
9430 value, we need to arrange to save $18 in the prologue. We do
9431 this by marking the function call as using the register. The
9432 prologue will later see that it is used, and emit code to save
9439 if (retval == NULL_RTX)
9440 insn = gen_call_internal (fn, arg_size);
9442 insn = gen_call_value_internal (retval, fn, arg_size);
9443 insn = emit_call_insn (insn);
9445 CALL_INSN_FUNCTION_USAGE (insn) =
9446 gen_rtx_EXPR_LIST (VOIDmode,
9447 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9448 CALL_INSN_FUNCTION_USAGE (insn));
9450 /* Return 1 to tell the caller that we've generated the call
9455 /* Return 0 to let the caller generate the call insn. */
9459 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9460 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
/* NOTE(review): the VALUE and LABEL fields referenced by the comment are
   elided from this excerpt (add_constant below uses c->value, c->label
   and (*p)->mode, so the full struct presumably has all three).  */
9462 struct mips16_constant {
9463 struct mips16_constant *next;
9466 enum machine_mode mode;
9469 /* Information about an incomplete mips16 constant pool. FIRST is the
9470 first constant, HIGHEST_ADDRESS is the highest address that the first
9471 byte of the pool can have, and INSN_ADDRESS is the current instruction
9474 struct mips16_constant_pool {
9475 struct mips16_constant *first;
9476 int highest_address;
9480 /* Add constant VALUE to POOL and return its label. MODE is the
9481 value's mode (used for CONST_INTs, etc.). */
9484 add_constant (struct mips16_constant_pool *pool,
9485 rtx value, enum machine_mode mode)
9487 struct mips16_constant **p, *c;
9488 bool first_of_size_p;
9490 /* See whether the constant is already in the pool. If so, return the
9491 existing label, otherwise leave P pointing to the place where the
9492 constant should be added.
9494 Keep the pool sorted in increasing order of mode size so that we can
9495 reduce the number of alignments needed. */
9496 first_of_size_p = true;
9497 for (p = &pool->first; *p != 0; p = &(*p)->next)
/* Reuse the existing entry when value and mode both match.  */
9499 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9501 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9503 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9504 first_of_size_p = false;
9507 /* In the worst case, the constant needed by the earliest instruction
9508 will end up at the end of the pool. The entire pool must then be
9509 accessible from that instruction.
9511 When adding the first constant, set the pool's highest address to
9512 the address of the first out-of-range byte. Adjust this address
9513 downwards each time a new constant is added. */
9514 if (pool->first == 0)
9515 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9516 is the address of the instruction with the lowest two bits clear.
9517 The base PC value for ld has the lowest three bits clear. Assume
9518 the worst case here. */
9519 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
9520 pool->highest_address -= GET_MODE_SIZE (mode);
9521 if (first_of_size_p)
9522 /* Take into account the worst possible padding due to alignment. */
9523 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
9525 /* Create a new entry. */
9526 c = (struct mips16_constant *) xmalloc (sizeof *c);
9529 c->label = gen_label_rtx ();
9536 /* Output constant VALUE after instruction INSN and return the last
9537 instruction emitted. MODE is the mode of the constant. */
9540 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
9542 switch (GET_MODE_CLASS (mode))
/* Integer constants: emit a consttable_int of the mode's size.  */
9546 rtx size = GEN_INT (GET_MODE_SIZE (mode));
9547 return emit_insn_after (gen_consttable_int (value, size), insn);
9551 return emit_insn_after (gen_consttable_float (value), insn);
9553 case MODE_VECTOR_FLOAT:
9554 case MODE_VECTOR_INT:
/* Vectors are emitted element by element, recursing on the inner mode.  */
9557 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
9558 insn = dump_constants_1 (GET_MODE_INNER (mode),
9559 CONST_VECTOR_ELT (value, i), insn);
9569 /* Dump out the constants in CONSTANTS after INSN. */
9572 dump_constants (struct mips16_constant *constants, rtx insn)
9574 struct mips16_constant *c, *next;
/* The pool is sorted by increasing mode size (see add_constant), so the
   alignment only ever needs to be raised as we walk the list.  */
9578 for (c = constants; c != NULL; c = next)
9580 /* If necessary, increase the alignment of PC. */
9581 if (align < GET_MODE_SIZE (c->mode))
9583 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
9584 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
9586 align = GET_MODE_SIZE (c->mode);
9588 insn = emit_label_after (c->label, insn);
9589 insn = dump_constants_1 (c->mode, c->value, insn);
/* A barrier after the pool keeps execution from falling into the data.  */
9595 emit_barrier_after (insn);
9598 /* Return the length of instruction INSN. */
9601 mips16_insn_length (rtx insn)
9605 rtx body = PATTERN (insn);
/* Jump tables: size is element size times element count.  ADDR_DIFF_VEC
   keeps its elements in operand 1, hence the different XVECLEN index.  */
9606 if (GET_CODE (body) == ADDR_VEC)
9607 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
9608 if (GET_CODE (body) == ADDR_DIFF_VEC)
9609 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
/* Otherwise trust the length attribute from the machine description.  */
9611 return get_attr_length (insn);
9614 /* Rewrite *X so that constant pool references refer to the constant's
9615 label instead. DATA points to the constant pool structure. */
9618 mips16_rewrite_pool_refs (rtx *x, void *data)
9620 struct mips16_constant_pool *pool = data;
9621 rtx base, offset, label;
9625 else if (!TARGET_MIPS16_TEXT_LOADS)
9628 split_const (*x, &base, &offset);
9629 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
/* Pull the constant into our mips16 pool and replace the reference
   with a pc-relative reference to the pool entry's label.  */
9631 label = add_constant (pool, get_pool_constant (base),
9632 get_pool_mode (base));
9633 base = gen_rtx_LABEL_REF (Pmode, label);
9634 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
/* Returning -1 stops for_each_rtx descending into a CONST we have
   already handled.  */
9637 return GET_CODE (*x) == CONST ? -1 : 0;
9640 /* Build MIPS16 constant pools. */
9643 mips16_lay_out_constants (void)
9645 struct mips16_constant_pool pool;
9648 if (!TARGET_MIPS16_PCREL_LOADS)
9652 memset (&pool, 0, sizeof (pool));
/* Single forward walk: rewrite pool references, track the running
   instruction address, and flush the pool before it goes out of range.  */
9653 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9655 /* Rewrite constant pool references in INSN. */
9657 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
9659 pool.insn_address += mips16_insn_length (insn);
9661 if (pool.first != NULL)
9663 /* If there are no natural barriers between the first user of
9664 the pool and the highest acceptable address, we'll need to
9665 create a new instruction to jump around the constant pool.
9666 In the worst case, this instruction will be 4 bytes long.
9668 If it's too late to do this transformation after INSN,
9669 do it immediately before INSN. */
9670 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
9674 label = gen_label_rtx ();
9676 jump = emit_jump_insn_before (gen_jump (label), insn);
9677 JUMP_LABEL (jump) = label;
9678 LABEL_NUSES (label) = 1;
9679 barrier = emit_barrier_after (jump);
9681 emit_label_after (label, barrier);
9682 pool.insn_address += 4;
9685 /* See whether the constant pool is now out of range of the first
9686 user. If so, output the constants after the previous barrier.
9687 Note that any instructions between BARRIER and INSN (inclusive)
9688 will use negative offsets to refer to the pool. */
9689 if (pool.insn_address > pool.highest_address)
9691 dump_constants (pool.first, barrier);
9695 else if (BARRIER_P (insn))
/* Any constants still pending at the end go after the last insn.  */
9699 dump_constants (pool.first, get_last_insn ());
9702 /* A temporary variable used by for_each_rtx callbacks, etc. */
9703 static rtx mips_sim_insn;
9705 /* A structure representing the state of the processor pipeline.
9706 Used by the mips_sim_* family of functions. */
/* NOTE(review): the excerpt elides the struct's opening line and the
   TIME field used by mips_sim_next_cycle/mips_sim_wait_reg below.  */
9708 /* The maximum number of instructions that can be issued in a cycle.
9709 (Caches mips_issue_rate.) */
9710 unsigned int issue_rate;
9712 /* The current simulation time. */
9715 /* How many more instructions can be issued in the current cycle. */
9716 unsigned int insns_left;
9718 /* LAST_SET[X].INSN is the last instruction to set register X.
9719 LAST_SET[X].TIME is the time at which that instruction was issued.
9720 INSN is null if no instruction has yet set register X. */
9724 } last_set[FIRST_PSEUDO_REGISTER];
9726 /* The pipeline's current DFA state. */
9730 /* Reset STATE to the initial simulation state. */
9733 mips_sim_reset (struct mips_sim *state)
/* A fresh cycle: full issue width, no known register writers, and the
   scheduler DFA back at its start state.  */
9736 state->insns_left = state->issue_rate;
9737 memset (&state->last_set, 0, sizeof (state->last_set));
9738 state_reset (state->dfa_state);
9741 /* Initialize STATE before its first use. DFA_STATE points to an
9742 allocated but uninitialized DFA state. */
9745 mips_sim_init (struct mips_sim *state, state_t dfa_state)
/* Cache the issue rate once; mips_sim_reset then fills in the rest.  */
9747 state->issue_rate = mips_issue_rate ();
9748 state->dfa_state = dfa_state;
9749 mips_sim_reset (state);
9752 /* Advance STATE by one clock cycle. */
9755 mips_sim_next_cycle (struct mips_sim *state)
9758 state->insns_left = state->issue_rate;
/* A null insn (0) tells the DFA to advance to the next cycle.  */
9759 state_transition (state->dfa_state, 0);
9762 /* Advance simulation state STATE until instruction INSN can read
/* (Comment continuation elided in this excerpt; REG is presumably the
   register being read -- confirm.)  */
9766 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
/* Check every hard register REG occupies, not just the first.  */
9770 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
9771 if (state->last_set[REGNO (reg) + i].insn != 0)
/* Ready time = issue time of the writer plus its latency to INSN;
   burn cycles until the simulation clock reaches it.  */
9775 t = state->last_set[REGNO (reg) + i].time;
9776 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
9777 while (state->time < t)
9778 mips_sim_next_cycle (state);
9782 /* A for_each_rtx callback. If *X is a register, advance simulation state
9783 DATA until mips_sim_insn can read the register's value. */
9786 mips_sim_wait_regs_2 (rtx *x, void *data)
/* mips_sim_insn is the module-level "current insn" set by
   mips_sim_wait_regs.  */
9789 mips_sim_wait_reg (data, mips_sim_insn, *x);
9793 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
9796 mips_sim_wait_regs_1 (rtx *x, void *data)
9798 for_each_rtx (x, mips_sim_wait_regs_2, data);
9801 /* Advance simulation state STATE until all of INSN's register
9802 dependencies are satisfied. */
9805 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
/* Stash INSN where the note_uses/for_each_rtx callbacks can see it.  */
9807 mips_sim_insn = insn;
9808 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
9811 /* Advance simulation state STATE until the units required by
9812 instruction INSN are available. */
9815 mips_sim_wait_units (struct mips_sim *state, rtx insn)
/* Trial-issue INSN on a scratch copy of the DFA state so the real state
   is untouched while we probe for structural hazards.  */
9819 tmp_state = alloca (state_size ());
9820 while (state->insns_left == 0
9821 || (memcpy (tmp_state, state->dfa_state, state_size ()),
9822 state_transition (tmp_state, insn) >= 0))
9823 mips_sim_next_cycle (state);
9826 /* Advance simulation state STATE until INSN is ready to issue. */
9829 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
/* Data dependencies first, then functional-unit availability.  */
9831 mips_sim_wait_regs (state, insn);
9832 mips_sim_wait_units (state, insn);
9835 /* mips_sim_insn has just set X. Update the LAST_SET array
9836 in simulation state DATA. */
9839 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9841 struct mips_sim *state;
/* Record the writer and time for every hard register X covers.  */
9846 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
9848 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
9849 state->last_set[REGNO (x) + i].time = state->time;
9853 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
9854 can issue immediately (i.e., that mips_sim_wait_insn has already
9858 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
9860 state_transition (state->dfa_state, insn);
9861 state->insns_left--;
/* Let note_stores record which registers INSN writes.  */
9863 mips_sim_insn = insn;
9864 note_stores (PATTERN (insn), mips_sim_record_set, state);
9867 /* Simulate issuing a NOP in state STATE. */
9870 mips_sim_issue_nop (struct mips_sim *state)
/* A nop consumes an issue slot but touches no registers or units.  */
9872 if (state->insns_left == 0)
9873 mips_sim_next_cycle (state);
9874 state->insns_left--;
9877 /* Update simulation state STATE so that it's ready to accept the instruction
9878 after INSN. INSN should be part of the main rtl chain, not a member of a
9882 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
9884 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
9886 mips_sim_issue_nop (state);
9888 switch (GET_CODE (SEQ_BEGIN (insn)))
/* (Case labels elided in this excerpt -- presumably CODE_LABEL and
   CALL_INSN, given the comment below; confirm.)  */
9892 /* We can't predict the processor state after a call or label. */
9893 mips_sim_reset (state);
9897 /* The delay slots of branch likely instructions are only executed
9898 when the branch is taken. Therefore, if the caller has simulated
9899 the delay slot instruction, STATE does not really reflect the state
9900 of the pipeline for the instruction after the delay slot. Also,
9901 branch likely instructions tend to incur a penalty when not taken,
9902 so there will probably be an extra delay between the branch and
9903 the instruction after the delay slot. */
9904 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
9905 mips_sim_reset (state);
9913 /* The VR4130 pipeline issues aligned pairs of instructions together,
9914 but it stalls the second instruction if it depends on the first.
9915 In order to cut down the amount of logic required, this dependence
9916 check is not based on a full instruction decode. Instead, any non-SPECIAL
9917 instruction is assumed to modify the register specified by bits 20-16
9918 (which is usually the "rt" field).
9920 In beq, beql, bne and bnel instructions, the rt field is actually an
9921 input, so we can end up with a false dependence between the branch
9922 and its delay slot. If this situation occurs in instruction INSN,
9923 try to avoid it by swapping rs and rt. */
9926 vr4130_avoid_branch_rt_conflict (rtx insn)
9930 first = SEQ_BEGIN (insn);
9931 second = SEQ_END (insn);
/* Only act on a branch (pc = if_then_else) paired with a delay-slot
   insn.  (The JUMP_P test on FIRST is elided from this excerpt.)  */
9933 && NONJUMP_INSN_P (second)
9934 && GET_CODE (PATTERN (first)) == SET
9935 && GET_CODE (SET_DEST (PATTERN (first))) == PC
9936 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
9938 /* Check for the right kind of condition. */
9939 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
9940 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
9941 && REG_P (XEXP (cond, 0))
9942 && REG_P (XEXP (cond, 1))
9943 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
9944 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
9946 /* SECOND mentions the rt register but not the rs register. */
/* EQ/NE are symmetric, so swapping the operands is safe and removes
   the false rt dependence.  */
9947 rtx tmp = XEXP (cond, 0);
9948 XEXP (cond, 0) = XEXP (cond, 1);
9949 XEXP (cond, 1) = tmp;
9954 /* Implement -mvr4130-align. Go through each basic block and simulate the
9955 processor pipeline. If we find that a pair of instructions could execute
9956 in parallel, and the first of those instruction is not 8-byte aligned,
9957 insert a nop to make it aligned. */
9960 vr4130_align_insns (void)
9962 struct mips_sim state;
9963 rtx insn, subinsn, last, last2, next;
9968 /* LAST is the last instruction before INSN to have a nonzero length.
9969 LAST2 is the last such instruction before LAST. */
9973 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
9976 mips_sim_init (&state, alloca (state_size ()));
9977 for (insn = get_insns (); insn != 0; insn = next)
9979 unsigned int length;
9981 next = NEXT_INSN (insn);
9983 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
9984 This isn't really related to the alignment pass, but we do it on
9985 the fly to avoid a separate instruction walk. */
9986 vr4130_avoid_branch_rt_conflict (insn);
9988 if (USEFUL_INSN_P (insn))
9989 FOR_EACH_SUBINSN (subinsn, insn)
9991 mips_sim_wait_insn (&state, subinsn);
9993 /* If we want this instruction to issue in parallel with the
9994 previous one, make sure that the previous instruction is
9995 aligned. There are several reasons why this isn't worthwhile
9996 when the second instruction is a call:
9998 - Calls are less likely to be performance critical,
9999 - There's a good chance that the delay slot can execute
10000 in parallel with the call.
10001 - The return address would then be unaligned.
10003 In general, if we're going to insert a nop between instructions
10004 X and Y, it's better to insert it immediately after X. That
10005 way, if the nop makes Y aligned, it will also align any labels
10006 between X and Y. */
/* insns_left != issue_rate means something already issued this cycle,
   i.e. SUBINSN would dual-issue with the previous instruction.  */
10007 if (state.insns_left != state.issue_rate
10008 && !CALL_P (subinsn))
10010 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10012 /* SUBINSN is the first instruction in INSN and INSN is
10013 aligned. We want to align the previous instruction
10014 instead, so insert a nop between LAST2 and LAST.
10016 Note that LAST could be either a single instruction
10017 or a branch with a delay slot. In the latter case,
10018 LAST, like INSN, is already aligned, but the delay
10019 slot must have some extra delay that stops it from
10020 issuing at the same time as the branch. We therefore
10021 insert a nop before the branch in order to align its
10023 emit_insn_after (gen_nop (), last2);
10026 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10028 /* SUBINSN is the delay slot of INSN, but INSN is
10029 currently unaligned. Insert a nop between
10030 LAST and INSN to align it. */
10031 emit_insn_after (gen_nop (), last);
10035 mips_sim_issue_insn (&state, subinsn);
10037 mips_sim_finish_insn (&state, insn);
10039 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10040 length = get_attr_length (insn);
10043 /* If the instruction is an asm statement or multi-instruction
10044 mips.md patern, the length is only an estimate. Insert an
10045 8 byte alignment after it so that the following instructions
10046 can be handled correctly. */
10047 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10048 && (recog_memoized (insn) < 0 || length >= 8))
10050 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10051 next = NEXT_INSN (next);
10052 mips_sim_next_cycle (&state);
/* A 4-byte-odd length flips the 8-byte alignment parity.  */
10055 else if (length & 4)
10056 aligned_p = !aligned_p;
10061 /* See whether INSN is an aligned label. */
10062 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10068 /* Subroutine of mips_reorg. If there is a hazard between INSN
10069 and a previous instruction, avoid it by inserting nops after
10072 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10073 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10074 before using the value of that register. *HILO_DELAY counts the
10075 number of instructions since the last hilo hazard (that is,
10076 the number of instructions since the last mflo or mfhi).
10078 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10079 for the next instruction.
10081 LO_REG is an rtx for the LO register, used in dependence checking. */
10084 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10085 rtx *delayed_reg, rtx lo_reg)
10090 if (!INSN_P (insn))
10093 pattern = PATTERN (insn);
10095 /* Do not put the whole function in .set noreorder if it contains
10096 an asm statement. We don't know whether there will be hazards
10097 between the asm statement and the gcc-generated code. */
10098 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10099 cfun->machine->all_noreorder_p = false;
10101 /* Ignore zero-length instructions (barriers and the like). */
10102 ninsns = get_attr_length (insn) / 4;
10106 /* Work out how many nops are needed. Note that we only care about
10107 registers that are explicitly mentioned in the instruction's pattern.
10108 It doesn't matter that calls use the argument registers or that they
10109 clobber hi and lo. */
/* HI/LO write within two instructions of the last mfhi/mflo needs
   padding up to that two-instruction window.  */
10110 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10111 nops = 2 - *hilo_delay;
10112 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10117 /* Insert the nops between this instruction and the previous one.
10118 Each new nop takes us further from the last hilo hazard. */
10119 *hilo_delay += nops;
10121 emit_insn_after (gen_hazard_nop (), after);
10123 /* Set up the state for the next instruction. */
10124 *hilo_delay += ninsns;
/* Classify INSN's own hazard via its machine-description attribute.
   (The case labels are elided from this excerpt; the visible arm
   records the destination register as the next delayed value.)  */
10126 if (INSN_CODE (insn) >= 0)
10127 switch (get_attr_hazard (insn))
10137 set = single_set (insn);
10138 gcc_assert (set != 0);
10139 *delayed_reg = SET_DEST (set);
10145 /* Go through the instruction stream and insert nops where necessary.
10146 See if the whole function can then be put into .set noreorder &
10150 mips_avoid_hazards (void)
10152 rtx insn, last_insn, lo_reg, delayed_reg;
10155 /* Force all instructions to be split into their final form. */
10156 split_all_insns_noflow ();
10158 /* Recalculate instruction lengths without taking nops into account. */
10159 cfun->machine->ignore_hazard_length_p = true;
10160 shorten_branches (get_insns ());
/* Assume noreorder is possible; the checks below and mips_avoid_hazard
   clear the flag when it is not.  */
10162 cfun->machine->all_noreorder_p = true;
10164 /* Profiled functions can't be all noreorder because the profiler
10165 support uses assembler macros. */
10166 if (current_function_profile)
10167 cfun->machine->all_noreorder_p = false;
10169 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10170 we rely on the assembler to work around some errata. */
10171 if (TARGET_FIX_VR4120)
10172 cfun->machine->all_noreorder_p = false;
10174 /* The same is true for -mfix-vr4130 if we might generate mflo or
10175 mfhi instructions. Note that we avoid using mflo and mfhi if
10176 the VR4130 macc and dmacc instructions are available instead;
10177 see the *mfhilo_{si,di}_macc patterns. */
10178 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10179 cfun->machine->all_noreorder_p = false;
10184 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
/* Walk the stream, expanding SEQUENCEs (branch + delay slot) so each
   member is hazard-checked individually.  */
10186 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10189 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10190 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10191 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10192 &hilo_delay, &delayed_reg, lo_reg);
10194 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10195 &delayed_reg, lo_reg);
10202 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
/* NOTE(review): the function's declaration line and braces are elided
   from this excerpt; only the pass ordering is visible: constant pools,
   then (conditionally) delayed-branch scheduling, hazard avoidance, and
   VR4130 alignment.  */
10207 mips16_lay_out_constants ();
10208 if (TARGET_EXPLICIT_RELOCS)
10210 if (mips_flag_delayed_branch)
10211 dbr_schedule (get_insns ());
10212 mips_avoid_hazards ();
10213 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10214 vr4130_align_insns ();
10218 /* This function does three things:
10220 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10221 - Register the mips16 hardware floating point stubs.
10222 - Register the gofast functions if selected using --enable-gofast. */
10224 #include "config/gofast.h"
10227 mips_init_libfuncs (void)
10229 if (TARGET_FIX_VR4120)
10231 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10232 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
/* mips16 with a hard-float ABI: route SFmode (and, below, DFmode)
   arithmetic, comparisons and conversions through libgcc helpers.  */
10235 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10237 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10238 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10239 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10240 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10242 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10243 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10244 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10245 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10246 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10247 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10248 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10250 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10251 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10252 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10254 if (TARGET_DOUBLE_FLOAT)
10256 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10257 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10258 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10259 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10261 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10262 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10263 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10264 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10265 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10266 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10267 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10269 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10270 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10272 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10273 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10274 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
/* No-op unless gofast was configured in.  */
10278 gofast_maybe_init_libfuncs ();
10281 /* Return a number assessing the cost of moving a register in class
10282 FROM to class TO. The classes are expressed using the enumeration
10283 values such as `GENERAL_REGS'. A value of 2 is the default; other
10284 values are interpreted relative to that.
10286 It is not required that the cost always equal 2 when FROM is the
10287 same as TO; on some machines it is expensive to move between
10288 registers if they are not general registers.
10290 If reload sees an insn consisting of a single `set' between two
10291 hard registers, and if `REGISTER_MOVE_COST' applied to their
10292 classes returns a value of 2, reload does not check to ensure that
10293 the constraints of the insn are met. Setting a cost of other than
10294 2 will allow reload to verify that the constraints are met. You
10295 should do this if the `movM' pattern's constraints do not allow
10298 ??? We make the cost of moving from HI/LO into general
10299 registers the same as for one of moving general registers to
10300 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10301 pseudo to HI/LO. This might hurt optimizations though, it
10302 isn't clear if it is wise. And it might not work in all cases. We
10303 could solve the DImode LO reg problem by using a multiply, just
10304 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10305 problem by using divide instructions. divu puts the remainder in
10306 the HI reg, so doing a divide by -1 will move the value in the HI
10307 reg for all values except -1. We could handle that case by using a
10308 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10309 a compare/branch to test the input value to see which instruction
10310 we need to use. This gets pretty messy, but it is feasible. */
/* NOTE(review): the extraction below has dropped the literal cost value
   returned by each branch (plus the enclosing braces), leaving only the
   register-class test structure.  Recover the constants from the
   upstream file before relying on this.  */
10313 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10314 enum reg_class to, enum reg_class from)
10316 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10318 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10320 else if (reg_class_subset_p (from, GENERAL_REGS))
10322 if (to == M16_REGS)
10324 else if (to == M16_NA_REGS)
10326 else if (reg_class_subset_p (to, GENERAL_REGS))
10333 else if (to == FP_REGS)
10335 else if (reg_class_subset_p (to, ACC_REGS))
10342 else if (reg_class_subset_p (to, ALL_COP_REGS))
10347 else if (from == FP_REGS)
10349 if (reg_class_subset_p (to, GENERAL_REGS))
10351 else if (to == FP_REGS)
10353 else if (to == ST_REGS)
10356 else if (reg_class_subset_p (from, ACC_REGS))
10358 if (reg_class_subset_p (to, GENERAL_REGS))
10366 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10368 else if (reg_class_subset_p (from, ALL_COP_REGS))
10374 ??? What cases are these? Shouldn't we return 2 here? */
10379 /* Return the length of INSN. LENGTH is the initial length computed by
10380 attributes in the machine-description file. */
/* NOTE(review): presumably implements ADJUST_INSN_LENGTH; the return
   type, the actual length adjustments and the final return statement
   are missing from this extraction -- confirm against the upstream
   file.  */
10383 mips_adjust_insn_length (rtx insn, int length)
10385 /* A unconditional jump has an unfilled delay slot if it is not part
10386 of a sequence. A conditional jump normally has a delay slot, but
10387 does not on MIPS16. */
10388 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10391 /* See how many nops might be needed to avoid hardware hazards. */
10392 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10393 switch (get_attr_hazard (insn))
10407 /* All MIPS16 instructions are a measly two bytes. */
10415 /* Return an asm sequence to start a noat block and load the address
10416 of a label into $1. */
10419 mips_output_load_label (void)
/* Explicit-relocation case: the asm template selected apparently
   depends on ABI and pointer width (lw vs. ld); the switch that chooses
   between the templates is missing lines here -- TODO confirm.  */
10421 if (TARGET_EXPLICIT_RELOCS)
10425 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10428 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
/* Non-explicit-relocs GOT access; %# emits a nop when the ISA has a
   load delay slot.  */
10431 if (ISA_HAS_LOAD_DELAY)
10432 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10433 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Absolute-address fallback: (d)la pseudo-instruction sized by Pmode.  */
10437 if (Pmode == DImode)
10438 return "%[dla\t%@,%0";
10440 return "%[la\t%@,%0";
10444 /* Return the assembly code for INSN, which has the operands given by
10445 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10446 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10447 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10448 version of BRANCH_IF_TRUE. */
/* NOTE(review): lossy extraction -- the length-based dispatch (short
   branch vs. long sequence) and several braces are missing below.  */
10451 mips_output_conditional_branch (rtx insn, rtx *operands,
10452 const char *branch_if_true,
10453 const char *branch_if_false)
10455 unsigned int length;
10456 rtx taken, not_taken;
10458 length = get_attr_length (insn);
10461 /* Just a simple conditional branch. */
10462 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10463 return branch_if_true;
10466 /* Generate a reversed branch around a direct jump. This fallback does
10467 not use branch-likely instructions. */
10468 mips_branch_likely = false;
10469 not_taken = gen_label_rtx ();
10470 taken = operands[1];
10472 /* Generate the reversed branch to NOT_TAKEN. */
10473 operands[1] = not_taken;
10474 output_asm_insn (branch_if_false, operands);
10476 /* If INSN has a delay slot, we must provide delay slots for both the
10477 branch to NOT_TAKEN and the conditional jump. We must also ensure
10478 that INSN's delay slot is executed in the appropriate cases. */
10479 if (final_sequence)
10481 /* This first delay slot will always be executed, so use INSN's
10482 delay slot if is not annulled. */
10483 if (!INSN_ANNULLED_BRANCH_P (insn))
10485 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10486 asm_out_file, optimize, 1, NULL);
10487 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
/* Otherwise fill the always-executed slot with a nop.  */
10490 output_asm_insn ("nop", 0);
10491 fprintf (asm_out_file, "\n");
10494 /* Output the unconditional branch to TAKEN. */
10496 output_asm_insn ("j\t%0%/", &taken);
/* Out-of-range target: load the label address and jump via register
   (the surrounding condition is missing in this extraction).  */
10499 output_asm_insn (mips_output_load_label (), &taken);
10500 output_asm_insn ("jr\t%@%]%/", 0);
10503 /* Now deal with its delay slot; see above. */
10504 if (final_sequence)
10506 /* This delay slot will only be executed if the branch is taken.
10507 Use INSN's delay slot if is annulled. */
10508 if (INSN_ANNULLED_BRANCH_P (insn))
10510 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10511 asm_out_file, optimize, 1, NULL);
10512 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10515 output_asm_insn ("nop", 0);
10516 fprintf (asm_out_file, "\n");
10519 /* Output NOT_TAKEN. */
10520 (*targetm.asm_out.internal_label) (asm_out_file, "L",
10521 CODE_LABEL_NUMBER (not_taken));
10525 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10526 if some ordered condition is true. The condition is given by
10527 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10528 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10529 its second is always zero. */
/* NOTE(review): the case labels of the switch below are missing; only
   the branch-template assignments survive.  */
10532 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10534 const char *branch[2];
10536 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10537 Make BRANCH[0] branch on the inverse condition. */
10538 switch (GET_CODE (operands[0]))
10540 /* These cases are equivalent to comparisons against zero. */
10542 inverted_p = !inverted_p;
10543 /* Fall through. */
10545 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10546 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10549 /* These cases are always true or always false. */
10551 inverted_p = !inverted_p;
10552 /* Fall through. */
10554 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10555 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10559 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10560 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10563 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10566 /* Used to output div or ddiv instruction DIVISION, which has the operands
10567 given by OPERANDS. Add in a divide-by-zero check if needed.
10569 When working around R4000 and R4400 errata, we need to make sure that
10570 the division is not immediately followed by a shift[1][2]. We also
10571 need to stop the division from being put into a branch delay slot[3].
10572 The easiest way to avoid both problems is to add a nop after the
10573 division. When a divide-by-zero check is needed, this nop can be
10574 used to fill the branch delay slot.
10576 [1] If a double-word or a variable shift executes immediately
10577 after starting an integer division, the shift may give an
10578 incorrect result. See quotations of errata #16 and #28 from
10579 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10580 in mips.md for details.
10582 [2] A similar bug to [1] exists for all revisions of the
10583 R4000 and the R4400 when run in an MC configuration.
10584 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10586 "19. In this following sequence:
10588 ddiv (or ddivu or div or divu)
10589 dsll32 (or dsrl32, dsra32)
10591 if an MPT stall occurs, while the divide is slipping the cpu
10592 pipeline, then the following double shift would end up with an
10595 Workaround: The compiler needs to avoid generating any
10596 sequence with divide followed by extended double shift."
10598 This erratum is also present in "MIPS R4400MC Errata, Processor
10599 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10600 & 3.0" as errata #10 and #4, respectively.
10602 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10603 (also valid for MIPS R4000MC processors):
10605 "52. R4000SC: This bug does not apply for the R4000PC.
10607 There are two flavors of this bug:
10609 1) If the instruction just after divide takes an RF exception
10610 (tlb-refill, tlb-invalid) and gets an instruction cache
10611 miss (both primary and secondary) and the line which is
10612 currently in secondary cache at this index had the first
10613 data word, where the bits 5..2 are set, then R4000 would
10614 get a wrong result for the div.
10619 ------------------- # end-of page. -tlb-refill
10624 ------------------- # end-of page. -tlb-invalid
10627 2) If the divide is in the taken branch delay slot, where the
10628 target takes RF exception and gets an I-cache miss for the
10629 exception vector or where I-cache miss occurs for the
10630 target address, under the above mentioned scenarios, the
10631 div would get wrong results.
10634 j r2 # to next page mapped or unmapped
10635 div r8,r9 # this bug would be there as long
10636 # as there is an ICache miss and
10637 nop # the "data pattern" is present
10640 beq r0, r0, NextPage # to Next page
10644 This bug is present for div, divu, ddiv, and ddivu
10647 Workaround: For item 1), OS could make sure that the next page
10648 after the divide instruction is also mapped. For item 2), the
10649 compiler could make sure that the divide instruction is not in
10650 the branch delay slot."
10652 These processors have PRId values of 0x00004220 and 0x00004300 for
10653 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
/* NOTE(review): lossy extraction -- the declaration of `s', the
   initializer that starts it at DIVISION, and the final return are
   missing.  The visible pattern: emit the division first and leave `s'
   pointing at the zero-check tail to be emitted by the caller.  */
10656 mips_output_division (const char *division, rtx *operands)
10661 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
/* Errata workaround: emit the division now; the trailing nop that must
   follow it is appended on a missing line -- TODO confirm.  */
10663 output_asm_insn (s, operands);
10666 if (TARGET_CHECK_ZERO_DIV)
10670 output_asm_insn (s, operands);
/* MIPS16-style check: branch over an explicit "break 7" trap.  */
10671 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10673 else if (GENERATE_DIVIDE_TRAPS)
10675 output_asm_insn (s, operands);
/* Conditional trap instruction: trap with code 7 if divisor is zero.  */
10676 s = "teq\t%2,%.,7";
10680 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10681 output_asm_insn (s, operands);
10682 s = "break\t7%)\n1:";
10688 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10689 with a final "000" replaced by "k". Ignore case.
10691 Note: this function is shared between GCC and GAS. */
/* Scan the common case-insensitive prefix, then accept either full
   match or the "r4000" vs "r4k" style abbreviation.  (Return type line
   is missing from this extraction.)  */
10694 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
10696 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
10697 given++, canonical++;
10699 return ((*given == 0 && *canonical == 0)
10700 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
10704 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10705 CPU name. We've traditionally allowed a lot of variation here.
10707 Note: this function is shared between GCC and GAS. */
10710 mips_matching_cpu_name_p (const char *canonical, const char *given)
10712 /* First see if the name matches exactly, or with a final "000"
10713 turned into "k". */
10714 if (mips_strict_matching_cpu_name_p (canonical, given))
10717 /* If not, try comparing based on numerical designation alone.
10718 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10719 if (TOLOWER (*given) == 'r')
10721 if (!ISDIGIT (*given))
10724 /* Skip over some well-known prefixes in the canonical name,
10725 hoping to find a number there too. */
/* NOTE(review): the pointer-advance statements inside these branches
   (e.g. canonical += 2) are missing lines in this extraction.  */
10726 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
10728 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
10730 else if (TOLOWER (canonical[0]) == 'r')
10733 return mips_strict_matching_cpu_name_p (canonical, given);
10737 /* Return the mips_cpu_info entry for the processor or ISA given
10738 by CPU_STRING. Return null if the string isn't recognized.
10740 A similar function exists in GAS. */
10742 static const struct mips_cpu_info *
10743 mips_parse_cpu (const char *cpu_string)
10745 const struct mips_cpu_info *p;
10748 /* In the past, we allowed upper-case CPU names, but it doesn't
10749 work well with the multilib machinery. */
/* Warn (once per call) if any upper-case character is present; the
   ISUPPER test guarding the warning is on a missing line.  */
10750 for (s = cpu_string; *s != 0; s++)
10753 warning (0, "the cpu name must be lower case");
10757 /* 'from-abi' selects the most compatible architecture for the given
10758 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
10759 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10760 version. Look first at the -mgp options, if given, otherwise base
10761 the choice on MASK_64BIT in TARGET_DEFAULT. */
10762 if (strcasecmp (cpu_string, "from-abi") == 0)
10763 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10764 : ABI_NEEDS_64BIT_REGS ? 3
10765 : (TARGET_64BIT ? 3 : 1));
10767 /* 'default' has traditionally been a no-op. Probably not very useful. */
10768 if (strcasecmp (cpu_string, "default") == 0)
/* Fall back to a linear scan of the CPU table; unmatched names
   presumably return NULL on a missing line -- TODO confirm.  */
10771 for (p = mips_cpu_info_table; p->name != 0; p++)
10772 if (mips_matching_cpu_name_p (p->name, cpu_string))
10779 /* Return the processor associated with the given ISA level, or null
10780 if the ISA isn't valid. */
10782 static const struct mips_cpu_info *
10783 mips_cpu_info_from_isa (int isa)
10785 const struct mips_cpu_info *p;
/* Linear scan for a table entry whose ISA matches; the comparison and
   return statements are missing lines in this extraction.  */
10787 for (p = mips_cpu_info_table; p->name != 0; p++)
/* NOTE(review): lossy extraction -- braces and the TARGET_OLDABI test
   line of mips_return_in_memory are missing below.  */
10794 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10795 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10796 they only hold condition code modes, and CCmode is always considered to
10797 be 4 bytes wide. All other registers are word sized. */
10800 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10802 if (ST_REG_P (regno))
10803 return ((GET_MODE_SIZE (mode) + 3) / 4);
10804 else if (! FP_REG_P (regno))
10805 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10807 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
/* Old ABIs return every BLKmode object in memory; new ABIs return
   small aggregates (up to two words, known size) in registers.  */
10810 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
10811 all BLKmode objects are returned in memory. Under the new (N32 and
10812 64-bit MIPS ABIs) small structures are returned in a register.
10813 Objects with varying size must still be returned in memory, of
10817 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
10820 return (TYPE_MODE (type) == BLKmode);
10822 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10823 || (int_size_in_bytes (type) == -1));
10827 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
10829 return !TARGET_OLDABI;
10832 /* Return true if INSN is a multiply-add or multiply-subtract
10833 instruction and PREV assigns to the accumulator operand. */
/* NOTE(review): the early-exit tests and return statements of this
   predicate are on missing lines; only the pattern checks survive.  */
10836 mips_linked_madd_p (rtx prev, rtx insn)
10840 x = single_set (insn);
10846 if (GET_CODE (x) == PLUS
10847 && GET_CODE (XEXP (x, 0)) == MULT
10848 && reg_set_p (XEXP (x, 1), prev))
10851 if (GET_CODE (x) == MINUS
10852 && GET_CODE (XEXP (x, 1)) == MULT
10853 && reg_set_p (XEXP (x, 0), prev))
10859 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10860 that may clobber hi or lo. */
10862 static rtx mips_macc_chains_last_hilo;
10864 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10865 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10868 mips_macc_chains_record (rtx insn)
10870 if (get_attr_may_clobber_hilo (insn))
10871 mips_macc_chains_last_hilo = insn;
10874 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10875 has NREADY elements, looking for a multiply-add or multiply-subtract
10876 instruction that is cumulative with mips_macc_chains_last_hilo.
10877 If there is one, promote it ahead of anything else that might
10878 clobber hi or lo. */
/* Walk READY from highest to lowest priority; once a chained madd/msub
   is found, lift it above the nearest hi/lo-clobbering insn.  */
10881 mips_macc_chains_reorder (rtx *ready, int nready)
10885 if (mips_macc_chains_last_hilo != 0)
10886 for (i = nready - 1; i >= 0; i--)
10887 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10889 for (j = nready - 1; j > i; j--)
10890 if (recog_memoized (ready[j]) >= 0
10891 && get_attr_may_clobber_hilo (ready[j]))
10893 mips_promote_ready (ready, i, j);
10900 /* The last instruction to be scheduled. */
10902 static rtx vr4130_last_insn;
10904 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10905 points to an rtx that is initially an instruction. Nullify the rtx
10906 if the instruction uses the value of register X. */
10909 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10911 rtx *insn_ptr = data;
10914 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10918 /* Return true if there is true register dependence between vr4130_last_insn
10922 vr4130_true_reg_dependence_p (rtx insn)
10924 note_stores (PATTERN (vr4130_last_insn),
10925 vr4130_true_reg_dependence_p_1, &insn);
10929 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10930 the ready queue and that INSN2 is the instruction after it, return
10931 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10932 in which INSN1 and INSN2 can probably issue in parallel, but for
10933 which (INSN2, INSN1) should be less sensitive to instruction
10934 alignment than (INSN1, INSN2). See 4130.md for more details. */
/* NOTE(review): return statements for the individual heuristics below
   are on missing lines; only the condition structure survives.  */
10937 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10939 sd_iterator_def sd_it;
10942 /* Check for the following case:
10944 1) there is some other instruction X with an anti dependence on INSN1;
10945 2) X has a higher priority than INSN2; and
10946 3) X is an arithmetic instruction (and thus has no unit restrictions).
10948 If INSN1 is the last instruction blocking X, it would better to
10949 choose (INSN1, X) over (INSN2, INSN1). */
10950 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
10951 if (DEP_TYPE (dep) == REG_DEP_ANTI
10952 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
10953 && recog_memoized (DEP_CON (dep)) >= 0
10954 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
10957 if (vr4130_last_insn != 0
10958 && recog_memoized (insn1) >= 0
10959 && recog_memoized (insn2) >= 0)
10961 /* See whether INSN1 and INSN2 use different execution units,
10962 or if they are both ALU-type instructions. If so, they can
10963 probably execute in parallel. */
10964 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10965 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10966 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10968 /* If only one of the instructions has a dependence on
10969 vr4130_last_insn, prefer to schedule the other one first. */
10970 bool dep1 = vr4130_true_reg_dependence_p (insn1);
10971 bool dep2 = vr4130_true_reg_dependence_p (insn2);
10975 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10976 is not an ALU-type instruction and if INSN1 uses the same
10977 execution unit. (Note that if this condition holds, we already
10978 know that INSN2 uses a different execution unit.) */
10979 if (class1 != VR4130_CLASS_ALU
10980 && recog_memoized (vr4130_last_insn) >= 0
10981 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10988 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10989 queue with at least two instructions. Swap the first two if
10990 vr4130_swap_insns_p says that it could be worthwhile. */
10993 vr4130_reorder (rtx *ready, int nready)
10995 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10996 mips_promote_ready (ready, nready - 2, nready - 1);
/* NOTE(review): the first comment below lost its closing line in this
   extraction, so it runs textually into the following definitions;
   the code tokens themselves are preserved unchanged.  */
10999 /* Remove the instruction at index LOWER from ready queue READY and
11000 reinsert it in front of the instruction at index HIGHER. LOWER must
11004 mips_promote_ready (rtx *ready, int lower, int higher)
11009 new_head = ready[lower];
11010 for (i = lower; i < higher; i++)
11011 ready[i] = ready[i + 1];
11012 ready[i] = new_head;
11015 /* If the priority of the instruction at POS2 in the ready queue READY
11016 is within LIMIT units of that of the instruction at POS1, swap the
11017 instructions if POS2 is not already less than POS1. */
11020 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11023 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11026 temp = ready[pos1];
11027 ready[pos1] = ready[pos2];
11028 ready[pos2] = temp;
11032 /* Record whether last 74k AGEN instruction was a load or store. */
11034 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11036 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11037 resets to TYPE_UNKNOWN state. */
11040 mips_74k_agen_init (rtx insn)
11042 if (!insn || !NONJUMP_INSN_P (insn))
11043 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11044 else if (USEFUL_INSN_P (insn))
11046 enum attr_type type = get_attr_type (insn);
11047 if (type == TYPE_LOAD || type == TYPE_STORE)
11048 mips_last_74k_agen_insn = type;
11052 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11053 loads to be grouped together, and multiple stores to be grouped
11054 together. Swap things around in the ready queue to make this happen. */
/* NOTE(review): the case labels of the two switches below and the
   assignments to store_pos/load_pos are partially missing in this
   extraction.  */
11057 mips_74k_agen_reorder (rtx *ready, int nready)
11060 int store_pos, load_pos;
11065 for (i = nready - 1; i >= 0; i--)
11067 rtx insn = ready[i];
11068 if (USEFUL_INSN_P (insn))
11069 switch (get_attr_type (insn))
11072 if (store_pos == -1)
11077 if (load_pos == -1)
11086 if (load_pos == -1 || store_pos == -1)
11089 switch (mips_last_74k_agen_insn)
11092 /* Prefer to schedule loads since they have a higher latency. */
11094 /* Swap loads to the front of the queue. */
11095 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11098 /* Swap stores to the front of the queue. */
11099 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11106 /* Implement TARGET_SCHED_INIT. */
/* Reset all per-function scheduler state for the tuning heuristics.  */
11109 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11110 int max_ready ATTRIBUTE_UNUSED)
11112 mips_macc_chains_last_hilo = 0;
11113 vr4130_last_insn = 0;
11114 mips_74k_agen_init (NULL_RTX);
11117 /* Implement TARGET_SCHED_REORDER and TARG_SCHED_REORDER2. */
/* Dispatch to the tuning-specific reorder helper; conditions below are
   partially truncated (e.g. the *nreadyp > 0 tests are missing).  */
11120 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11121 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11123 if (!reload_completed
11124 && TUNE_MACC_CHAINS
11126 mips_macc_chains_reorder (ready, *nreadyp);
11127 if (reload_completed
11129 && !TARGET_VR4130_ALIGN
11131 vr4130_reorder (ready, *nreadyp);
11133 mips_74k_agen_reorder (ready, *nreadyp);
11134 return mips_issue_rate ();
11137 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11140 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11141 rtx insn, int more)
11144 mips_74k_agen_init (insn);
11145 switch (GET_CODE (PATTERN (insn)))
11149 /* Don't count USEs and CLOBBERs against the issue rate. */
11154 if (!reload_completed && TUNE_MACC_CHAINS)
11155 mips_macc_chains_record (insn);
11156 vr4130_last_insn = insn;
11162 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11163 dependencies have no cost, except on the 20Kc where output-dependence
11164 is treated like input-dependence. */
/* NOTE(review): the TUNE_20KC test and the return values of the two
   branches are on missing lines in this extraction.  */
11167 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11168 rtx dep ATTRIBUTE_UNUSED, int cost)
11170 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11173 if (REG_NOTE_KIND (link) != 0)
11178 /* Return the number of instructions that can be issued per cycle. */
/* Switch on mips_tune; the literal rates returned per group are on
   missing lines (the comments indicate 4 for 74k-as-seen-by-scheduler,
   2 for the dual-issue cores, 3 for SB-1).  */
11181 mips_issue_rate (void)
11185 case PROCESSOR_74KC:
11186 case PROCESSOR_74KF2_1:
11187 case PROCESSOR_74KF1_1:
11188 case PROCESSOR_74KF3_2:
11189 /* The 74k is not strictly quad-issue cpu, but can be seen as one
11190 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11191 but in reality only a maximum of 3 insns can be issued as the
11192 floating point load/stores also require a slot in the AGEN pipe. */
11195 case PROCESSOR_20KC:
11196 case PROCESSOR_R4130:
11197 case PROCESSOR_R5400:
11198 case PROCESSOR_R5500:
11199 case PROCESSOR_R7000:
11200 case PROCESSOR_R9000:
11203 case PROCESSOR_SB1:
11204 case PROCESSOR_SB1A:
11205 /* This is actually 4, but we get better performance if we claim 3.
11206 This is partly because of unwanted speculative code motion with the
11207 larger number, and partly because in most common cases we can't
11208 reach the theoretical max of 4. */
11216 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11217 be as wide as the scheduling freedom in the DFA. */
11220 mips_multipass_dfa_lookahead (void)
11222 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11229 /* Implements a store data bypass check. We need this because the cprestore
11230 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11231 default routine to abort. We just return false for that case. */
11232 /* ??? Should try to give a better result here than assuming false. */
11235 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11237 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11240 return ! store_data_bypass_p (out_insn, in_insn);
11243 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11244 return the first operand of the associated "pref" or "prefx" insn. */
/* Encode locality class into the pref hint operand: base offsets 4/6
   plus the WRITE flag; the plain store/load return is on a missing
   line.  */
11247 mips_prefetch_cookie (rtx write, rtx locality)
11249 /* store_streamed / load_streamed. */
11250 if (INTVAL (locality) <= 0)
11251 return GEN_INT (INTVAL (write) + 4);
11253 /* store / load. */
11254 if (INTVAL (locality) <= 2)
11257 /* store_retained / load_retained. */
11258 return GEN_INT (INTVAL (write) + 6);
11261 /* MIPS builtin function support. */
11263 struct builtin_description
11265 /* The code of the main .md file instruction. See mips_builtin_type
11266 for more information. */
11267 enum insn_code icode;
11269 /* The floating-point comparison code to use with ICODE, if any. */
11270 enum mips_fp_condition cond;
11272 /* The name of the builtin function. */
11275 /* Specifies how the function should be expanded. */
11276 enum mips_builtin_type builtin_type;
11278 /* The function's prototype. */
11279 enum mips_function_type function_type;
11281 /* The target flags required for this function. */
11285 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11286 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11287 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11288 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11289 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
11291 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11293 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11294 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11295 "__builtin_mips_" #INSN "_" #COND "_s", \
11296 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11297 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11298 "__builtin_mips_" #INSN "_" #COND "_d", \
11299 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11301 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11302 The lower and upper forms require TARGET_FLAGS while the any and all
11303 forms require MASK_MIPS3D. */
11304 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11305 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11306 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11307 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11308 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11309 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11310 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11311 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11312 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11313 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11314 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11315 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11316 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11318 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11319 require MASK_MIPS3D. */
11320 #define CMP_4S_BUILTINS(INSN, COND) \
11321 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11322 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11323 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11325 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11326 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11327 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11330 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11331 instruction requires TARGET_FLAGS. */
11332 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11333 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11334 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11335 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11337 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11338 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11339 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11342 /* Define all the builtins related to c.cond.fmt condition COND. */
11343 #define CMP_BUILTINS(COND) \
11344 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11345 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11346 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11347 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11348 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11349 CMP_4S_BUILTINS (c, COND), \
11350 CMP_4S_BUILTINS (cabs, COND)
11352 static const struct builtin_description mips_bdesc[] =
11354 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11355 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11356 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11357 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11358 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11359 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11360 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11361 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11363 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11364 MASK_PAIRED_SINGLE_FLOAT),
11365 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11366 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11367 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11368 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11370 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11371 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11372 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11373 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11374 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11375 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11377 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11378 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11379 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11380 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11381 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11382 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11384 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11387 /* Builtin functions for the SB-1 processor. */
11389 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11391 static const struct builtin_description sb1_bdesc[] =
11393 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
/* Builtin functions for DSP ASE.  Map the builtin names onto the
   generic named patterns that implement them.  */

#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
/* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
   CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and TARGET_FLAGS are
   builtin_description fields.  */
#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)	\
  { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
    MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
/* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
   branch instruction.  TARGET_FLAGS is a builtin_description field.  */
#define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS)				\
  { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE,		\
    MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11417 static const struct builtin_description dsp_bdesc[] =
11419 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11420 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11421 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11422 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11423 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11424 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11425 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11426 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11427 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11428 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11429 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11430 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11431 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11432 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11433 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11434 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11435 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11436 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11437 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11438 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11439 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11440 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11441 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11442 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11443 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11444 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11445 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11446 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11447 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11448 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11449 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11450 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11451 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11452 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11453 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11454 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11455 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11456 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11457 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11458 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11459 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11460 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11461 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11462 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11463 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11464 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11465 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11466 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11467 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11468 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11469 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11470 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11471 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11472 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11473 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11474 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11475 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11476 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11477 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11478 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11479 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11480 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11481 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11482 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11483 BPOSGE_BUILTIN (32, MASK_DSP),
11485 /* The following are for the MIPS DSP ASE REV 2. */
11486 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11487 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11488 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11489 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11490 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11491 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11492 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11493 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11494 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11495 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11496 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11497 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11498 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11499 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11500 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11501 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11502 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11503 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11504 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11505 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11506 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11507 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11508 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11509 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11510 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11511 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11512 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11513 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11514 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11515 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11516 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11517 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11518 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11519 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
11522 static const struct builtin_description dsp_32only_bdesc[] =
11524 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11525 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11526 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11527 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11528 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11529 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11530 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11531 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11532 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11533 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11534 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11535 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11536 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11537 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11538 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11539 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11540 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11541 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11542 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11543 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11544 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11546 /* The following are for the MIPS DSP ASE REV 2. */
11547 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11548 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11549 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11550 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11551 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11552 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11553 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11554 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11555 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11556 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11557 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11558 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11559 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11560 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11561 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
11564 /* This helps provide a mapping from builtin function codes to bdesc
11569 /* The builtin function table that this entry describes. */
11570 const struct builtin_description *bdesc;
11572 /* The number of entries in the builtin function table. */
11575 /* The target processor that supports these builtin functions.
11576 PROCESSOR_MAX means we enable them for all processors. */
11577 enum processor_type proc;
11579 /* If the target has these flags, this builtin function table
11580 will not be supported. */
11581 int unsupported_target_flags;
11584 static const struct bdesc_map bdesc_arrays[] =
11586 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11587 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11588 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11589 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
11593 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11594 suitable for input operand OP of instruction ICODE. Return the value. */
11597 mips_prepare_builtin_arg (enum insn_code icode,
11598 unsigned int op, tree exp, unsigned int argnum)
11601 enum machine_mode mode;
11603 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11604 mode = insn_data[icode].operand[op].mode;
11605 if (!insn_data[icode].operand[op].predicate (value, mode))
11607 value = copy_to_mode_reg (mode, value);
11608 /* Check the predicate again. */
11609 if (!insn_data[icode].operand[op].predicate (value, mode))
11611 error ("invalid argument to builtin function");
11619 /* Return an rtx suitable for output operand OP of instruction ICODE.
11620 If TARGET is non-null, try to use it where possible. */
11623 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11625 enum machine_mode mode;
11627 mode = insn_data[icode].operand[op].mode;
11628 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11629 target = gen_reg_rtx (mode);
11634 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
11637 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11638 enum machine_mode mode ATTRIBUTE_UNUSED,
11639 int ignore ATTRIBUTE_UNUSED)
11641 enum insn_code icode;
11642 enum mips_builtin_type type;
11644 unsigned int fcode;
11645 const struct builtin_description *bdesc;
11646 const struct bdesc_map *m;
11648 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11649 fcode = DECL_FUNCTION_CODE (fndecl);
11652 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11654 if (fcode < m->size)
11657 icode = bdesc[fcode].icode;
11658 type = bdesc[fcode].builtin_type;
11668 case MIPS_BUILTIN_DIRECT:
11669 return mips_expand_builtin_direct (icode, target, exp, true);
11671 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11672 return mips_expand_builtin_direct (icode, target, exp, false);
11674 case MIPS_BUILTIN_MOVT:
11675 case MIPS_BUILTIN_MOVF:
11676 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11679 case MIPS_BUILTIN_CMP_ANY:
11680 case MIPS_BUILTIN_CMP_ALL:
11681 case MIPS_BUILTIN_CMP_UPPER:
11682 case MIPS_BUILTIN_CMP_LOWER:
11683 case MIPS_BUILTIN_CMP_SINGLE:
11684 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11687 case MIPS_BUILTIN_BPOSGE32:
11688 return mips_expand_builtin_bposge (type, target);
11695 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
11698 mips_init_builtins (void)
11700 const struct builtin_description *d;
11701 const struct bdesc_map *m;
11702 tree types[(int) MIPS_MAX_FTYPE_MAX];
11703 tree V2SF_type_node;
11704 tree V2HI_type_node;
11705 tree V4QI_type_node;
11706 unsigned int offset;
11708 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11709 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
11712 if (TARGET_PAIRED_SINGLE_FLOAT)
11714 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
11716 types[MIPS_V2SF_FTYPE_V2SF]
11717 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
11719 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
11720 = build_function_type_list (V2SF_type_node,
11721 V2SF_type_node, V2SF_type_node, NULL_TREE);
11723 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
11724 = build_function_type_list (V2SF_type_node,
11725 V2SF_type_node, V2SF_type_node,
11726 integer_type_node, NULL_TREE);
11728 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
11729 = build_function_type_list (V2SF_type_node,
11730 V2SF_type_node, V2SF_type_node,
11731 V2SF_type_node, V2SF_type_node, NULL_TREE);
11733 types[MIPS_V2SF_FTYPE_SF_SF]
11734 = build_function_type_list (V2SF_type_node,
11735 float_type_node, float_type_node, NULL_TREE);
11737 types[MIPS_INT_FTYPE_V2SF_V2SF]
11738 = build_function_type_list (integer_type_node,
11739 V2SF_type_node, V2SF_type_node, NULL_TREE);
11741 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
11742 = build_function_type_list (integer_type_node,
11743 V2SF_type_node, V2SF_type_node,
11744 V2SF_type_node, V2SF_type_node, NULL_TREE);
11746 types[MIPS_INT_FTYPE_SF_SF]
11747 = build_function_type_list (integer_type_node,
11748 float_type_node, float_type_node, NULL_TREE);
11750 types[MIPS_INT_FTYPE_DF_DF]
11751 = build_function_type_list (integer_type_node,
11752 double_type_node, double_type_node, NULL_TREE);
11754 types[MIPS_SF_FTYPE_V2SF]
11755 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
11757 types[MIPS_SF_FTYPE_SF]
11758 = build_function_type_list (float_type_node,
11759 float_type_node, NULL_TREE);
11761 types[MIPS_SF_FTYPE_SF_SF]
11762 = build_function_type_list (float_type_node,
11763 float_type_node, float_type_node, NULL_TREE);
11765 types[MIPS_DF_FTYPE_DF]
11766 = build_function_type_list (double_type_node,
11767 double_type_node, NULL_TREE);
11769 types[MIPS_DF_FTYPE_DF_DF]
11770 = build_function_type_list (double_type_node,
11771 double_type_node, double_type_node, NULL_TREE);
11776 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
11777 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
11779 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
11780 = build_function_type_list (V2HI_type_node,
11781 V2HI_type_node, V2HI_type_node,
11784 types[MIPS_SI_FTYPE_SI_SI]
11785 = build_function_type_list (intSI_type_node,
11786 intSI_type_node, intSI_type_node,
11789 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
11790 = build_function_type_list (V4QI_type_node,
11791 V4QI_type_node, V4QI_type_node,
11794 types[MIPS_SI_FTYPE_V4QI]
11795 = build_function_type_list (intSI_type_node,
11799 types[MIPS_V2HI_FTYPE_V2HI]
11800 = build_function_type_list (V2HI_type_node,
11804 types[MIPS_SI_FTYPE_SI]
11805 = build_function_type_list (intSI_type_node,
11809 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
11810 = build_function_type_list (V4QI_type_node,
11811 V2HI_type_node, V2HI_type_node,
11814 types[MIPS_V2HI_FTYPE_SI_SI]
11815 = build_function_type_list (V2HI_type_node,
11816 intSI_type_node, intSI_type_node,
11819 types[MIPS_SI_FTYPE_V2HI]
11820 = build_function_type_list (intSI_type_node,
11824 types[MIPS_V2HI_FTYPE_V4QI]
11825 = build_function_type_list (V2HI_type_node,
11829 types[MIPS_V4QI_FTYPE_V4QI_SI]
11830 = build_function_type_list (V4QI_type_node,
11831 V4QI_type_node, intSI_type_node,
11834 types[MIPS_V2HI_FTYPE_V2HI_SI]
11835 = build_function_type_list (V2HI_type_node,
11836 V2HI_type_node, intSI_type_node,
11839 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
11840 = build_function_type_list (V2HI_type_node,
11841 V4QI_type_node, V2HI_type_node,
11844 types[MIPS_SI_FTYPE_V2HI_V2HI]
11845 = build_function_type_list (intSI_type_node,
11846 V2HI_type_node, V2HI_type_node,
11849 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
11850 = build_function_type_list (intDI_type_node,
11851 intDI_type_node, V4QI_type_node, V4QI_type_node,
11854 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
11855 = build_function_type_list (intDI_type_node,
11856 intDI_type_node, V2HI_type_node, V2HI_type_node,
11859 types[MIPS_DI_FTYPE_DI_SI_SI]
11860 = build_function_type_list (intDI_type_node,
11861 intDI_type_node, intSI_type_node, intSI_type_node,
11864 types[MIPS_V4QI_FTYPE_SI]
11865 = build_function_type_list (V4QI_type_node,
11869 types[MIPS_V2HI_FTYPE_SI]
11870 = build_function_type_list (V2HI_type_node,
11874 types[MIPS_VOID_FTYPE_V4QI_V4QI]
11875 = build_function_type_list (void_type_node,
11876 V4QI_type_node, V4QI_type_node,
11879 types[MIPS_SI_FTYPE_V4QI_V4QI]
11880 = build_function_type_list (intSI_type_node,
11881 V4QI_type_node, V4QI_type_node,
11884 types[MIPS_VOID_FTYPE_V2HI_V2HI]
11885 = build_function_type_list (void_type_node,
11886 V2HI_type_node, V2HI_type_node,
11889 types[MIPS_SI_FTYPE_DI_SI]
11890 = build_function_type_list (intSI_type_node,
11891 intDI_type_node, intSI_type_node,
11894 types[MIPS_DI_FTYPE_DI_SI]
11895 = build_function_type_list (intDI_type_node,
11896 intDI_type_node, intSI_type_node,
11899 types[MIPS_VOID_FTYPE_SI_SI]
11900 = build_function_type_list (void_type_node,
11901 intSI_type_node, intSI_type_node,
11904 types[MIPS_SI_FTYPE_PTR_SI]
11905 = build_function_type_list (intSI_type_node,
11906 ptr_type_node, intSI_type_node,
11909 types[MIPS_SI_FTYPE_VOID]
11910 = build_function_type (intSI_type_node, void_list_node);
11914 types[MIPS_V4QI_FTYPE_V4QI]
11915 = build_function_type_list (V4QI_type_node,
11919 types[MIPS_SI_FTYPE_SI_SI_SI]
11920 = build_function_type_list (intSI_type_node,
11921 intSI_type_node, intSI_type_node,
11922 intSI_type_node, NULL_TREE);
11924 types[MIPS_DI_FTYPE_DI_USI_USI]
11925 = build_function_type_list (intDI_type_node,
11927 unsigned_intSI_type_node,
11928 unsigned_intSI_type_node, NULL_TREE);
11930 types[MIPS_DI_FTYPE_SI_SI]
11931 = build_function_type_list (intDI_type_node,
11932 intSI_type_node, intSI_type_node,
11935 types[MIPS_DI_FTYPE_USI_USI]
11936 = build_function_type_list (intDI_type_node,
11937 unsigned_intSI_type_node,
11938 unsigned_intSI_type_node, NULL_TREE);
11940 types[MIPS_V2HI_FTYPE_SI_SI_SI]
11941 = build_function_type_list (V2HI_type_node,
11942 intSI_type_node, intSI_type_node,
11943 intSI_type_node, NULL_TREE);
11948 /* Iterate through all of the bdesc arrays, initializing all of the
11949 builtin functions. */
11952 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11954 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
11955 && (m->unsupported_target_flags & target_flags) == 0)
11956 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
11957 if ((d->target_flags & target_flags) == d->target_flags)
11958 add_builtin_function (d->name, types[d->function_type],
11959 d - m->bdesc + offset,
11960 BUILT_IN_MD, NULL, NULL);
11965 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
11966 .md pattern and CALL is the function expr with arguments. TARGET,
11967 if nonnull, suggests a good place to put the result.
11968 HAS_TARGET indicates the function must return something. */
11971 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11974 rtx ops[MAX_RECOG_OPERANDS];
11980 /* We save target to ops[0]. */
11981 ops[0] = mips_prepare_builtin_target (icode, 0, target);
11985 /* We need to test if the arglist is not zero. Some instructions have extra
11986 clobber registers. */
11987 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
11988 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11993 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11997 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12001 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12005 gcc_unreachable ();
12010 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12011 function (TYPE says which). EXP is the tree for the function
12012 function, ICODE is the instruction that should be used to compare
12013 the first two arguments, and COND is the condition it should test.
12014 TARGET, if nonnull, suggests a good place to put the result. */
12017 mips_expand_builtin_movtf (enum mips_builtin_type type,
12018 enum insn_code icode, enum mips_fp_condition cond,
12019 rtx target, tree exp)
12021 rtx cmp_result, op0, op1;
12023 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12024 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12025 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12026 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12028 icode = CODE_FOR_mips_cond_move_tf_ps;
12029 target = mips_prepare_builtin_target (icode, 0, target);
12030 if (type == MIPS_BUILTIN_MOVT)
12032 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12033 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12037 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12038 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12040 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12044 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12045 into TARGET otherwise. Return TARGET. */
12048 mips_builtin_branch_and_move (rtx condition, rtx target,
12049 rtx value_if_true, rtx value_if_false)
12051 rtx true_label, done_label;
12053 true_label = gen_label_rtx ();
12054 done_label = gen_label_rtx ();
12056 /* First assume that CONDITION is false. */
12057 mips_emit_move (target, value_if_false);
12059 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12060 emit_jump_insn (gen_condjump (condition, true_label));
12061 emit_jump_insn (gen_jump (done_label));
12064 /* Fix TARGET if CONDITION is true. */
12065 emit_label (true_label);
12066 mips_emit_move (target, value_if_true);
12068 emit_label (done_label);
12072 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12073 of the comparison instruction and COND is the condition it should test.
12074 EXP is the function call and arguments and TARGET, if nonnull,
12075 suggests a good place to put the boolean result. */
12078 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12079 enum insn_code icode, enum mips_fp_condition cond,
12080 rtx target, tree exp)
12082 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
12086 if (target == 0 || GET_MODE (target) != SImode)
12087 target = gen_reg_rtx (SImode);
12089 /* Prepare the operands to the comparison. */
12090 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12091 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12092 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12094 switch (insn_data[icode].n_operands)
12097 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12101 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12102 ops[3], ops[4], GEN_INT (cond)));
12106 gcc_unreachable ();
12109 /* If the comparison sets more than one register, we define the result
12110 to be 0 if all registers are false and -1 if all registers are true.
12111 The value of the complete result is indeterminate otherwise. */
12112 switch (builtin_type)
12114 case MIPS_BUILTIN_CMP_ALL:
12115 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12116 return mips_builtin_branch_and_move (condition, target,
12117 const0_rtx, const1_rtx);
12119 case MIPS_BUILTIN_CMP_UPPER:
12120 case MIPS_BUILTIN_CMP_LOWER:
12121 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12122 condition = gen_single_cc (cmp_result, offset);
12123 return mips_builtin_branch_and_move (condition, target,
12124 const1_rtx, const0_rtx);
12127 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12128 return mips_builtin_branch_and_move (condition, target,
12129 const1_rtx, const0_rtx);
12133 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12134 suggests a good place to put the boolean result. */
12137 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12139 rtx condition, cmp_result;
12142 if (target == 0 || GET_MODE (target) != SImode)
12143 target = gen_reg_rtx (SImode);
12145 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12147 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12152 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12153 return mips_builtin_branch_and_move (condition, target,
12154 const1_rtx, const0_rtx);
12157 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12158 FIRST is true if this is the first time handling this decl. */
12161 mips_encode_section_info (tree decl, rtx rtl, int first)
12163 default_encode_section_info (decl, rtl, first);
12165 if (TREE_CODE (decl) == FUNCTION_DECL)
12167 rtx symbol = XEXP (rtl, 0);
12169 if ((TARGET_LONG_CALLS && !mips_near_type_p (TREE_TYPE (decl)))
12170 || mips_far_type_p (TREE_TYPE (decl)))
12171 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
12175 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12176 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12179 mips_extra_live_on_entry (bitmap regs)
12181 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12182 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
12185 /* SImode values are represented as sign-extended to DImode. */
12188 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12190 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12191 return SIGN_EXTEND;
12196 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12199 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
12204 fputs ("\t.dtprelword\t", file);
12208 fputs ("\t.dtpreldword\t", file);
12212 gcc_unreachable ();
12214 output_addr_const (file, x);
12215 fputs ("+0x8000", file);
12218 #include "gt-mips.h"